Merge remote-tracking branch 'upstream/master'
commit 5804441d38

.gitignore
@@ -15,4 +15,6 @@ python_env
 .ropeproject/
 .pytest_cache/
 venv/
 .python-version
+.vscode/
+tests/file.tmp
.travis.yml
@@ -1,3 +1,4 @@
+dist: xenial
 language: python
 sudo: false
 services:
@@ -5,26 +6,12 @@ services:
 python:
 - 2.7
 - 3.6
+- 3.7
 env:
 - TEST_SERVER_MODE=false
 - TEST_SERVER_MODE=true
-# Due to incomplete Python 3.7 support on Travis CI (
-# https://github.com/travis-ci/travis-ci/issues/9815),
-# using a matrix is necessary
-matrix:
-  include:
-    - python: 3.7
-      env: TEST_SERVER_MODE=false
-      dist: xenial
-      sudo: true
-    - python: 3.7
-      env: TEST_SERVER_MODE=true
-      dist: xenial
-      sudo: true
 before_install:
 - export BOTO_CONFIG=/dev/null
 - export AWS_SECRET_ACCESS_KEY=foobar_secret
 - export AWS_ACCESS_KEY_ID=foobar_key
 install:
 # We build moto first so the docker container doesn't try to compile it as well, also note we don't use
 # -d for docker run so the logs show up in travis
IMPLEMENTATION_COVERAGE.md

@@ -1,4 +1,3 @@
 ## acm - 41% implemented
 - [X] add_tags_to_certificate
 - [X] delete_certificate
Makefile
@@ -19,6 +19,7 @@ test: lint
 	rm -f .coverage
 	rm -rf cover
 	@nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE)

 test_server:
 	@TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/
README.md
@@ -2,8 +2,8 @@
 [](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

-[](https://travis-ci.org/spulec/moto)
-[](https://coveralls.io/r/spulec/moto)
+[](https://travis-ci.org/spulec/moto)
+[](https://coveralls.io/r/spulec/moto)
 [](http://docs.getmoto.org)

 # In a nutshell
@@ -70,10 +70,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | CloudwatchEvents | @mock_events | all endpoints done |
 |------------------------------------------------------------------------------|
 | Cognito Identity | @mock_cognitoidentity | basic endpoints done |
 |------------------------------------------------------------------------------|
 | Cognito Identity Provider | @mock_cognitoidp | basic endpoints done |
 |------------------------------------------------------------------------------|
+| Config | @mock_config | basic endpoints done |
+|------------------------------------------------------------------------------|
 | Data Pipeline | @mock_datapipeline | basic endpoints done |
 |------------------------------------------------------------------------------|
 | DynamoDB | @mock_dynamodb | core endpoints done |
@@ -259,7 +261,7 @@ It uses flask, which isn't a default dependency. You can install the
 server 'extra' package with:

 ```python
-pip install moto[server]
+pip install "moto[server]"
 ```

 You can then start it running a service:
moto/__init__.py

@@ -3,7 +3,7 @@ import logging
 # logging.getLogger('boto').setLevel(logging.CRITICAL)

 __title__ = 'moto'
-__version__ = '1.3.7'
+__version__ = '1.3.8'

 from .acm import mock_acm  # flake8: noqa
 from .apigateway import mock_apigateway, mock_apigateway_deprecated  # flake8: noqa
@@ -13,9 +13,11 @@ from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated
 from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated  # flake8: noqa
 from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated  # flake8: noqa
 from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated  # flake8: noqa
+from .config import mock_config  # flake8: noqa
 from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated  # flake8: noqa
 from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated  # flake8: noqa
 from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated  # flake8: noqa
+from .dynamodbstreams import mock_dynamodbstreams  # flake8: noqa
 from .ec2 import mock_ec2, mock_ec2_deprecated  # flake8: noqa
 from .ecr import mock_ecr, mock_ecr_deprecated  # flake8: noqa
 from .ecs import mock_ecs, mock_ecs_deprecated  # flake8: noqa
moto/acm/models.py

@@ -243,7 +243,7 @@ class CertBundle(BaseModel):
             'KeyAlgorithm': key_algo,
             'NotAfter': datetime_to_epoch(self._cert.not_valid_after),
             'NotBefore': datetime_to_epoch(self._cert.not_valid_before),
-            'Serial': self._cert.serial,
+            'Serial': self._cert.serial_number,
             'SignatureAlgorithm': self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''),
             'Status': self.status,  # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED.
             'Subject': 'CN={0}'.format(self.common_name),
moto/autoscaling/models.py

@@ -17,10 +17,12 @@ ASG_NAME_TAG = "aws:autoscaling:groupName"


 class InstanceState(object):
-    def __init__(self, instance, lifecycle_state="InService", health_status="Healthy"):
+    def __init__(self, instance, lifecycle_state="InService",
+                 health_status="Healthy", protected_from_scale_in=False):
         self.instance = instance
         self.lifecycle_state = lifecycle_state
         self.health_status = health_status
+        self.protected_from_scale_in = protected_from_scale_in


 class FakeScalingPolicy(BaseModel):
@@ -152,7 +154,8 @@ class FakeAutoScalingGroup(BaseModel):
                  min_size, launch_config_name, vpc_zone_identifier,
                  default_cooldown, health_check_period, health_check_type,
                  load_balancers, target_group_arns, placement_group, termination_policies,
-                 autoscaling_backend, tags):
+                 autoscaling_backend, tags,
+                 new_instances_protected_from_scale_in=False):
         self.autoscaling_backend = autoscaling_backend
         self.name = name
@@ -178,6 +181,7 @@ class FakeAutoScalingGroup(BaseModel):
         self.target_group_arns = target_group_arns
         self.placement_group = placement_group
         self.termination_policies = termination_policies
+        self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in

         self.suspended_processes = []
         self.instance_states = []
@@ -210,6 +214,8 @@ class FakeAutoScalingGroup(BaseModel):
             placement_group=None,
             termination_policies=properties.get("TerminationPolicies", []),
             tags=properties.get("Tags", []),
+            new_instances_protected_from_scale_in=properties.get(
+                "NewInstancesProtectedFromScaleIn", False)
         )
         return group
@@ -238,7 +244,8 @@ class FakeAutoScalingGroup(BaseModel):
     def update(self, availability_zones, desired_capacity, max_size, min_size,
                launch_config_name, vpc_zone_identifier, default_cooldown,
                health_check_period, health_check_type,
-               placement_group, termination_policies):
+               placement_group, termination_policies,
+               new_instances_protected_from_scale_in=None):
         if availability_zones:
             self.availability_zones = availability_zones
         if max_size is not None:
@@ -256,6 +263,8 @@ class FakeAutoScalingGroup(BaseModel):
             self.health_check_period = health_check_period
         if health_check_type is not None:
             self.health_check_type = health_check_type
+        if new_instances_protected_from_scale_in is not None:
+            self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in

         if desired_capacity is not None:
             self.set_desired_capacity(desired_capacity)
@@ -280,12 +289,16 @@ class FakeAutoScalingGroup(BaseModel):
         else:
             # Need to remove some instances
             count_to_remove = curr_instance_count - self.desired_capacity
-            instances_to_remove = self.instance_states[:count_to_remove]
-            instance_ids_to_remove = [
-                instance.instance.id for instance in instances_to_remove]
-            self.autoscaling_backend.ec2_backend.terminate_instances(
-                instance_ids_to_remove)
-            self.instance_states = self.instance_states[count_to_remove:]
+            instances_to_remove = [  # only remove unprotected
+                state for state in self.instance_states
+                if not state.protected_from_scale_in
+            ][:count_to_remove]
+            if instances_to_remove:  # just in case there are no instances to remove
+                instance_ids_to_remove = [
+                    instance.instance.id for instance in instances_to_remove]
+                self.autoscaling_backend.ec2_backend.terminate_instances(
+                    instance_ids_to_remove)
+            self.instance_states = list(set(self.instance_states) - set(instances_to_remove))

     def get_propagated_tags(self):
         propagated_tags = {}
@@ -310,7 +323,10 @@ class FakeAutoScalingGroup(BaseModel):
         )
         for instance in reservation.instances:
             instance.autoscaling_group = self
-            self.instance_states.append(InstanceState(instance))
+            self.instance_states.append(InstanceState(
+                instance,
+                protected_from_scale_in=self.new_instances_protected_from_scale_in,
+            ))

     def append_target_groups(self, target_group_arns):
         append = [x for x in target_group_arns if x not in self.target_group_arns]
@@ -372,7 +388,8 @@ class AutoScalingBackend(BaseBackend):
                                  default_cooldown, health_check_period,
                                  health_check_type, load_balancers,
                                  target_group_arns, placement_group,
-                                 termination_policies, tags):
+                                 termination_policies, tags,
+                                 new_instances_protected_from_scale_in=False):

         def make_int(value):
             return int(value) if value is not None else value
@@ -403,6 +420,7 @@ class AutoScalingBackend(BaseBackend):
             termination_policies=termination_policies,
             autoscaling_backend=self,
             tags=tags,
+            new_instances_protected_from_scale_in=new_instances_protected_from_scale_in,
         )

         self.autoscaling_groups[name] = group
@@ -415,12 +433,14 @@ class AutoScalingBackend(BaseBackend):
                                  launch_config_name, vpc_zone_identifier,
                                  default_cooldown, health_check_period,
                                  health_check_type, placement_group,
-                                 termination_policies):
+                                 termination_policies,
+                                 new_instances_protected_from_scale_in=None):
         group = self.autoscaling_groups[name]
         group.update(availability_zones, desired_capacity, max_size,
                      min_size, launch_config_name, vpc_zone_identifier,
                      default_cooldown, health_check_period, health_check_type,
-                     placement_group, termination_policies)
+                     placement_group, termination_policies,
+                     new_instances_protected_from_scale_in=new_instances_protected_from_scale_in)
         return group

     def describe_auto_scaling_groups(self, names):
@@ -448,7 +468,13 @@ class AutoScalingBackend(BaseBackend):
             raise ResourceContentionError
         else:
             group.desired_capacity = original_size + len(instance_ids)
-            new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids]
+            new_instances = [
+                InstanceState(
+                    self.ec2_backend.get_instance(x),
+                    protected_from_scale_in=group.new_instances_protected_from_scale_in,
+                )
+                for x in instance_ids
+            ]
             for instance in new_instances:
                 self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name})
             group.instance_states.extend(new_instances)
@@ -626,6 +652,13 @@ class AutoScalingBackend(BaseBackend):
         group = self.autoscaling_groups[group_name]
         group.suspended_processes = scaling_processes or []

+    def set_instance_protection(self, group_name, instance_ids, protected_from_scale_in):
+        group = self.autoscaling_groups[group_name]
+        protected_instances = [
+            x for x in group.instance_states if x.instance.id in instance_ids]
+        for instance in protected_instances:
+            instance.protected_from_scale_in = protected_from_scale_in
+

 autoscaling_backends = {}
 for region, ec2_backend in ec2_backends.items():
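Taken together, these model changes thread scale-in protection from CreateAutoScalingGroup down to each InstanceState. A minimal sketch (not part of the commit; the group and launch-configuration names are illustrative) of how the new flag could be exercised with boto3 under moto:

```python
import boto3
from moto import mock_autoscaling


@mock_autoscaling
def test_new_instances_protected_from_scale_in():
    client = boto3.client('autoscaling', region_name='us-east-1')
    client.create_launch_configuration(
        LaunchConfigurationName='test_lc',
        ImageId='ami-12345678',
        InstanceType='t2.micro',
    )
    client.create_auto_scaling_group(
        AutoScalingGroupName='test_asg',
        LaunchConfigurationName='test_lc',
        MinSize=1,
        MaxSize=2,
        DesiredCapacity=1,
        AvailabilityZones=['us-east-1a'],
        NewInstancesProtectedFromScaleIn=True,
    )
    group = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=['test_asg'])['AutoScalingGroups'][0]
    # The flag is echoed on the group and on every launched instance.
    assert group['NewInstancesProtectedFromScaleIn'] is True
    assert all(i['ProtectedFromScaleIn'] for i in group['Instances'])
```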
moto/autoscaling/responses.py

@@ -85,6 +85,8 @@ class AutoScalingResponse(BaseResponse):
             termination_policies=self._get_multi_param(
                 'TerminationPolicies.member'),
             tags=self._get_list_prefix('Tags.member'),
+            new_instances_protected_from_scale_in=self._get_bool_param(
+                'NewInstancesProtectedFromScaleIn', False)
         )
         template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
         return template.render()
@@ -192,6 +194,8 @@ class AutoScalingResponse(BaseResponse):
             placement_group=self._get_param('PlacementGroup'),
             termination_policies=self._get_multi_param(
                 'TerminationPolicies.member'),
+            new_instances_protected_from_scale_in=self._get_bool_param(
+                'NewInstancesProtectedFromScaleIn', None)
         )
         template = self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE)
         return template.render()
@@ -290,6 +294,15 @@ class AutoScalingResponse(BaseResponse):
         template = self.response_template(SUSPEND_PROCESSES_TEMPLATE)
         return template.render()

+    def set_instance_protection(self):
+        group_name = self._get_param('AutoScalingGroupName')
+        instance_ids = self._get_multi_param('InstanceIds.member')
+        protected_from_scale_in = self._get_bool_param('ProtectedFromScaleIn')
+        self.autoscaling_backend.set_instance_protection(
+            group_name, instance_ids, protected_from_scale_in)
+        template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE)
+        return template.render()
+

 CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
     <ResponseMetadata>
@@ -490,6 +503,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
           <InstanceId>{{ instance_state.instance.id }}</InstanceId>
           <LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
           <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
+          <ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
         </member>
         {% endfor %}
       </Instances>
@@ -508,6 +522,15 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
       {% else %}
       <LoadBalancerNames/>
       {% endif %}
+      {% if group.target_group_arns %}
+      <TargetGroupARNs>
+        {% for target_group_arn in group.target_group_arns %}
+        <member>{{ target_group_arn }}</member>
+        {% endfor %}
+      </TargetGroupARNs>
+      {% else %}
+      <TargetGroupARNs/>
+      {% endif %}
       <MinSize>{{ group.min_size }}</MinSize>
       {% if group.vpc_zone_identifier %}
       <VPCZoneIdentifier>{{ group.vpc_zone_identifier }}</VPCZoneIdentifier>
@@ -530,6 +553,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
       {% if group.placement_group %}
      <PlacementGroup>{{ group.placement_group }}</PlacementGroup>
      {% endif %}
+      <NewInstancesProtectedFromScaleIn>{{ group.new_instances_protected_from_scale_in|string|lower }}</NewInstancesProtectedFromScaleIn>
     </member>
     {% endfor %}
   </AutoScalingGroups>
@@ -565,6 +589,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesRespon
       <InstanceId>{{ instance_state.instance.id }}</InstanceId>
       <LaunchConfigurationName>{{ instance_state.instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
       <LifecycleState>{{ instance_state.lifecycle_state }}</LifecycleState>
+      <ProtectedFromScaleIn>{{ instance_state.protected_from_scale_in|string|lower }}</ProtectedFromScaleIn>
     </member>
     {% endfor %}
   </AutoScalingInstances>
@@ -668,3 +693,10 @@ SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autos
     <RequestId>{{ requestid }}</RequestId>
   </ResponseMetadata>
 </SetInstanceHealthResponse>"""
+
+SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<SetInstanceProtectionResult></SetInstanceProtectionResult>
+<ResponseMetadata>
+<RequestId>{{ requestid }}</RequestId>
+</ResponseMetadata>
+</SetInstanceProtectionResponse>"""
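The new handler and template round out the SetInstanceProtection API. A hypothetical continuation of the test body sketched earlier, toggling protection on one instance:

```python
    # Continuing inside test_new_instances_protected_from_scale_in above:
    instance_id = group['Instances'][0]['InstanceId']
    client.set_instance_protection(
        AutoScalingGroupName='test_asg',
        InstanceIds=[instance_id],
        ProtectedFromScaleIn=False,  # opt this instance back into scale-in
    )
```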
moto/awslambda/models.py

@@ -386,7 +386,7 @@ class LambdaFunction(BaseModel):
             'Role': properties['Role'],
             'Runtime': properties['Runtime'],
         }
-        optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split()
+        optional_properties = 'Description MemorySize Publish Timeout VpcConfig Environment'.split()
         # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the
         # default logic
         for prop in optional_properties:
@@ -500,6 +500,11 @@ class LambdaStorage(object):
         except ValueError:
             return self._functions[name]['latest']

+    def list_versions_by_function(self, name):
+        if name not in self._functions:
+            return None
+        return [self._functions[name]['latest']]
+
     def get_arn(self, arn):
         return self._arns.get(arn, None)
@@ -607,6 +612,9 @@ class LambdaBackend(BaseBackend):
     def get_function(self, function_name, qualifier=None):
         return self._lambdas.get_function(function_name, qualifier)

+    def list_versions_by_function(self, function_name):
+        return self._lambdas.list_versions_by_function(function_name)
+
     def get_function_by_arn(self, function_arn):
         return self._lambdas.get_arn(function_arn)
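With the storage and backend methods in place, ListVersionsByFunction becomes callable end-to-end. A minimal sketch (assumptions: the zip payload and role ARN are dummies that moto accepts):

```python
import io
import zipfile

import boto3
from moto import mock_lambda


def _zipped_handler():
    # Build an in-memory zip containing a trivial handler.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('handler.py', 'def handler(event, context):\n    return event\n')
    return buf.getvalue()


@mock_lambda
def test_list_versions_by_function():
    client = boto3.client('lambda', region_name='us-east-1')
    client.create_function(
        FunctionName='test-fn',
        Runtime='python2.7',
        Role='arn:aws:iam::123456789012:role/test-role',
        Handler='handler.handler',
        Code={'ZipFile': _zipped_handler()},
    )
    versions = client.list_versions_by_function(FunctionName='test-fn')
    assert len(versions['Versions']) == 1
```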
moto/awslambda/responses.py

@@ -52,7 +52,11 @@ class LambdaResponse(BaseResponse):
         self.setup_class(request, full_url, headers)
         if request.method == 'GET':
             # This is ListVersionByFunction
-            raise ValueError("Cannot handle request")
+            path = request.path if hasattr(request, 'path') else path_url(request.url)
+            function_name = path.split('/')[-2]
+            return self._list_versions_by_function(function_name)
+
         elif request.method == 'POST':
             return self._publish_function(request, full_url, headers)
         else:
@@ -151,6 +155,19 @@ class LambdaResponse(BaseResponse):

         return 200, {}, json.dumps(result)

+    def _list_versions_by_function(self, function_name):
+        result = {
+            'Versions': []
+        }
+
+        functions = self.lambda_backend.list_versions_by_function(function_name)
+        if functions:
+            for fn in functions:
+                json_data = fn.get_configuration()
+                result['Versions'].append(json_data)
+
+        return 200, {}, json.dumps(result)
+
     def _create_function(self, request, full_url, headers):
         try:
             fn = self.lambda_backend.create_function(self.json_body)
@@ -166,7 +183,7 @@ class LambdaResponse(BaseResponse):
         fn = self.lambda_backend.publish_function(function_name)
         if fn:
             config = fn.get_configuration()
-            return 200, {}, json.dumps(config)
+            return 201, {}, json.dumps(config)
         else:
             return 404, {}, "{}"
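PublishVersion now answers HTTP 201 (Created), matching the real Lambda API. A hypothetical continuation of the previous sketch:

```python
    # Continuing inside test_list_versions_by_function above:
    response = client.publish_version(FunctionName='test-fn')
    assert response['ResponseMetadata']['HTTPStatusCode'] == 201
```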
moto/backends.py

@@ -12,6 +12,7 @@ from moto.core import moto_api_backends
 from moto.datapipeline import datapipeline_backends
 from moto.dynamodb import dynamodb_backends
 from moto.dynamodb2 import dynamodb_backends2
+from moto.dynamodbstreams import dynamodbstreams_backends
 from moto.ec2 import ec2_backends
 from moto.ecr import ecr_backends
 from moto.ecs import ecs_backends
@@ -45,7 +46,7 @@ from moto.iot import iot_backends
 from moto.iotdata import iotdata_backends
 from moto.batch import batch_backends
 from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends
+from moto.config import config_backends

 BACKENDS = {
     'acm': acm_backends,
@@ -56,9 +57,11 @@ BACKENDS = {
     'cloudwatch': cloudwatch_backends,
     'cognito-identity': cognitoidentity_backends,
     'cognito-idp': cognitoidp_backends,
+    'config': config_backends,
     'datapipeline': datapipeline_backends,
     'dynamodb': dynamodb_backends,
     'dynamodb2': dynamodb_backends2,
+    'dynamodbstreams': dynamodbstreams_backends,
     'ec2': ec2_backends,
     'ecr': ecr_backends,
     'ecs': ecs_backends,
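Registering the backends here is what makes the new decorators importable from the top-level package. A minimal sketch of the new dynamodbstreams mock (assumption: a fresh mocked account lists no streams):

```python
import boto3
from moto import mock_config, mock_dynamodbstreams  # both new in this release


@mock_dynamodbstreams
def test_streams_client_starts_empty():
    client = boto3.client('dynamodbstreams', region_name='us-east-1')
    assert client.list_streams()['Streams'] == []
```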
moto/batch/responses.py

@@ -27,7 +27,7 @@ class BatchResponse(BaseResponse):
         elif not hasattr(self, '_json'):
             try:
                 self._json = json.loads(self.body)
-            except json.JSONDecodeError:
+            except ValueError:
                 print()
         return self._json
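The exception swap matters for Python 2 compatibility: json.JSONDecodeError exists only on Python 3, where it subclasses ValueError, while Python 2's json module raises plain ValueError. A small illustration:

```python
import json

try:
    json.loads('not json')
except ValueError as exc:  # also catches json.JSONDecodeError on Python 3
    print(type(exc).__name__)
```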
moto/cloudformation/models.py

@@ -1,5 +1,5 @@
 from __future__ import unicode_literals
-from datetime import datetime
+from datetime import datetime, timedelta
 import json
 import yaml
 import uuid
@@ -12,11 +12,156 @@ from .parsing import ResourceMap, OutputMap
 from .utils import (
     generate_changeset_id,
     generate_stack_id,
+    generate_stackset_arn,
+    generate_stackset_id,
     yaml_tag_constructor,
+    validate_template_cfn_lint,
 )
 from .exceptions import ValidationError


+class FakeStackSet(BaseModel):
+
+    def __init__(self, stackset_id, name, template, region='us-east-1',
+                 status='ACTIVE', description=None, parameters=None, tags=None,
+                 admin_role='AWSCloudFormationStackSetAdministrationRole',
+                 execution_role='AWSCloudFormationStackSetExecutionRole'):
+        self.id = stackset_id
+        self.arn = generate_stackset_arn(stackset_id, region)
+        self.name = name
+        self.template = template
+        self.description = description
+        self.parameters = parameters
+        self.tags = tags
+        self.admin_role = admin_role
+        self.execution_role = execution_role
+        self.status = status
+        self.instances = FakeStackInstances(parameters, self.id, self.name)
+        self.stack_instances = self.instances.stack_instances
+        self.operations = []
+
+    def _create_operation(self, operation_id, action, status, accounts=[], regions=[]):
+        operation = {
+            'OperationId': str(operation_id),
+            'Action': action,
+            'Status': status,
+            'CreationTimestamp': datetime.now(),
+            'EndTimestamp': datetime.now() + timedelta(minutes=2),
+            'Instances': [{account: region} for account in accounts for region in regions],
+        }
+
+        self.operations += [operation]
+        return operation
+
+    def get_operation(self, operation_id):
+        for operation in self.operations:
+            if operation_id == operation['OperationId']:
+                return operation
+        raise ValidationError(operation_id)
+
+    def update_operation(self, operation_id, status):
+        operation = self.get_operation(operation_id)
+        operation['Status'] = status
+        return operation_id
+
+    def delete(self):
+        self.status = 'DELETED'
+
+    def update(self, template, description, parameters, tags, admin_role,
+               execution_role, accounts, regions, operation_id=None):
+        if not operation_id:
+            operation_id = uuid.uuid4()
+
+        self.template = template if template else self.template
+        self.description = description if description is not None else self.description
+        self.parameters = parameters if parameters else self.parameters
+        self.tags = tags if tags else self.tags
+        self.admin_role = admin_role if admin_role else self.admin_role
+        self.execution_role = execution_role if execution_role else self.execution_role
+
+        if accounts and regions:
+            self.update_instances(accounts, regions, self.parameters)
+
+        operation = self._create_operation(operation_id=operation_id,
+                                           action='UPDATE', status='SUCCEEDED', accounts=accounts,
+                                           regions=regions)
+        return operation
+
+    def create_stack_instances(self, accounts, regions, parameters, operation_id=None):
+        if not operation_id:
+            operation_id = uuid.uuid4()
+        if not parameters:
+            parameters = self.parameters
+
+        self.instances.create_instances(accounts, regions, parameters, operation_id)
+        self._create_operation(operation_id=operation_id, action='CREATE',
+                               status='SUCCEEDED', accounts=accounts, regions=regions)
+
+    def delete_stack_instances(self, accounts, regions, operation_id=None):
+        if not operation_id:
+            operation_id = uuid.uuid4()
+
+        self.instances.delete(accounts, regions)
+
+        operation = self._create_operation(operation_id=operation_id, action='DELETE',
+                                           status='SUCCEEDED', accounts=accounts, regions=regions)
+        return operation
+
+    def update_instances(self, accounts, regions, parameters, operation_id=None):
+        if not operation_id:
+            operation_id = uuid.uuid4()
+
+        self.instances.update(accounts, regions, parameters)
+        operation = self._create_operation(operation_id=operation_id,
+                                           action='UPDATE', status='SUCCEEDED', accounts=accounts,
+                                           regions=regions)
+        return operation
+
+
+class FakeStackInstances(BaseModel):
+    def __init__(self, parameters, stackset_id, stackset_name):
+        self.parameters = parameters if parameters else {}
+        self.stackset_id = stackset_id
+        self.stack_name = "StackSet-{}".format(stackset_id)
+        self.stackset_name = stackset_name
+        self.stack_instances = []
+
+    def create_instances(self, accounts, regions, parameters, operation_id):
+        new_instances = []
+        for region in regions:
+            for account in accounts:
+                instance = {
+                    'StackId': generate_stack_id(self.stack_name, region, account),
+                    'StackSetId': self.stackset_id,
+                    'Region': region,
+                    'Account': account,
+                    'Status': "CURRENT",
+                    'ParameterOverrides': parameters if parameters else [],
+                }
+                new_instances.append(instance)
+        self.stack_instances += new_instances
+        return new_instances
+
+    def update(self, accounts, regions, parameters):
+        for account in accounts:
+            for region in regions:
+                instance = self.get_instance(account, region)
+                if parameters:
+                    instance['ParameterOverrides'] = parameters
+                else:
+                    instance['ParameterOverrides'] = []
+
+    def delete(self, accounts, regions):
+        for i, instance in enumerate(self.stack_instances):
+            if instance['Region'] in regions and instance['Account'] in accounts:
+                self.stack_instances.pop(i)
+
+    def get_instance(self, account, region):
+        for i, instance in enumerate(self.stack_instances):
+            if instance['Region'] == region and instance['Account'] == account:
+                return self.stack_instances[i]
+
+
 class FakeStack(BaseModel):

     def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False):
@@ -84,9 +229,9 @@ class FakeStack(BaseModel):
     def _parse_template(self):
         yaml.add_multi_constructor('', yaml_tag_constructor)
         try:
-            self.template_dict = yaml.load(self.template)
+            self.template_dict = yaml.load(self.template, Loader=yaml.Loader)
         except yaml.parser.ParserError:
             self.template_dict = json.loads(self.template)

     @property
     def stack_parameters(self):
@@ -126,6 +271,49 @@ class FakeStack(BaseModel):
         self.status = "DELETE_COMPLETE"


+class FakeChange(BaseModel):
+
+    def __init__(self, action, logical_resource_id, resource_type):
+        self.action = action
+        self.logical_resource_id = logical_resource_id
+        self.resource_type = resource_type
+
+
+class FakeChangeSet(FakeStack):
+
+    def __init__(self, stack_id, stack_name, stack_template, change_set_id, change_set_name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None):
+        super(FakeChangeSet, self).__init__(
+            stack_id,
+            stack_name,
+            stack_template,
+            parameters,
+            region_name,
+            notification_arns=notification_arns,
+            tags=tags,
+            role_arn=role_arn,
+            cross_stack_resources=cross_stack_resources,
+            create_change_set=True,
+        )
+        self.stack_name = stack_name
+        self.change_set_id = change_set_id
+        self.change_set_name = change_set_name
+        self.changes = self.diff(template=template, parameters=parameters)
+
+    def diff(self, template, parameters=None):
+        self.template = template
+        self._parse_template()
+        changes = []
+        resources_by_action = self.resource_map.diff(self.template_dict, parameters)
+        for action, resources in resources_by_action.items():
+            for resource_name, resource in resources.items():
+                changes.append(FakeChange(
+                    action=action,
+                    logical_resource_id=resource_name,
+                    resource_type=resource['ResourceType'],
+                ))
+        return changes
+
+
 class FakeEvent(BaseModel):

     def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None):
@@ -145,10 +333,72 @@ class CloudFormationBackend(BaseBackend):

     def __init__(self):
         self.stacks = OrderedDict()
+        self.stacksets = OrderedDict()
         self.deleted_stacks = {}
         self.exports = OrderedDict()
+        self.change_sets = OrderedDict()

+    def create_stack_set(self, name, template, parameters, tags=None, description=None, region='us-east-1', admin_role=None, execution_role=None):
+        stackset_id = generate_stackset_id(name)
+        new_stackset = FakeStackSet(
+            stackset_id=stackset_id,
+            name=name,
+            template=template,
+            parameters=parameters,
+            description=description,
+            tags=tags,
+            admin_role=admin_role,
+            execution_role=execution_role,
+        )
+        self.stacksets[stackset_id] = new_stackset
+        return new_stackset
+
+    def get_stack_set(self, name):
+        stacksets = self.stacksets.keys()
+        for stackset in stacksets:
+            if self.stacksets[stackset].name == name:
+                return self.stacksets[stackset]
+        raise ValidationError(name)
+
+    def delete_stack_set(self, name):
+        stacksets = self.stacksets.keys()
+        for stackset in stacksets:
+            if self.stacksets[stackset].name == name:
+                self.stacksets[stackset].delete()
+
+    def create_stack_instances(self, stackset_name, accounts, regions, parameters, operation_id=None):
+        stackset = self.get_stack_set(stackset_name)
+
+        stackset.create_stack_instances(
+            accounts=accounts,
+            regions=regions,
+            parameters=parameters,
+            operation_id=operation_id,
+        )
+        return stackset
+
+    def update_stack_set(self, stackset_name, template=None, description=None,
+                         parameters=None, tags=None, admin_role=None, execution_role=None,
+                         accounts=None, regions=None, operation_id=None):
+        stackset = self.get_stack_set(stackset_name)
+        update = stackset.update(
+            template=template,
+            description=description,
+            parameters=parameters,
+            tags=tags,
+            admin_role=admin_role,
+            execution_role=execution_role,
+            accounts=accounts,
+            regions=regions,
+            operation_id=operation_id
+        )
+        return update
+
+    def delete_stack_instances(self, stackset_name, accounts, regions, operation_id=None):
+        stackset = self.get_stack_set(stackset_name)
+        stackset.delete_stack_instances(accounts, regions, operation_id)
+        return stackset
+
     def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False):
         stack_id = generate_stack_id(name)
         new_stack = FakeStack(
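The backend now models stack sets and their per-account, per-region instances. A minimal client-side sketch (not part of the commit; the template body and account ID are illustrative):

```python
import json

import boto3
from moto import mock_cloudformation

TEMPLATE = json.dumps({
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {"Type": "AWS::SQS::Queue", "Properties": {"QueueName": "q"}},
    },
})


@mock_cloudformation
def test_stack_sets():
    client = boto3.client('cloudformation', region_name='us-east-1')
    client.create_stack_set(StackSetName='demo-set', TemplateBody=TEMPLATE)
    client.create_stack_instances(
        StackSetName='demo-set',
        Accounts=['123456789012'],
        Regions=['us-east-1', 'us-west-2'],
    )
    # One instance per (account, region) pair.
    instances = client.list_stack_instances(StackSetName='demo-set')
    assert len(instances['Summaries']) == 2
```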
@@ -170,24 +420,62 @@ class CloudFormationBackend(BaseBackend):
         return new_stack

     def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None):
+        stack_id = None
+        stack_template = None
+        if change_set_type == 'UPDATE':
+            stacks = self.stacks.values()
+            stack = None
+            for s in stacks:
+                if s.name == stack_name:
+                    stack = s
+                    stack_id = stack.stack_id
+                    stack_template = stack.template
+            if stack is None:
+                raise ValidationError(stack_name)
+        else:
-        stack = self.create_stack(stack_name, template, parameters,
-                                  region_name, notification_arns, tags,
-                                  role_arn, create_change_set=True)
+            stack_id = generate_stack_id(stack_name)
+            stack_template = template
+
         change_set_id = generate_changeset_id(change_set_name, region_name)
-        self.stacks[change_set_name] = {'Id': change_set_id,
-                                        'StackId': stack.stack_id}
-        self.change_sets[change_set_id] = stack
-        return change_set_id, stack.stack_id
+        new_change_set = FakeChangeSet(
+            stack_id=stack_id,
+            stack_name=stack_name,
+            stack_template=stack_template,
+            change_set_id=change_set_id,
+            change_set_name=change_set_name,
+            template=template,
+            parameters=parameters,
+            region_name=region_name,
+            notification_arns=notification_arns,
+            tags=tags,
+            role_arn=role_arn,
+            cross_stack_resources=self.exports
+        )
+        self.change_sets[change_set_id] = new_change_set
+        self.stacks[stack_id] = new_change_set
+        return change_set_id, stack_id

+    def delete_change_set(self, change_set_name, stack_name=None):
+        if change_set_name in self.change_sets:
+            # This means arn was passed in
+            del self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    del self.change_sets[cs]
+
+    def describe_change_set(self, change_set_name, stack_name=None):
+        change_set = None
+        if change_set_name in self.change_sets:
+            # This means arn was passed in
+            change_set = self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    change_set = self.change_sets[cs]
+        if change_set is None:
+            raise ValidationError(change_set_name)
+        return change_set
+
     def execute_change_set(self, change_set_name, stack_name=None):
         stack = None
@@ -196,7 +484,7 @@ class CloudFormationBackend(BaseBackend):
             stack = self.change_sets[change_set_name]
         else:
             for cs in self.change_sets:
-                if self.change_sets[cs].name == change_set_name:
+                if self.change_sets[cs].change_set_name == change_set_name:
                     stack = self.change_sets[cs]
         if stack is None:
             raise ValidationError(stack_name)
@@ -222,8 +510,15 @@ class CloudFormationBackend(BaseBackend):
         else:
             return list(stacks)

+    def list_change_sets(self):
+        return self.change_sets.values()
+
     def list_stacks(self):
-        return self.stacks.values()
+        return [
+            v for v in self.stacks.values()
+        ] + [
+            v for v in self.deleted_stacks.values()
+        ]

     def get_stack(self, name_or_stack_id):
         all_stacks = dict(self.deleted_stacks, **self.stacks)
@@ -270,6 +565,9 @@ class CloudFormationBackend(BaseBackend):
         next_token = str(token + 100) if len(all_exports) > token + 100 else None
         return exports, next_token

+    def validate_template(self, template):
+        return validate_template_cfn_lint(template)
+
     def _validate_export_uniqueness(self, stack):
         new_stack_export_names = [x.name for x in stack.exports]
         export_names = self.exports.keys()
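Change sets are likewise first-class FakeChangeSet objects now, so describe and execute work against real change data. A sketch of the repaired flow, reusing the imports and TEMPLATE from the stack-set sketch above:

```python
@mock_cloudformation
def test_change_sets():
    client = boto3.client('cloudformation', region_name='us-east-1')
    change_set = client.create_change_set(
        StackName='demo-stack',
        ChangeSetName='initial',
        TemplateBody=TEMPLATE,
        ChangeSetType='CREATE',
    )
    # Describe by ARN, then execute to materialise the stack.
    described = client.describe_change_set(ChangeSetName=change_set['Id'])
    assert described['ChangeSetName'] == 'initial'
    client.execute_change_set(ChangeSetName=change_set['Id'])
```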
moto/cloudformation/parsing.py

@@ -465,36 +465,70 @@ class ResourceMap(collections.Mapping):
                 ec2_models.ec2_backends[self._region_name].create_tags(
                     [self[resource].physical_resource_id], self.tags)

-    def update(self, template, parameters=None):
+    def diff(self, template, parameters=None):
         if parameters:
             self.input_parameters = parameters
         self.load_mapping()
         self.load_parameters()
         self.load_conditions()

         old_template = self._resource_json_map
         new_template = template['Resources']
+
+        resource_names_by_action = {
+            'Add': set(new_template) - set(old_template),
+            'Modify': set(name for name in new_template if name in old_template and new_template[
+                name] != old_template[name]),
+            'Remove': set(old_template) - set(new_template)
+        }
+        resources_by_action = {
+            'Add': {},
+            'Modify': {},
+            'Remove': {},
+        }
+
+        for resource_name in resource_names_by_action['Add']:
+            resources_by_action['Add'][resource_name] = {
+                'LogicalResourceId': resource_name,
+                'ResourceType': new_template[resource_name]['Type']
+            }
+
+        for resource_name in resource_names_by_action['Modify']:
+            resources_by_action['Modify'][resource_name] = {
+                'LogicalResourceId': resource_name,
+                'ResourceType': new_template[resource_name]['Type']
+            }
+
+        for resource_name in resource_names_by_action['Remove']:
+            resources_by_action['Remove'][resource_name] = {
+                'LogicalResourceId': resource_name,
+                'ResourceType': old_template[resource_name]['Type']
+            }
+
+        return resources_by_action
+
+    def update(self, template, parameters=None):
+        resources_by_action = self.diff(template, parameters)
+
+        old_template = self._resource_json_map
+        new_template = template['Resources']
         self._resource_json_map = new_template

-        new_resource_names = set(new_template) - set(old_template)
-        for resource_name in new_resource_names:
+        for resource_name, resource in resources_by_action['Add'].items():
             resource_json = new_template[resource_name]
             new_resource = parse_and_create_resource(
                 resource_name, resource_json, self, self._region_name)
             self._parsed_resources[resource_name] = new_resource

-        removed_resource_names = set(old_template) - set(new_template)
-        for resource_name in removed_resource_names:
+        for resource_name, resource in resources_by_action['Remove'].items():
             resource_json = old_template[resource_name]
             parse_and_delete_resource(
                 resource_name, resource_json, self, self._region_name)
             self._parsed_resources.pop(resource_name)

-        resources_to_update = set(name for name in new_template if name in old_template and new_template[
-            name] != old_template[name])
         tries = 1
-        while resources_to_update and tries < 5:
-            for resource_name in resources_to_update.copy():
+        while resources_by_action['Modify'] and tries < 5:
+            for resource_name, resource in resources_by_action['Modify'].copy().items():
                 resource_json = new_template[resource_name]
                 try:
                     changed_resource = parse_and_update_resource(

@@ -505,7 +539,7 @@ class ResourceMap(collections.Mapping):
                     last_exception = e
                 else:
                     self._parsed_resources[resource_name] = changed_resource
-                    resources_to_update.remove(resource_name)
+                    del resources_by_action['Modify'][resource_name]
             tries += 1
         if tries == 5:
             raise last_exception
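The new diff() boils down to three set operations over logical resource IDs, which update() then replays. A self-contained illustration of the same computation:

```python
# Old vs. new 'Resources' sections of a template.
old = {'Queue': {'Type': 'AWS::SQS::Queue'},
       'Topic': {'Type': 'AWS::SNS::Topic'}}
new = {'Queue': {'Type': 'AWS::SQS::Queue', 'Properties': {'DelaySeconds': 5}},
       'Bucket': {'Type': 'AWS::S3::Bucket'}}

actions = {
    'Add': set(new) - set(old),                                   # {'Bucket'}
    'Modify': {n for n in new if n in old and new[n] != old[n]},  # {'Queue'}
    'Remove': set(old) - set(new),                                # {'Topic'}
}
print(actions)
```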
moto/cloudformation/responses.py

@@ -1,6 +1,7 @@
 from __future__ import unicode_literals

 import json
+import yaml
 from six.moves.urllib.parse import urlparse

 from moto.core.responses import BaseResponse
@@ -87,7 +88,8 @@ class CloudFormationResponse(BaseResponse):
         role_arn = self._get_param('RoleARN')
+        update_or_create = self._get_param('ChangeSetType', 'CREATE')
         parameters_list = self._get_list_prefix("Parameters.member")
-        tags = {tag[0]: tag[1] for tag in self._get_list_prefix("Tags.member")}
+        tags = dict((item['key'], item['value'])
+                    for item in self._get_list_prefix("Tags.member"))
         parameters = {param['parameter_key']: param['parameter_value']
                       for param in parameters_list}
         if template_url:
@@ -118,6 +120,31 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE)
         return template.render(stack_id=stack_id, change_set_id=change_set_id)

+    def delete_change_set(self):
+        stack_name = self._get_param('StackName')
+        change_set_name = self._get_param('ChangeSetName')
+
+        self.cloudformation_backend.delete_change_set(change_set_name=change_set_name, stack_name=stack_name)
+        if self.request_json:
+            return json.dumps({
+                'DeleteChangeSetResponse': {
+                    'DeleteChangeSetResult': {},
+                }
+            })
+        else:
+            template = self.response_template(DELETE_CHANGE_SET_RESPONSE_TEMPLATE)
+            return template.render()
+
+    def describe_change_set(self):
+        stack_name = self._get_param('StackName')
+        change_set_name = self._get_param('ChangeSetName')
+        change_set = self.cloudformation_backend.describe_change_set(
+            change_set_name=change_set_name,
+            stack_name=stack_name,
+        )
+        template = self.response_template(DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE)
+        return template.render(change_set=change_set)
+
     @amzn_request_id
     def execute_change_set(self):
         stack_name = self._get_param('StackName')
@@ -185,6 +212,11 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(DESCRIBE_STACK_EVENTS_RESPONSE)
         return template.render(stack=stack)

+    def list_change_sets(self):
+        change_sets = self.cloudformation_backend.list_change_sets()
+        template = self.response_template(LIST_CHANGE_SETS_RESPONSE)
+        return template.render(change_sets=change_sets)
+
     def list_stacks(self):
         stacks = self.cloudformation_backend.list_stacks()
         template = self.response_template(LIST_STACKS_RESPONSE)
@@ -294,6 +326,201 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(LIST_EXPORTS_RESPONSE)
         return template.render(exports=exports, next_token=next_token)

+    def validate_template(self):
+        cfn_lint = self.cloudformation_backend.validate_template(self._get_param('TemplateBody'))
+        if cfn_lint:
+            raise ValidationError(cfn_lint[0].message)
+        description = ""
+        try:
+            description = json.loads(self._get_param('TemplateBody'))['Description']
+        except (ValueError, KeyError):
+            pass
+        try:
+            description = yaml.load(self._get_param('TemplateBody'))['Description']
+        except (yaml.ParserError, KeyError):
+            pass
+        template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE)
+        return template.render(description=description)
+
+    def create_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        stack_body = self._get_param('TemplateBody')
+        template_url = self._get_param('TemplateURL')
+        # role_arn = self._get_param('RoleARN')
+        parameters_list = self._get_list_prefix("Parameters.member")
+        tags = dict((item['key'], item['value'])
+                    for item in self._get_list_prefix("Tags.member"))
+
+        # Copy-Pasta - Hack dict-comprehension
+        parameters = dict([
+            (parameter['parameter_key'], parameter['parameter_value'])
+            for parameter
+            in parameters_list
+        ])
+        if template_url:
+            stack_body = self._get_stack_from_s3_url(template_url)
+
+        stackset = self.cloudformation_backend.create_stack_set(
+            name=stackset_name,
+            template=stack_body,
+            parameters=parameters,
+            tags=tags,
+            # role_arn=role_arn,
+        )
+        if self.request_json:
+            return json.dumps({
+                'CreateStackSetResponse': {
+                    'CreateStackSetResult': {
+                        'StackSetId': stackset.stackset_id,
+                    }
+                }
+            })
+        else:
+            template = self.response_template(CREATE_STACK_SET_RESPONSE_TEMPLATE)
+            return template.render(stackset=stackset)
+
+    def create_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        parameters = self._get_multi_param('ParameterOverrides.member')
+        self.cloudformation_backend.create_stack_instances(stackset_name, accounts, regions, parameters)
+        template = self.response_template(CREATE_STACK_INSTANCES_TEMPLATE)
+        return template.render()
+
+    def delete_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        self.cloudformation_backend.delete_stack_set(stackset_name)
+        template = self.response_template(DELETE_STACK_SET_RESPONSE_TEMPLATE)
+        return template.render()
+
+    def delete_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        operation = self.cloudformation_backend.delete_stack_instances(stackset_name, accounts, regions)
+
+        template = self.response_template(DELETE_STACK_INSTANCES_TEMPLATE)
+        return template.render(operation=operation)
+
+    def describe_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+
+        if not stackset.admin_role:
+            stackset.admin_role = 'arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole'
+        if not stackset.execution_role:
+            stackset.execution_role = 'AWSCloudFormationStackSetExecutionRole'
+
+        template = self.response_template(DESCRIBE_STACK_SET_RESPONSE_TEMPLATE)
+        return template.render(stackset=stackset)
+
+    def describe_stack_instance(self):
+        stackset_name = self._get_param('StackSetName')
+        account = self._get_param('StackInstanceAccount')
+        region = self._get_param('StackInstanceRegion')
+
+        instance = self.cloudformation_backend.get_stack_set(stackset_name).instances.get_instance(account, region)
+        template = self.response_template(DESCRIBE_STACK_INSTANCE_TEMPLATE)
+        rendered = template.render(instance=instance)
+        return rendered
+
+    def list_stack_sets(self):
+        stacksets = self.cloudformation_backend.stacksets
+        template = self.response_template(LIST_STACK_SETS_TEMPLATE)
+        return template.render(stacksets=stacksets)
+
+    def list_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+        template = self.response_template(LIST_STACK_INSTANCES_TEMPLATE)
+        return template.render(stackset=stackset)
+
+    def list_stack_set_operations(self):
+        stackset_name = self._get_param('StackSetName')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+        template = self.response_template(LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE)
+        return template.render(stackset=stackset)
+
+    def stop_stack_set_operation(self):
+        stackset_name = self._get_param('StackSetName')
+        operation_id = self._get_param('OperationId')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+        stackset.update_operation(operation_id, 'STOPPED')
+        template = self.response_template(STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE)
+        return template.render()
+
+    def describe_stack_set_operation(self):
+        stackset_name = self._get_param('StackSetName')
+        operation_id = self._get_param('OperationId')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+        operation = stackset.get_operation(operation_id)
+        template = self.response_template(DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE)
+        return template.render(stackset=stackset, operation=operation)
+
+    def list_stack_set_operation_results(self):
+        stackset_name = self._get_param('StackSetName')
+        operation_id = self._get_param('OperationId')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+        operation = stackset.get_operation(operation_id)
+        template = self.response_template(LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE)
+        return template.render(operation=operation)
+
+    def update_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        operation_id = self._get_param('OperationId')
+        description = self._get_param('Description')
+        execution_role = self._get_param('ExecutionRoleName')
+        admin_role = self._get_param('AdministrationRoleARN')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        template_body = self._get_param('TemplateBody')
+        template_url = self._get_param('TemplateURL')
+        if template_url:
+            template_body = self._get_stack_from_s3_url(template_url)
+        tags = dict((item['key'], item['value'])
+                    for item in self._get_list_prefix("Tags.member"))
+        parameters_list = self._get_list_prefix("Parameters.member")
+        parameters = dict([
+            (parameter['parameter_key'], parameter['parameter_value'])
+            for parameter
+            in parameters_list
+        ])
+        operation = self.cloudformation_backend.update_stack_set(
+            stackset_name=stackset_name,
+            template=template_body,
+            description=description,
+            parameters=parameters,
+            tags=tags,
+            admin_role=admin_role,
+            execution_role=execution_role,
+            accounts=accounts,
+            regions=regions,
+            operation_id=operation_id
+        )
+
+        template = self.response_template(UPDATE_STACK_SET_RESPONSE_TEMPLATE)
+        return template.render(operation=operation)
+
+    def update_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        parameters = self._get_multi_param('ParameterOverrides.member')
+        operation = self.cloudformation_backend.get_stack_set(stackset_name).update_instances(accounts, regions, parameters)
+        template = self.response_template(UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE)
+        return template.render(operation=operation)
+
+
+VALIDATE_STACK_RESPONSE_TEMPLATE = """<ValidateTemplateResponse>
+<ValidateTemplateResult>
+<Capabilities></Capabilities>
+<CapabilitiesReason></CapabilitiesReason>
+<DeclaredTransforms></DeclaredTransforms>
+<Description>{{ description }}</Description>
+<Parameters></Parameters>
+</ValidateTemplateResult>
+</ValidateTemplateResponse>"""
+
 CREATE_STACK_RESPONSE_TEMPLATE = """<CreateStackResponse>
   <CreateStackResult>
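ValidateTemplate is wired through to cfn-lint. A sketch, assuming the optional cfn-lint dependency is installed and reusing TEMPLATE from the stack-set sketch above:

```python
@mock_cloudformation
def test_validate_template():
    client = boto3.client('cloudformation', region_name='us-east-1')
    # A well-formed template passes; a broken one raises a ValidationError.
    result = client.validate_template(TemplateBody=TEMPLATE)
    assert result['ResponseMetadata']['HTTPStatusCode'] == 200
```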
@@ -326,6 +553,66 @@ CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """<CreateStackResponse>
 </CreateStackResponse>
 """

+DELETE_CHANGE_SET_RESPONSE_TEMPLATE = """<DeleteChangeSetResponse>
+  <DeleteChangeSetResult>
+  </DeleteChangeSetResult>
+  <ResponseMetadata>
+    <RequestId>3d3200a1-810e-3023-6cc3-example</RequestId>
+  </ResponseMetadata>
+</DeleteChangeSetResponse>
+"""
+
+DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """<DescribeChangeSetResponse>
+  <DescribeChangeSetResult>
+    <ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
+    <ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
+    <StackId>{{ change_set.stack_id }}</StackId>
+    <StackName>{{ change_set.stack_name }}</StackName>
+    <Description>{{ change_set.description }}</Description>
+    <Parameters>
+      {% for param_name, param_value in change_set.stack_parameters.items() %}
+      <member>
+        <ParameterKey>{{ param_name }}</ParameterKey>
+        <ParameterValue>{{ param_value }}</ParameterValue>
+      </member>
+      {% endfor %}
+    </Parameters>
+    <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+    <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
+    <Status>{{ change_set.status }}</Status>
+    <StatusReason>{{ change_set.status_reason }}</StatusReason>
+    {% if change_set.notification_arns %}
+    <NotificationARNs>
+      {% for notification_arn in change_set.notification_arns %}
+      <member>{{ notification_arn }}</member>
+      {% endfor %}
+    </NotificationARNs>
+    {% else %}
+    <NotificationARNs/>
+    {% endif %}
+    {% if change_set.role_arn %}
+    <RoleARN>{{ change_set.role_arn }}</RoleARN>
+    {% endif %}
+    {% if change_set.changes %}
+    <Changes>
+      {% for change in change_set.changes %}
+      <member>
+        <Type>Resource</Type>
+        <ResourceChange>
+          <Action>{{ change.action }}</Action>
+          <LogicalResourceId>{{ change.logical_resource_id }}</LogicalResourceId>
+          <ResourceType>{{ change.resource_type }}</ResourceType>
+        </ResourceChange>
+      </member>
+      {% endfor %}
+    </Changes>
+    {% endif %}
+    {% if next_token %}
+    <NextToken>{{ next_token }}</NextToken>
+    {% endif %}
+  </DescribeChangeSetResult>
+</DescribeChangeSetResponse>"""
+
 EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """<ExecuteChangeSetResponse>
   <ExecuteChangeSetResult>
     <ExecuteChangeSetResult/>
@@ -451,6 +738,27 @@ SET_INSTANCE_HEALTH_TEMPLATE = """<SetInstanceHealthResponse xmlns="http://autos
 </DescribeStackEventsResponse>"""


+LIST_CHANGE_SETS_RESPONSE = """<ListChangeSetsResponse>
+  <ListChangeSetsResult>
+    <Summaries>
+      {% for change_set in change_sets %}
+      <member>
+        <StackId>{{ change_set.stack_id }}</StackId>
+        <StackName>{{ change_set.stack_name }}</StackName>
+        <ChangeSetId>{{ change_set.change_set_id }}</ChangeSetId>
+        <ChangeSetName>{{ change_set.change_set_name }}</ChangeSetName>
+        <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
+        <Status>{{ change_set.status }}</Status>
+        <StatusReason>{{ change_set.status_reason }}</StatusReason>
+        <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+        <Description>{{ change_set.description }}</Description>
+      </member>
+      {% endfor %}
+    </Summaries>
+  </ListChangeSetsResult>
+</ListChangeSetsResponse>"""
+
+
 LIST_STACKS_RESPONSE = """<ListStacksResponse>
 <ListStacksResult>
  <StackSummaries>
@@ -525,3 +833,236 @@ LIST_EXPORTS_RESPONSE = """<ListExportsResponse xmlns="http://cloudformation.ama
   <RequestId>5ccc7dcd-744c-11e5-be70-example</RequestId>
  </ResponseMetadata>
</ListExportsResponse>"""
+
+CREATE_STACK_SET_RESPONSE_TEMPLATE = """<CreateStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <CreateStackSetResult>
+    <StackSetId>{{ stackset.stackset_id }}</StackSetId>
+  </CreateStackSetResult>
+  <ResponseMetadata>
+    <RequestId>f457258c-391d-41d1-861f-example</RequestId>
+  </ResponseMetadata>
+</CreateStackSetResponse>
+"""
+
+DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """<DescribeStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <DescribeStackSetResult>
+    <StackSet>
+      <Capabilities/>
+      <StackSetARN>{{ stackset.arn }}</StackSetARN>
+      <ExecutionRoleName>{{ stackset.execution_role }}</ExecutionRoleName>
+      <AdministrationRoleARN>{{ stackset.admin_role }}</AdministrationRoleARN>
+      <StackSetId>{{ stackset.id }}</StackSetId>
+      <TemplateBody>{{ stackset.template }}</TemplateBody>
+      <StackSetName>{{ stackset.name }}</StackSetName>
+      <Parameters>
+        {% for param_name, param_value in stackset.parameters.items() %}
+        <member>
+          <ParameterKey>{{ param_name }}</ParameterKey>
+          <ParameterValue>{{ param_value }}</ParameterValue>
+        </member>
+        {% endfor %}
+      </Parameters>
+      <Tags>
+        {% for tag_key, tag_value in stackset.tags.items() %}
+        <member>
+          <Key>{{ tag_key }}</Key>
+          <Value>{{ tag_value }}</Value>
+        </member>
+        {% endfor %}
+      </Tags>
+      <Status>{{ stackset.status }}</Status>
+    </StackSet>
+  </DescribeStackSetResult>
+  <ResponseMetadata>
+    <RequestId>d8b64e11-5332-46e1-9603-example</RequestId>
+  </ResponseMetadata>
+</DescribeStackSetResponse>"""
+
+DELETE_STACK_SET_RESPONSE_TEMPLATE = """<DeleteStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <DeleteStackSetResult/>
+  <ResponseMetadata>
+    <RequestId>c35ec2d0-d69f-4c4d-9bd7-example</RequestId>
+  </ResponseMetadata>
+</DeleteStackSetResponse>"""
+
+CREATE_STACK_INSTANCES_TEMPLATE = """<CreateStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <CreateStackInstancesResult>
+    <OperationId>1459ad6d-63cc-4c96-a73e-example</OperationId>
+  </CreateStackInstancesResult>
+  <ResponseMetadata>
+    <RequestId>6b29f7e3-69be-4d32-b374-example</RequestId>
+  </ResponseMetadata>
+</CreateStackInstancesResponse>
+"""
+
+LIST_STACK_INSTANCES_TEMPLATE = """<ListStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <ListStackInstancesResult>
+    <Summaries>
+      {% for instance in stackset.stack_instances %}
+      <member>
+        <StackId>{{ instance.StackId }}</StackId>
+        <StackSetId>{{ instance.StackSetId }}</StackSetId>
+        <Region>{{ instance.Region }}</Region>
+        <Account>{{ instance.Account }}</Account>
+        <Status>{{ instance.Status }}</Status>
+      </member>
+      {% endfor %}
+    </Summaries>
+  </ListStackInstancesResult>
+  <ResponseMetadata>
+    <RequestId>83c27e73-b498-410f-993c-example</RequestId>
+  </ResponseMetadata>
+</ListStackInstancesResponse>
+"""
+
+DELETE_STACK_INSTANCES_TEMPLATE = """<DeleteStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <DeleteStackInstancesResult>
+    <OperationId>{{ operation.OperationId }}</OperationId>
+  </DeleteStackInstancesResult>
+  <ResponseMetadata>
+    <RequestId>e5325090-66f6-4ecd-a531-example</RequestId>
+  </ResponseMetadata>
+</DeleteStackInstancesResponse>
+"""
+
+DESCRIBE_STACK_INSTANCE_TEMPLATE = """<DescribeStackInstanceResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
+  <DescribeStackInstanceResult>
+    <StackInstance>
+      <StackId>{{ instance.StackId }}</StackId>
+      <StackSetId>{{ instance.StackSetId }}</StackSetId>
|
||||
{% if instance.ParameterOverrides %}
|
||||
<ParameterOverrides>
|
||||
{% for override in instance.ParameterOverrides %}
|
||||
{% if override['ParameterKey'] or override['ParameterValue'] %}
|
||||
<member>
|
||||
<ParameterKey>{{ override.ParameterKey }}</ParameterKey>
|
||||
<UsePreviousValue>false</UsePreviousValue>
|
||||
<ParameterValue>{{ override.ParameterValue }}</ParameterValue>
|
||||
</member>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ParameterOverrides>
|
||||
{% else %}
|
||||
<ParameterOverrides/>
|
||||
{% endif %}
|
||||
<Region>{{ instance.Region }}</Region>
|
||||
<Account>{{ instance.Account }}</Account>
|
||||
<Status>{{ instance.Status }}</Status>
|
||||
</StackInstance>
|
||||
</DescribeStackInstanceResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>c6c7be10-0343-4319-8a25-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeStackInstanceResponse>
|
||||
"""
|
||||
|
||||
LIST_STACK_SETS_TEMPLATE = """<ListStackSetsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<ListStackSetsResult>
|
||||
<Summaries>
|
||||
{% for key, value in stacksets.items() %}
|
||||
<member>
|
||||
<StackSetName>{{ value.name }}</StackSetName>
|
||||
<StackSetId>{{ value.id }}</StackSetId>
|
||||
<Status>{{ value.status }}</Status>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Summaries>
|
||||
</ListStackSetsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>4dcacb73-841e-4ed8-b335-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListStackSetsResponse>
|
||||
"""
|
||||
|
||||
UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE = """<UpdateStackInstancesResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<UpdateStackInstancesResult>
|
||||
<OperationId>{{ operation }}</OperationId>
|
||||
</UpdateStackInstancesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>bdbf8e94-19b6-4ce4-af85-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</UpdateStackInstancesResponse>
|
||||
"""
|
||||
|
||||
UPDATE_STACK_SET_RESPONSE_TEMPLATE = """<UpdateStackSetResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<UpdateStackSetResult>
|
||||
<OperationId>{{ operation.OperationId }}</OperationId>
|
||||
</UpdateStackSetResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>adac907b-17e3-43e6-a254-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</UpdateStackSetResponse>
|
||||
"""
|
||||
|
||||
LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE = """<ListStackSetOperationsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<ListStackSetOperationsResult>
|
||||
<Summaries>
|
||||
{% for operation in stackset.operations %}
|
||||
<member>
|
||||
<CreationTimestamp>{{ operation.CreationTimestamp }}</CreationTimestamp>
|
||||
<OperationId>{{ operation.OperationId }}</OperationId>
|
||||
<Action>{{ operation.Action }}</Action>
|
||||
<EndTimestamp>{{ operation.EndTimestamp }}</EndTimestamp>
|
||||
<Status>{{ operation.Status }}</Status>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Summaries>
|
||||
</ListStackSetOperationsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>65b9d9be-08bb-4a43-9a21-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListStackSetOperationsResponse>
|
||||
"""
|
||||
|
||||
STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE = """<StopStackSetOperationResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<StopStackSetOperationResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>2188554a-07c6-4396-b2c5-example</RequestId>
|
||||
</ResponseMetadata> </StopStackSetOperationResponse>
|
||||
"""
|
||||
|
||||
DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE = """<DescribeStackSetOperationResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<DescribeStackSetOperationResult>
|
||||
<StackSetOperation>
|
||||
<ExecutionRoleName>{{ stackset.execution_role }}</ExecutionRoleName>
|
||||
<AdministrationRoleARN>arn:aws:iam::123456789012:role/{{ stackset.admin_role }}</AdministrationRoleARN>
|
||||
<StackSetId>{{ stackset.id }}</StackSetId>
|
||||
<CreationTimestamp>{{ operation.CreationTimestamp }}</CreationTimestamp>
|
||||
<OperationId>{{ operation.OperationId }}</OperationId>
|
||||
<Action>{{ operation.Action }}</Action>
|
||||
<OperationPreferences>
|
||||
<RegionOrder/>
|
||||
</OperationPreferences>
|
||||
<EndTimestamp>{{ operation.EndTimestamp }}</EndTimestamp>
|
||||
<Status>{{ operation.Status }}</Status>
|
||||
</StackSetOperation>
|
||||
</DescribeStackSetOperationResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>2edc27b6-9ce2-486a-a192-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeStackSetOperationResponse>
|
||||
"""
|
||||
|
||||
LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = """<ListStackSetOperationResultsResponse xmlns="http://internal.amazon.com/coral/com.amazonaws.maestro.service.v20160713/">
|
||||
<ListStackSetOperationResultsResult>
|
||||
<Summaries>
|
||||
{% for instance in operation.Instances %}
|
||||
{% for account, region in instance.items() %}
|
||||
<member>
|
||||
<AccountGateResult>
|
||||
<StatusReason>Function not found: arn:aws:lambda:us-west-2:123456789012:function:AWSCloudFormationStackSetAccountGate</StatusReason>
|
||||
<Status>SKIPPED</Status>
|
||||
</AccountGateResult>
|
||||
<Region>{{ region }}</Region>
|
||||
<Account>{{ account }}</Account>
|
||||
<Status>{{ operation.Status }}</Status>
|
||||
</member>
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
</Summaries>
|
||||
</ListStackSetOperationResultsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>ac05a9ce-5f98-4197-a29b-example</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListStackSetOperationResultsResponse>
|
||||
"""
@ -3,11 +3,14 @@ import uuid
import six
import random
import yaml
import os

from cfnlint import decode, core


def generate_stack_id(stack_name):
def generate_stack_id(stack_name, region="us-east-1", account="123456789"):
    random_id = uuid.uuid4()
    return "arn:aws:cloudformation:us-east-1:123456789:stack/{0}/{1}".format(stack_name, random_id)
    return "arn:aws:cloudformation:{}:{}:stack/{}/{}".format(region, account, stack_name, random_id)


def generate_changeset_id(changeset_name, region_name):
@ -15,6 +18,15 @@ def generate_changeset_id(changeset_name, region_name):
    return 'arn:aws:cloudformation:{0}:123456789:changeSet/{1}/{2}'.format(region_name, changeset_name, random_id)


def generate_stackset_id(stackset_name):
    random_id = uuid.uuid4()
    return '{}:{}'.format(stackset_name, random_id)


def generate_stackset_arn(stackset_id, region_name):
    return 'arn:aws:cloudformation:{}:123456789012:stackset/{}'.format(region_name, stackset_id)
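For illustration, the helpers above produce identifiers of the following shapes (the UUID suffixes below are made up):

```python
# Illustration only -- the UUID suffixes are random at runtime.
generate_stack_id("mystack")
# 'arn:aws:cloudformation:us-east-1:123456789:stack/mystack/9c0f...'

generate_stackset_id("mystackset")
# 'mystackset:9c0f...'

generate_stackset_arn(generate_stackset_id("mystackset"), "us-west-2")
# 'arn:aws:cloudformation:us-west-2:123456789012:stackset/mystackset:9c0f...'
```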


def random_suffix():
    size = 12
    chars = list(range(10)) + ['A-Z']
@ -38,3 +50,33 @@ def yaml_tag_constructor(loader, tag, node):
        key = 'Fn::{}'.format(tag[1:])

    return {key: _f(loader, tag, node)}


def validate_template_cfn_lint(template):

    # Save the template to a temporary file -- cfn-lint requires a file
    filename = "file.tmp"
    with open(filename, "w") as file:
        file.write(template)
    abs_filename = os.path.abspath(filename)

    # decode handles both yaml and json
    template, matches = decode.decode(abs_filename, False)

    # Set cfn-lint to info
    core.configure_logging(None)

    # Initialize the ruleset to be applied (no overrules, no excludes)
    rules = core.get_rules([], [], [])

    # Use us-east-1 region (spec file) for validation
    regions = ['us-east-1']

    # Process all the rules and gather the errors
    matches = core.run_checks(
        abs_filename,
        template,
        rules,
        regions)

    return matches
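A hedged sketch of how this validator can be called; the template string and the handling of the result are illustrative, not part of the diff:

```python
# Hypothetical usage: lint a small template and inspect the rule matches.
template = """
Resources:
  Bucket:
    Type: AWS::S3::Bucket
"""
matches = validate_template_cfn_lint(template)
for match in matches:
    # Each match is one cfn-lint rule violation for the template.
    print(match)
```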
@ -24,6 +24,16 @@ class UserNotFoundError(BadRequest):
        })


class GroupExistsException(BadRequest):

    def __init__(self, message):
        super(GroupExistsException, self).__init__()
        self.description = json.dumps({
            "message": message,
            '__type': 'GroupExistsException',
        })


class NotAuthorizedError(BadRequest):

    def __init__(self, message):

@ -1,6 +1,8 @@
from __future__ import unicode_literals

import datetime
import functools
import itertools
import json
import os
import time
@ -11,8 +13,7 @@ from jose import jws

from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from .exceptions import NotAuthorizedError, ResourceNotFoundError, UserNotFoundError
from .exceptions import GroupExistsException, NotAuthorizedError, ResourceNotFoundError, UserNotFoundError

UserStatus = {
    "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD",
@ -20,6 +21,39 @@
}


def paginate(limit, start_arg="next_token", limit_arg="max_results"):
    """Returns a limited result list, and an offset into the list of remaining items

    Takes the next_token and max_results kwargs given to a function and handles
    the slicing of the results. The kwarg `next_token` is the offset into the
    list to begin slicing from; `max_results` is the size of the result required.

    If max_results is not supplied then the `limit` parameter is used as a
    default.

    :param limit_arg: the name of the argument in the decorated function that
        controls the number of items returned
    :param start_arg: the name of the argument in the decorated function that
        provides the starting offset
    :param limit: a default maximum number of items to return
    :return: a tuple containing a list of items, and the offset into the list
    """
    default_start = 0

    def outer_wrapper(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])
            lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])
            stop = start + lim
            result = func(*args, **kwargs)
            limited_results = list(itertools.islice(result, start, stop))
            next_token = stop if stop < len(result) else None
            return limited_results, next_token
        return wrapper
    return outer_wrapper
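A small sketch (names are hypothetical) of the contract the decorator imposes on a listing function:

```python
# Hypothetical listing function wrapped by the decorator above.
@paginate(limit=2)
def list_things(max_results=None, next_token=None):
    return ['a', 'b', 'c', 'd', 'e']

list_things()              # (['a', 'b'], 2)  -- first page, offset of the next
list_things(next_token=2)  # (['c', 'd'], 4)
list_things(next_token=4)  # (['e'], None)    -- None marks the last page
```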


class CognitoIdpUserPool(BaseModel):

    def __init__(self, region, name, extended_config):
@ -33,6 +67,7 @@ class CognitoIdpUserPool(BaseModel):

        self.clients = OrderedDict()
        self.identity_providers = OrderedDict()
        self.groups = OrderedDict()
        self.users = OrderedDict()
        self.refresh_tokens = {}
        self.access_tokens = {}
@ -185,6 +220,33 @@ class CognitoIdpIdentityProvider(BaseModel):
        return identity_provider_json


class CognitoIdpGroup(BaseModel):

    def __init__(self, user_pool_id, group_name, description, role_arn, precedence):
        self.user_pool_id = user_pool_id
        self.group_name = group_name
        self.description = description or ""
        self.role_arn = role_arn
        self.precedence = precedence
        self.last_modified_date = datetime.datetime.now()
        self.creation_date = self.last_modified_date

        # Users who are members of this group.
        # Note that these links are bidirectional.
        self.users = set()

    def to_json(self):
        return {
            "GroupName": self.group_name,
            "UserPoolId": self.user_pool_id,
            "Description": self.description,
            "RoleArn": self.role_arn,
            "Precedence": self.precedence,
            "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()),
            "CreationDate": time.mktime(self.creation_date.timetuple()),
        }


class CognitoIdpUser(BaseModel):

    def __init__(self, user_pool_id, username, password, status, attributes):
@ -198,6 +260,10 @@ class CognitoIdpUser(BaseModel):
        self.create_date = datetime.datetime.utcnow()
        self.last_modified_date = datetime.datetime.utcnow()

        # Groups this user is a member of.
        # Note that these links are bidirectional.
        self.groups = set()

    def _base_json(self):
        return {
            "UserPoolId": self.user_pool_id,
@ -242,7 +308,8 @@ class CognitoIdpBackend(BaseBackend):
        self.user_pools[user_pool.id] = user_pool
        return user_pool

    def list_user_pools(self):
    @paginate(60)
    def list_user_pools(self, max_results=None, next_token=None):
        return self.user_pools.values()

    def describe_user_pool(self, user_pool_id):
@ -289,7 +356,8 @@ class CognitoIdpBackend(BaseBackend):
        user_pool.clients[user_pool_client.id] = user_pool_client
        return user_pool_client

    def list_user_pool_clients(self, user_pool_id):
    @paginate(60)
    def list_user_pool_clients(self, user_pool_id, max_results=None, next_token=None):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)
@ -339,7 +407,8 @@ class CognitoIdpBackend(BaseBackend):
        user_pool.identity_providers[name] = identity_provider
        return identity_provider

    def list_identity_providers(self, user_pool_id):
    @paginate(60)
    def list_identity_providers(self, user_pool_id, max_results=None, next_token=None):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)
@ -357,6 +426,19 @@ class CognitoIdpBackend(BaseBackend):

        return identity_provider

    def update_identity_provider(self, user_pool_id, name, extended_config):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        identity_provider = user_pool.identity_providers.get(name)
        if not identity_provider:
            raise ResourceNotFoundError(name)

        identity_provider.extended_config.update(extended_config)

        return identity_provider

    def delete_identity_provider(self, user_pool_id, name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
@ -367,6 +449,72 @@ class CognitoIdpBackend(BaseBackend):

        del user_pool.identity_providers[name]

    # Group
    def create_group(self, user_pool_id, group_name, description, role_arn, precedence):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        group = CognitoIdpGroup(user_pool_id, group_name, description, role_arn, precedence)
        if group.group_name in user_pool.groups:
            raise GroupExistsException("A group with the name already exists")
        user_pool.groups[group.group_name] = group

        return group

    def get_group(self, user_pool_id, group_name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if group_name not in user_pool.groups:
            raise ResourceNotFoundError(group_name)

        return user_pool.groups[group_name]

    def list_groups(self, user_pool_id):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        return user_pool.groups.values()

    def delete_group(self, user_pool_id, group_name):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)

        if group_name not in user_pool.groups:
            raise ResourceNotFoundError(group_name)

        group = user_pool.groups[group_name]
        for user in group.users:
            user.groups.remove(group)

        del user_pool.groups[group_name]

    def admin_add_user_to_group(self, user_pool_id, group_name, username):
        group = self.get_group(user_pool_id, group_name)
        user = self.admin_get_user(user_pool_id, username)

        group.users.add(user)
        user.groups.add(group)

    def list_users_in_group(self, user_pool_id, group_name):
        group = self.get_group(user_pool_id, group_name)
        return list(group.users)

    def admin_list_groups_for_user(self, user_pool_id, username):
        user = self.admin_get_user(user_pool_id, username)
        return list(user.groups)

    def admin_remove_user_from_group(self, user_pool_id, group_name, username):
        group = self.get_group(user_pool_id, group_name)
        user = self.admin_get_user(user_pool_id, username)

        group.users.discard(user)
        user.groups.discard(group)
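A hedged sketch of driving the new group endpoints through boto3 (pool, group, and user names are made up):

```python
# Hypothetical test exercising the group endpoints end to end.
import boto3
from moto import mock_cognitoidp


@mock_cognitoidp
def test_group_membership():
    conn = boto3.client("cognito-idp", "us-west-2")
    pool_id = conn.create_user_pool(PoolName="testpool")["UserPool"]["Id"]
    conn.create_group(GroupName="admins", UserPoolId=pool_id)
    conn.admin_create_user(UserPoolId=pool_id, Username="alice")
    conn.admin_add_user_to_group(UserPoolId=pool_id, Username="alice", GroupName="admins")

    users = conn.list_users_in_group(UserPoolId=pool_id, GroupName="admins")["Users"]
    assert users[0]["Username"] == "alice"
```

Because the membership links are kept on both sides, deleting either the group or the user cleans up the other side, as the backend methods above show.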

    # User
    def admin_create_user(self, user_pool_id, username, temporary_password, attributes):
        user_pool = self.user_pools.get(user_pool_id)
@ -387,7 +535,8 @@ class CognitoIdpBackend(BaseBackend):

        return user_pool.users[username]

    def list_users(self, user_pool_id):
    @paginate(60, "pagination_token", "limit")
    def list_users(self, user_pool_id, pagination_token=None, limit=None):
        user_pool = self.user_pools.get(user_pool_id)
        if not user_pool:
            raise ResourceNotFoundError(user_pool_id)
@ -410,6 +559,10 @@ class CognitoIdpBackend(BaseBackend):
        if username not in user_pool.users:
            raise UserNotFoundError(username)

        user = user_pool.users[username]
        for group in user.groups:
            group.users.remove(user)

        del user_pool.users[username]

    def _log_user_in(self, user_pool, client, username):

@ -22,10 +22,17 @@ class CognitoIdpResponse(BaseResponse):
        })

    def list_user_pools(self):
        user_pools = cognitoidp_backends[self.region].list_user_pools()
        return json.dumps({
            "UserPools": [user_pool.to_json() for user_pool in user_pools]
        })
        max_results = self._get_param("MaxResults")
        next_token = self._get_param("NextToken", "0")
        user_pools, next_token = cognitoidp_backends[self.region].list_user_pools(
            max_results=max_results, next_token=next_token
        )
        response = {
            "UserPools": [user_pool.to_json() for user_pool in user_pools],
        }
        if next_token:
            response["NextToken"] = str(next_token)
        return json.dumps(response)

    def describe_user_pool(self):
        user_pool_id = self._get_param("UserPoolId")
@ -72,10 +79,16 @@ class CognitoIdpResponse(BaseResponse):

    def list_user_pool_clients(self):
        user_pool_id = self._get_param("UserPoolId")
        user_pool_clients = cognitoidp_backends[self.region].list_user_pool_clients(user_pool_id)
        return json.dumps({
        max_results = self._get_param("MaxResults")
        next_token = self._get_param("NextToken", "0")
        user_pool_clients, next_token = cognitoidp_backends[self.region].list_user_pool_clients(user_pool_id,
                                                                                                max_results=max_results, next_token=next_token)
        response = {
            "UserPoolClients": [user_pool_client.to_json() for user_pool_client in user_pool_clients]
        })
        }
        if next_token:
            response["NextToken"] = str(next_token)
        return json.dumps(response)

    def describe_user_pool_client(self):
        user_pool_id = self._get_param("UserPoolId")
@ -110,10 +123,17 @@ class CognitoIdpResponse(BaseResponse):

    def list_identity_providers(self):
        user_pool_id = self._get_param("UserPoolId")
        identity_providers = cognitoidp_backends[self.region].list_identity_providers(user_pool_id)
        return json.dumps({
        max_results = self._get_param("MaxResults")
        next_token = self._get_param("NextToken", "0")
        identity_providers, next_token = cognitoidp_backends[self.region].list_identity_providers(
            user_pool_id, max_results=max_results, next_token=next_token
        )
        response = {
            "Providers": [identity_provider.to_json() for identity_provider in identity_providers]
        })
        }
        if next_token:
            response["NextToken"] = str(next_token)
        return json.dumps(response)

    def describe_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
@ -123,12 +143,103 @@ class CognitoIdpResponse(BaseResponse):
            "IdentityProvider": identity_provider.to_json(extended=True)
        })

    def update_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
        name = self._get_param("ProviderName")
        identity_provider = cognitoidp_backends[self.region].update_identity_provider(user_pool_id, name, self.parameters)
        return json.dumps({
            "IdentityProvider": identity_provider.to_json(extended=True)
        })

    def delete_identity_provider(self):
        user_pool_id = self._get_param("UserPoolId")
        name = self._get_param("ProviderName")
        cognitoidp_backends[self.region].delete_identity_provider(user_pool_id, name)
        return ""

    # Group
    def create_group(self):
        group_name = self._get_param("GroupName")
        user_pool_id = self._get_param("UserPoolId")
        description = self._get_param("Description")
        role_arn = self._get_param("RoleArn")
        precedence = self._get_param("Precedence")

        group = cognitoidp_backends[self.region].create_group(
            user_pool_id,
            group_name,
            description,
            role_arn,
            precedence,
        )

        return json.dumps({
            "Group": group.to_json(),
        })

    def get_group(self):
        group_name = self._get_param("GroupName")
        user_pool_id = self._get_param("UserPoolId")
        group = cognitoidp_backends[self.region].get_group(user_pool_id, group_name)
        return json.dumps({
            "Group": group.to_json(),
        })

    def list_groups(self):
        user_pool_id = self._get_param("UserPoolId")
        groups = cognitoidp_backends[self.region].list_groups(user_pool_id)
        return json.dumps({
            "Groups": [group.to_json() for group in groups],
        })

    def delete_group(self):
        group_name = self._get_param("GroupName")
        user_pool_id = self._get_param("UserPoolId")
        cognitoidp_backends[self.region].delete_group(user_pool_id, group_name)
        return ""

    def admin_add_user_to_group(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        group_name = self._get_param("GroupName")

        cognitoidp_backends[self.region].admin_add_user_to_group(
            user_pool_id,
            group_name,
            username,
        )

        return ""

    def list_users_in_group(self):
        user_pool_id = self._get_param("UserPoolId")
        group_name = self._get_param("GroupName")
        users = cognitoidp_backends[self.region].list_users_in_group(user_pool_id, group_name)
        return json.dumps({
            "Users": [user.to_json(extended=True) for user in users],
        })

    def admin_list_groups_for_user(self):
        username = self._get_param("Username")
        user_pool_id = self._get_param("UserPoolId")
        groups = cognitoidp_backends[self.region].admin_list_groups_for_user(user_pool_id, username)
        return json.dumps({
            "Groups": [group.to_json() for group in groups],
        })

    def admin_remove_user_from_group(self):
        user_pool_id = self._get_param("UserPoolId")
        username = self._get_param("Username")
        group_name = self._get_param("GroupName")

        cognitoidp_backends[self.region].admin_remove_user_from_group(
            user_pool_id,
            group_name,
            username,
        )

        return ""

    # User
    def admin_create_user(self):
        user_pool_id = self._get_param("UserPoolId")
@ -155,10 +266,15 @@ class CognitoIdpResponse(BaseResponse):

    def list_users(self):
        user_pool_id = self._get_param("UserPoolId")
        users = cognitoidp_backends[self.region].list_users(user_pool_id)
        return json.dumps({
            "Users": [user.to_json(extended=True) for user in users]
        })
        limit = self._get_param("Limit")
        token = self._get_param("PaginationToken")
        users, token = cognitoidp_backends[self.region].list_users(user_pool_id,
                                                                   limit=limit,
                                                                   pagination_token=token)
        response = {"Users": [user.to_json(extended=True) for user in users]}
        if token:
            response["PaginationToken"] = str(token)
        return json.dumps(response)

    def admin_disable_user(self):
        user_pool_id = self._get_param("UserPoolId")

4
moto/config/__init__.py
Normal file
@ -0,0 +1,4 @@
from .models import config_backends
from ..core.models import base_decorator

mock_config = base_decorator(config_backends)

149
moto/config/exceptions.py
Normal file
@ -0,0 +1,149 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError


class NameTooLongException(JsonRESTError):
    code = 400

    def __init__(self, name, location):
        message = '1 validation error detected: Value \'{name}\' at \'{location}\' failed to satisfy' \
                  ' constraint: Member must have length less than or equal to 256'.format(name=name, location=location)
        super(NameTooLongException, self).__init__("ValidationException", message)


class InvalidConfigurationRecorderNameException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'The configuration recorder name \'{name}\' is not valid, blank string.'.format(name=name)
        super(InvalidConfigurationRecorderNameException, self).__init__("InvalidConfigurationRecorderNameException",
                                                                        message)


class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to put configuration recorder \'{name}\' because the maximum number of ' \
                  'configuration recorders: 1 is reached.'.format(name=name)
        super(MaxNumberOfConfigurationRecordersExceededException, self).__init__(
            "MaxNumberOfConfigurationRecordersExceededException", message)


class InvalidRecordingGroupException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'The recording group provided is not valid'
        super(InvalidRecordingGroupException, self).__init__("InvalidRecordingGroupException", message)


class InvalidResourceTypeException(JsonRESTError):
    code = 400

    def __init__(self, bad_list, good_list):
        message = '{num} validation error detected: Value \'{bad_list}\' at ' \
                  '\'configurationRecorder.recordingGroup.resourceTypes\' failed to satisfy constraint: ' \
                  'Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]'.format(
                      num=len(bad_list), bad_list=bad_list, good_list=good_list)
        # For PY2:
        message = str(message)

        super(InvalidResourceTypeException, self).__init__("ValidationException", message)


class NoSuchConfigurationRecorderException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Cannot find configuration recorder with the specified name \'{name}\'.'.format(name=name)
        super(NoSuchConfigurationRecorderException, self).__init__("NoSuchConfigurationRecorderException", message)


class InvalidDeliveryChannelNameException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'The delivery channel name \'{name}\' is not valid, blank string.'.format(name=name)
        super(InvalidDeliveryChannelNameException, self).__init__("InvalidDeliveryChannelNameException",
                                                                  message)


class NoSuchBucketException(JsonRESTError):
    """We are *only* validating that there is a value that is not '' here."""
    code = 400

    def __init__(self):
        message = 'Cannot find a S3 bucket with an empty bucket name.'
        super(NoSuchBucketException, self).__init__("NoSuchBucketException", message)


class InvalidS3KeyPrefixException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'The s3 key prefix \'\' is not valid, empty s3 key prefix.'
        super(InvalidS3KeyPrefixException, self).__init__("InvalidS3KeyPrefixException", message)


class InvalidSNSTopicARNException(JsonRESTError):
    """We are *only* validating that there is a value that is not '' here."""
    code = 400

    def __init__(self):
        message = 'The sns topic arn \'\' is not valid.'
        super(InvalidSNSTopicARNException, self).__init__("InvalidSNSTopicARNException", message)


class InvalidDeliveryFrequency(JsonRESTError):
    code = 400

    def __init__(self, value, good_list):
        message = '1 validation error detected: Value \'{value}\' at ' \
                  '\'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency\' failed to satisfy ' \
                  'constraint: Member must satisfy enum value set: {good_list}'.format(value=value, good_list=good_list)
        super(InvalidDeliveryFrequency, self).__init__("InvalidDeliveryFrequency", message)


class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to put delivery channel \'{name}\' because the maximum number of ' \
                  'delivery channels: 1 is reached.'.format(name=name)
        super(MaxNumberOfDeliveryChannelsExceededException, self).__init__(
            "MaxNumberOfDeliveryChannelsExceededException", message)


class NoSuchDeliveryChannelException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Cannot find delivery channel with specified name \'{name}\'.'.format(name=name)
        super(NoSuchDeliveryChannelException, self).__init__("NoSuchDeliveryChannelException", message)


class NoAvailableConfigurationRecorderException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'Configuration recorder is not available to put delivery channel.'
        super(NoAvailableConfigurationRecorderException, self).__init__("NoAvailableConfigurationRecorderException",
                                                                        message)


class NoAvailableDeliveryChannelException(JsonRESTError):
    code = 400

    def __init__(self):
        message = 'Delivery channel is not available to start configuration recorder.'
        super(NoAvailableDeliveryChannelException, self).__init__("NoAvailableDeliveryChannelException", message)


class LastDeliveryChannelDeleteFailedException(JsonRESTError):
    code = 400

    def __init__(self, name):
        message = 'Failed to delete last specified delivery channel with name \'{name}\', because there, ' \
                  'because there is a running configuration recorder.'.format(name=name)
        super(LastDeliveryChannelDeleteFailedException, self).__init__("LastDeliveryChannelDeleteFailedException", message)

335
moto/config/models.py
Normal file
@ -0,0 +1,335 @@
import json
import time
import pkg_resources

from datetime import datetime

from boto3 import Session

from moto.config.exceptions import InvalidResourceTypeException, InvalidDeliveryFrequency, \
    InvalidConfigurationRecorderNameException, NameTooLongException, \
    MaxNumberOfConfigurationRecordersExceededException, InvalidRecordingGroupException, \
    NoSuchConfigurationRecorderException, NoAvailableConfigurationRecorderException, \
    InvalidDeliveryChannelNameException, NoSuchBucketException, InvalidS3KeyPrefixException, \
    InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \
    NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException

from moto.core import BaseBackend, BaseModel

DEFAULT_ACCOUNT_ID = 123456789012


def datetime2int(date):
    return int(time.mktime(date.timetuple()))


def snake_to_camels(original):
    parts = original.split('_')

    camel_cased = parts[0].lower() + ''.join(p.title() for p in parts[1:])
    camel_cased = camel_cased.replace('Arn', 'ARN')  # Config uses 'ARN' instead of 'Arn'

    return camel_cased
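For example, the conversion behaves as follows:

```python
snake_to_camels('all_supported')  # 'allSupported'
snake_to_camels('s3_key_prefix')  # 's3KeyPrefix'
snake_to_camels('sns_topic_arn')  # 'snsTopicARN' (the ARN special case)
```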


class ConfigEmptyDictable(BaseModel):
    """Base class to make serialization easy. This assumes that the sub-class will NOT return 'None's in the JSON."""

    def to_dict(self):
        data = {}
        for item, value in self.__dict__.items():
            if value is not None:
                if isinstance(value, ConfigEmptyDictable):
                    data[snake_to_camels(item)] = value.to_dict()
                else:
                    data[snake_to_camels(item)] = value

        return data


class ConfigRecorderStatus(ConfigEmptyDictable):

    def __init__(self, name):
        self.name = name

        self.recording = False
        self.last_start_time = None
        self.last_stop_time = None
        self.last_status = None
        self.last_error_code = None
        self.last_error_message = None
        self.last_status_change_time = None

    def start(self):
        self.recording = True
        self.last_status = 'PENDING'
        self.last_start_time = datetime2int(datetime.utcnow())
        self.last_status_change_time = datetime2int(datetime.utcnow())

    def stop(self):
        self.recording = False
        self.last_stop_time = datetime2int(datetime.utcnow())
        self.last_status_change_time = datetime2int(datetime.utcnow())


class ConfigDeliverySnapshotProperties(ConfigEmptyDictable):

    def __init__(self, delivery_frequency):
        self.delivery_frequency = delivery_frequency


class ConfigDeliveryChannel(ConfigEmptyDictable):

    def __init__(self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None):
        self.name = name
        self.s3_bucket_name = s3_bucket_name
        self.s3_key_prefix = prefix
        self.sns_topic_arn = sns_arn
        self.config_snapshot_delivery_properties = snapshot_properties


class RecordingGroup(ConfigEmptyDictable):

    def __init__(self, all_supported=True, include_global_resource_types=False, resource_types=None):
        self.all_supported = all_supported
        self.include_global_resource_types = include_global_resource_types
        self.resource_types = resource_types


class ConfigRecorder(ConfigEmptyDictable):

    def __init__(self, role_arn, recording_group, name='default', status=None):
        self.name = name
        self.role_arn = role_arn
        self.recording_group = recording_group

        if not status:
            self.status = ConfigRecorderStatus(name)
        else:
            self.status = status


class ConfigBackend(BaseBackend):

    def __init__(self):
        self.recorders = {}
        self.delivery_channels = {}

    @staticmethod
    def _validate_resource_types(resource_list):
        # Load the service file:
        resource_package = 'botocore'
        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))

        # Verify that each entry exists in the supported list:
        bad_list = []
        for resource in resource_list:
            # For PY2:
            r_str = str(resource)

            if r_str not in config_schema['shapes']['ResourceType']['enum']:
                bad_list.append(r_str)

        if bad_list:
            raise InvalidResourceTypeException(bad_list, config_schema['shapes']['ResourceType']['enum'])

    @staticmethod
    def _validate_delivery_snapshot_properties(properties):
        # Load the service file:
        resource_package = 'botocore'
        resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json'))
        config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path))

        # Verify that the deliveryFrequency is set to an acceptable value:
        if properties.get('deliveryFrequency', None) not in \
                config_schema['shapes']['MaximumExecutionFrequency']['enum']:
            raise InvalidDeliveryFrequency(properties.get('deliveryFrequency', None),
                                           config_schema['shapes']['MaximumExecutionFrequency']['enum'])

    def put_configuration_recorder(self, config_recorder):
        # Validate the name:
        if not config_recorder.get('name'):
            raise InvalidConfigurationRecorderNameException(config_recorder.get('name'))
        if len(config_recorder.get('name')) > 256:
            raise NameTooLongException(config_recorder.get('name'), 'configurationRecorder.name')

        # We're going to assume that the passed in Role ARN is correct.

        # Config currently only allows 1 configuration recorder for an account:
        if len(self.recorders) == 1 and not self.recorders.get(config_recorder['name']):
            raise MaxNumberOfConfigurationRecordersExceededException(config_recorder['name'])

        # Is this updating an existing one?
        recorder_status = None
        if self.recorders.get(config_recorder['name']):
            recorder_status = self.recorders[config_recorder['name']].status

        # Validate the Recording Group:
        if config_recorder.get('recordingGroup') is None:
            recording_group = RecordingGroup()
        else:
            rg = config_recorder['recordingGroup']

            # If an empty dict is passed in, then bad:
            if not rg:
                raise InvalidRecordingGroupException()

            # Can't have both the resource types specified and the other flags as True.
            if rg.get('resourceTypes') and (
                    rg.get('allSupported', False) or
                    rg.get('includeGlobalResourceTypes', False)):
                raise InvalidRecordingGroupException()

            # Must supply resourceTypes if 'allSupported' is not supplied:
            if not rg.get('allSupported') and not rg.get('resourceTypes'):
                raise InvalidRecordingGroupException()

            # Validate that the list provided is correct:
            self._validate_resource_types(rg.get('resourceTypes', []))

            recording_group = RecordingGroup(
                all_supported=rg.get('allSupported', True),
                include_global_resource_types=rg.get('includeGlobalResourceTypes', False),
                resource_types=rg.get('resourceTypes', [])
            )

        self.recorders[config_recorder['name']] = \
            ConfigRecorder(config_recorder['roleARN'], recording_group, name=config_recorder['name'],
                           status=recorder_status)
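A hedged sketch of exercising the recorder validation through boto3 (the role ARN is illustrative and, as noted in the comment above, not actually checked):

```python
# Hypothetical usage of the new backend through the @mock_config decorator.
import boto3
from moto import mock_config


@mock_config
def test_put_configuration_recorder():
    client = boto3.client('config', region_name='us-east-1')
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'default',
        'roleARN': 'arn:aws:iam::123456789012:role/config-role',  # assumed valid
        'recordingGroup': {'allSupported': True, 'includeGlobalResourceTypes': True},
    })
    recorders = client.describe_configuration_recorders()['ConfigurationRecorders']
    assert recorders[0]['name'] == 'default'
```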

    def describe_configuration_recorders(self, recorder_names):
        recorders = []

        if recorder_names:
            for rn in recorder_names:
                if not self.recorders.get(rn):
                    raise NoSuchConfigurationRecorderException(rn)

                # Format the recorder:
                recorders.append(self.recorders[rn].to_dict())

        else:
            for recorder in self.recorders.values():
                recorders.append(recorder.to_dict())

        return recorders

    def describe_configuration_recorder_status(self, recorder_names):
        recorders = []

        if recorder_names:
            for rn in recorder_names:
                if not self.recorders.get(rn):
                    raise NoSuchConfigurationRecorderException(rn)

                # Format the recorder:
                recorders.append(self.recorders[rn].status.to_dict())

        else:
            for recorder in self.recorders.values():
                recorders.append(recorder.status.to_dict())

        return recorders

    def put_delivery_channel(self, delivery_channel):
        # Must have a configuration recorder:
        if not self.recorders:
            raise NoAvailableConfigurationRecorderException()

        # Validate the name:
        if not delivery_channel.get('name'):
            raise InvalidDeliveryChannelNameException(delivery_channel.get('name'))
        if len(delivery_channel.get('name')) > 256:
            raise NameTooLongException(delivery_channel.get('name'), 'deliveryChannel.name')

        # We are going to assume that the bucket exists -- but will verify if the bucket provided is blank:
        if not delivery_channel.get('s3BucketName'):
            raise NoSuchBucketException()

        # We are going to assume that the bucket has the correct policy attached to it. We are only going to verify
        # if the prefix provided is not an empty string:
        if delivery_channel.get('s3KeyPrefix', None) == '':
            raise InvalidS3KeyPrefixException()

        # Ditto for SNS -- Only going to assume that the ARN provided is not an empty string:
        if delivery_channel.get('snsTopicARN', None) == '':
            raise InvalidSNSTopicARNException()

        # Config currently only allows 1 delivery channel for an account:
        if len(self.delivery_channels) == 1 and not self.delivery_channels.get(delivery_channel['name']):
            raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel['name'])

        if not delivery_channel.get('configSnapshotDeliveryProperties'):
            dp = None

        else:
            # Validate the config snapshot delivery properties:
            self._validate_delivery_snapshot_properties(delivery_channel['configSnapshotDeliveryProperties'])

            dp = ConfigDeliverySnapshotProperties(
                delivery_channel['configSnapshotDeliveryProperties']['deliveryFrequency'])

        self.delivery_channels[delivery_channel['name']] = \
            ConfigDeliveryChannel(delivery_channel['name'], delivery_channel['s3BucketName'],
                                  prefix=delivery_channel.get('s3KeyPrefix', None),
                                  sns_arn=delivery_channel.get('snsTopicARN', None),
                                  snapshot_properties=dp)

    def describe_delivery_channels(self, channel_names):
        channels = []

        if channel_names:
            for cn in channel_names:
                if not self.delivery_channels.get(cn):
                    raise NoSuchDeliveryChannelException(cn)

                # Format the delivery channel:
                channels.append(self.delivery_channels[cn].to_dict())

        else:
            for channel in self.delivery_channels.values():
                channels.append(channel.to_dict())

        return channels

    def start_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        # Must have a delivery channel available as well:
        if not self.delivery_channels:
            raise NoAvailableDeliveryChannelException()

        # Start recording:
        self.recorders[recorder_name].status.start()

    def stop_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        # Stop recording:
        self.recorders[recorder_name].status.stop()

    def delete_configuration_recorder(self, recorder_name):
        if not self.recorders.get(recorder_name):
            raise NoSuchConfigurationRecorderException(recorder_name)

        del self.recorders[recorder_name]

    def delete_delivery_channel(self, channel_name):
        if not self.delivery_channels.get(channel_name):
            raise NoSuchDeliveryChannelException(channel_name)

        # Check if a channel is recording -- if so, bad -- (there can only be 1 recorder):
        for recorder in self.recorders.values():
            if recorder.status.recording:
                raise LastDeliveryChannelDeleteFailedException(channel_name)

        del self.delivery_channels[channel_name]


config_backends = {}
boto3_session = Session()
for region in boto3_session.get_available_regions('config'):
    config_backends[region] = ConfigBackend()

53
moto/config/responses.py
Normal file
@ -0,0 +1,53 @@
import json
from moto.core.responses import BaseResponse
from .models import config_backends


class ConfigResponse(BaseResponse):

    @property
    def config_backend(self):
        return config_backends[self.region]

    def put_configuration_recorder(self):
        self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder'))
        return ""

    def describe_configuration_recorders(self):
        recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames'))
        schema = {'ConfigurationRecorders': recorders}
        return json.dumps(schema)

    def describe_configuration_recorder_status(self):
        recorder_statuses = self.config_backend.describe_configuration_recorder_status(
            self._get_param('ConfigurationRecorderNames'))
        schema = {'ConfigurationRecordersStatus': recorder_statuses}
        return json.dumps(schema)

    def put_delivery_channel(self):
        self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel'))
        return ""

    def describe_delivery_channels(self):
        delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames'))
        schema = {'DeliveryChannels': delivery_channels}
        return json.dumps(schema)

    def describe_delivery_channel_status(self):
        raise NotImplementedError()

    def delete_delivery_channel(self):
        self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName'))
        return ""

    def delete_configuration_recorder(self):
        self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

    def start_configuration_recorder(self):
        self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

    def stop_configuration_recorder(self):
        self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName'))
        return ""

10
moto/config/urls.py
Normal file
@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import ConfigResponse

url_bases = [
    "https?://config.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': ConfigResponse.dispatch,
}

@ -4,6 +4,7 @@ from __future__ import absolute_import

import functools
import inspect
import os
import re
import six
from io import BytesIO
@ -21,6 +22,11 @@ from .utils import (
)


# "Mock" the AWS credentials as they can't be mocked in Botocore currently
os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")


class BaseMockAWS(object):
    nested_count = 0

@ -718,6 +718,8 @@ def to_str(value, spec):
        return str(value)
    elif vtype == 'float':
        return str(value)
    elif vtype == 'double':
        return str(value)
    elif vtype == 'timestamp':
        return datetime.datetime.utcfromtimestamp(
            value).replace(tzinfo=pytz.utc).isoformat()
@ -737,6 +739,8 @@ def from_str(value, spec):
        return int(value)
    elif vtype == 'float':
        return float(value)
    elif vtype == 'double':
        return float(value)
    elif vtype == 'timestamp':
        return value
    elif vtype == 'string':

@ -383,7 +383,7 @@ class OpNotEqual(Op):
    def expr(self, item):
        lhs = self._lhs(item)
        rhs = self._rhs(item)
        return lhs == rhs
        return lhs != rhs


class OpLessThanOrEqual(Op):

@ -5,6 +5,7 @@ import datetime
import decimal
import json
import re
import uuid

import boto3
from moto.compat import OrderedDict
@ -65,6 +66,8 @@ class DynamoType(object):
                return int(self.value)
            except ValueError:
                return float(self.value)
        elif self.is_set():
            return set(self.value)
        else:
            return self.value

@ -292,9 +295,82 @@ class Item(BaseModel):
                    'ADD not supported for %s' % ', '.join(update_action['Value'].keys()))


class StreamRecord(BaseModel):
    def __init__(self, table, stream_type, event_name, old, new, seq):
        old_a = old.to_json()['Attributes'] if old is not None else {}
        new_a = new.to_json()['Attributes'] if new is not None else {}

        rec = old if old is not None else new
        keys = {table.hash_key_attr: rec.hash_key.to_json()}
        if table.range_key_attr is not None:
            keys[table.range_key_attr] = rec.range_key.to_json()

        self.record = {
            'eventID': uuid.uuid4().hex,
            'eventName': event_name,
            'eventSource': 'aws:dynamodb',
            'eventVersion': '1.0',
            'awsRegion': 'us-east-1',
            'dynamodb': {
                'StreamViewType': stream_type,
                'ApproximateCreationDateTime': datetime.datetime.utcnow().isoformat(),
                'SequenceNumber': seq,
                'SizeBytes': 1,
                'Keys': keys
            }
        }

        if stream_type in ('NEW_IMAGE', 'NEW_AND_OLD_IMAGES'):
            self.record['dynamodb']['NewImage'] = new_a
        if stream_type in ('OLD_IMAGE', 'NEW_AND_OLD_IMAGES'):
            self.record['dynamodb']['OldImage'] = old_a

        # This is a substantial overestimate but it's the easiest to do now
        self.record['dynamodb']['SizeBytes'] = len(
            json.dumps(self.record['dynamodb']))

    def to_json(self):
        return self.record


class StreamShard(BaseModel):
    def __init__(self, table):
        self.table = table
        self.id = 'shardId-00000001541626099285-f35f62ef'
        self.starting_sequence_number = 1100000000017454423009
        self.items = []
        self.created_on = datetime.datetime.utcnow()

    def to_json(self):
        return {
            'ShardId': self.id,
            'SequenceNumberRange': {
                'StartingSequenceNumber': str(self.starting_sequence_number)
            }
        }

    def add(self, old, new):
        t = self.table.stream_specification['StreamViewType']
        if old is None:
            event_name = 'INSERT'
        elif new is None:
            event_name = 'DELETE'
        else:
            event_name = 'MODIFY'
        seq = len(self.items) + self.starting_sequence_number
        self.items.append(
            StreamRecord(self.table, t, event_name, old, new, seq))

    def get(self, start, quantity):
        start -= self.starting_sequence_number
        assert start >= 0
        end = start + quantity
        return [i.to_json() for i in self.items[start:end]]
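To illustrate the shard mechanics, a self-contained sketch with minimal stand-ins for `Table` and `Item` (all of these stubs are hypothetical):

```python
# Minimal stubs, just enough for StreamShard/StreamRecord to run.
class FakeKey(object):
    def to_json(self):
        return {'S': 'the-hash-key'}


class FakeItem(object):
    hash_key = FakeKey()

    def to_json(self):
        return {'Attributes': {'id': {'S': 'the-hash-key'}}}


class FakeTable(object):
    stream_specification = {'StreamViewType': 'NEW_AND_OLD_IMAGES'}
    hash_key_attr = 'id'
    range_key_attr = None


shard = StreamShard(FakeTable())
shard.add(None, FakeItem())  # old=None, new=item -> an INSERT record
seq = shard.starting_sequence_number
print(shard.get(seq, 10)[0]['eventName'])  # 'INSERT'
```

`get` subtracts the shard's starting sequence number, so callers address records by absolute sequence number while the shard stores them in a plain list.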


class Table(BaseModel):

    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None):
    def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None, streams=None):
        self.name = table_name
        self.attr = attr
        self.schema = schema
@ -325,10 +401,22 @@ class Table(BaseModel):
            'TimeToLiveStatus': 'DISABLED'  # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',
            # 'AttributeName': 'string'  # Can contain this
        }
        self.set_stream_specification(streams)

    def _generate_arn(self, name):
        return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name

    def set_stream_specification(self, streams):
        self.stream_specification = streams
        if streams and (streams.get('StreamEnabled') or streams.get('StreamViewType')):
            self.stream_specification['StreamEnabled'] = True
            self.latest_stream_label = datetime.datetime.utcnow().isoformat()
            self.stream_shard = StreamShard(self)
        else:
            self.stream_specification = {'StreamEnabled': False}
            self.latest_stream_label = None
            self.stream_shard = None
|
||||
|
||||
def describe(self, base_key='TableDescription'):
|
||||
results = {
|
||||
base_key: {
|
||||
@ -345,6 +433,11 @@ class Table(BaseModel):
|
||||
'LocalSecondaryIndexes': [index for index in self.indexes],
|
||||
}
|
||||
}
|
||||
if self.stream_specification and self.stream_specification['StreamEnabled']:
|
||||
results[base_key]['StreamSpecification'] = self.stream_specification
|
||||
if self.latest_stream_label:
|
||||
results[base_key]['LatestStreamLabel'] = self.latest_stream_label
|
||||
results[base_key]['LatestStreamArn'] = self.table_arn + '/stream/' + self.latest_stream_label
|
||||
return results
|
||||
|
||||
def __len__(self):
|
||||
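With `set_stream_specification` wired into `__init__` and `describe`, a table created with a `StreamSpecification` reports a stream label and ARN. A hedged boto3 sketch against the mock (table name and schema invented for illustration):

```python
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def test_create_table_with_stream():
    conn = boto3.client('dynamodb', region_name='us-east-1')
    resp = conn.create_table(
        TableName='test-streams',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
        StreamSpecification={'StreamEnabled': True,
                             'StreamViewType': 'NEW_AND_OLD_IMAGES'})
    desc = resp['TableDescription']
    assert desc['StreamSpecification']['StreamEnabled'] is True
    assert '/stream/' in desc['LatestStreamArn']
```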
@ -385,23 +478,22 @@ class Table(BaseModel):
        else:
            range_value = None

        if expected is None:
            expected = {}
            lookup_range_value = range_value
        else:
            expected_range_value = expected.get(
                self.range_key_attr, {}).get("Value")
            if(expected_range_value is None):
                lookup_range_value = range_value
            else:
                lookup_range_value = DynamoType(expected_range_value)
        current = self.get_item(hash_value, lookup_range_value)

        item = Item(hash_value, self.hash_key_type, range_value,
                    self.range_key_type, item_attrs)

        if not overwrite:
            if expected is None:
                expected = {}
                lookup_range_value = range_value
            else:
                expected_range_value = expected.get(
                    self.range_key_attr, {}).get("Value")
                if(expected_range_value is None):
                    lookup_range_value = range_value
                else:
                    lookup_range_value = DynamoType(expected_range_value)

            current = self.get_item(hash_value, lookup_range_value)

            if current is None:
                current_attr = {}
            elif hasattr(current, 'attrs'):

@ -419,19 +511,20 @@ class Table(BaseModel):
                elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value:
                    raise ValueError("The conditional request failed")
                elif 'ComparisonOperator' in val:
                    comparison_func = get_comparison_func(
                        val['ComparisonOperator'])
                    dynamo_types = [
                        DynamoType(ele) for ele in
                        val.get("AttributeValueList", [])
                    ]
                    for t in dynamo_types:
                        if not comparison_func(current_attr[key].value, t.value):
                            raise ValueError('The conditional request failed')
                    if not current_attr[key].compare(val['ComparisonOperator'], dynamo_types):
                        raise ValueError('The conditional request failed')
        if range_value:
            self.items[hash_value][range_value] = item
        else:
            self.items[hash_value] = item

        if self.stream_shard is not None:
            self.stream_shard.add(current, item)

        return item

    def __nonzero__(self):

@ -462,9 +555,14 @@ class Table(BaseModel):
    def delete_item(self, hash_key, range_key):
        try:
            if range_key:
                return self.items[hash_key].pop(range_key)
                item = self.items[hash_key].pop(range_key)
            else:
                return self.items.pop(hash_key)
                item = self.items.pop(hash_key)

            if self.stream_shard is not None:
                self.stream_shard.add(item, None)

            return item
        except KeyError:
            return None

@ -472,6 +570,7 @@ class Table(BaseModel):
              exclusive_start_key, scan_index_forward, projection_expression,
              index_name=None, filter_expression=None, **filter_kwargs):
        results = []

        if index_name:
            all_indexes = (self.global_indexes or []) + (self.indexes or [])
            indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)

@ -488,24 +587,28 @@ class Table(BaseModel):
                raise ValueError('Missing Hash Key. KeySchema: %s' %
                                 index['KeySchema'])

            possible_results = []
            for item in self.all_items():
                if not isinstance(item, Item):
                    continue
                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
                if item_hash_key and item_hash_key == hash_key:
                    possible_results.append(item)
        else:
            possible_results = [item for item in list(self.all_items()) if isinstance(
                item, Item) and item.hash_key == hash_key]

        if index_name:
            try:
                index_range_key = [key for key in index[
                    'KeySchema'] if key['KeyType'] == 'RANGE'][0]
            except IndexError:
                index_range_key = None

            possible_results = []
            for item in self.all_items():
                if not isinstance(item, Item):
                    continue
                item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
                if index_range_key is None:
                    if item_hash_key and item_hash_key == hash_key:
                        possible_results.append(item)
                else:
                    item_range_key = item.attrs.get(index_range_key['AttributeName'])
                    if item_hash_key and item_hash_key == hash_key and item_range_key:
                        possible_results.append(item)
        else:
            possible_results = [item for item in list(self.all_items()) if isinstance(
                item, Item) and item.hash_key == hash_key]

        if range_comparison:
            if index_name and not index_range_key:
                raise ValueError(

@ -680,6 +783,13 @@ class DynamoDBBackend(BaseBackend):
            table.throughput = throughput
        return table

    def update_table_streams(self, name, stream_specification):
        table = self.tables[name]
        if (stream_specification.get('StreamEnabled') or stream_specification.get('StreamViewType')) and table.latest_stream_label:
            raise ValueError('Table already has stream enabled')
        table.set_stream_specification(stream_specification)
        return table
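`update_table_streams` refuses to enable a stream that is already enabled; the response layer (in the `update_table` handler further down this diff) maps the `ValueError` to DynamoDB's `ResourceInUseException`. A hedged sketch of the round trip:

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_dynamodb2

@mock_dynamodb2
def test_stream_cannot_be_enabled_twice():
    conn = boto3.client('dynamodb', region_name='us-east-1')
    conn.create_table(
        TableName='test-streams',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})

    spec = {'StreamEnabled': True, 'StreamViewType': 'KEYS_ONLY'}
    conn.update_table(TableName='test-streams', StreamSpecification=spec)

    # Enabling a second time mirrors real DynamoDB and fails:
    try:
        conn.update_table(TableName='test-streams', StreamSpecification=spec)
    except ClientError as exc:
        assert exc.response['Error']['Code'] == 'ResourceInUseException'
```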
    def update_table_global_indexes(self, name, global_index_updates):
        table = self.tables[name]
        gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)

@ -840,15 +950,12 @@ class DynamoDBBackend(BaseBackend):
                elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value:
                    raise ValueError("The conditional request failed")
                elif 'ComparisonOperator' in val:
                    comparison_func = get_comparison_func(
                        val['ComparisonOperator'])
                    dynamo_types = [
                        DynamoType(ele) for ele in
                        val.get("AttributeValueList", [])
                    ]
                    for t in dynamo_types:
                        if not comparison_func(item_attr[key].value, t.value):
                            raise ValueError('The conditional request failed')
                    if not item_attr[key].compare(val['ComparisonOperator'], dynamo_types):
                        raise ValueError('The conditional request failed')

        # Update does not fail on new items, so create one
        if item is None:

moto/dynamodb2/responses.py

@ -31,6 +31,67 @@ def get_empty_str_error():
    ))


def condition_expression_to_expected(condition_expression, expression_attribute_names, expression_attribute_values):
    """
    Limited condition expression syntax parsing.
    Supports Global Negation ex: NOT(inner expressions).
    Supports simple AND conditions ex: cond_a AND cond_b and cond_c.
    Atomic expressions supported are attribute_exists(key), attribute_not_exists(key) and #key = :value.
    """
    expected = {}
    if condition_expression and 'OR' not in condition_expression:
        reverse_re = re.compile('^NOT\s*\((.*)\)$')
        reverse_m = reverse_re.match(condition_expression.strip())

        reverse = False
        if reverse_m:
            reverse = True
            condition_expression = reverse_m.group(1)

        cond_items = [c.strip() for c in condition_expression.split('AND')]
        if cond_items:
            exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
            not_exists_re = re.compile(
                '^attribute_not_exists\s*\((.*)\)$')
            equals_re = re.compile('^(#?\w+)\s*=\s*(\:?\w+)')

            for cond in cond_items:
                exists_m = exists_re.match(cond)
                not_exists_m = not_exists_re.match(cond)
                equals_m = equals_re.match(cond)

                if exists_m:
                    attribute_name = expression_attribute_names_lookup(exists_m.group(1), expression_attribute_names)
                    expected[attribute_name] = {'Exists': True if not reverse else False}
                elif not_exists_m:
                    attribute_name = expression_attribute_names_lookup(not_exists_m.group(1), expression_attribute_names)
                    expected[attribute_name] = {'Exists': False if not reverse else True}
                elif equals_m:
                    attribute_name = expression_attribute_names_lookup(equals_m.group(1), expression_attribute_names)
                    attribute_value = expression_attribute_values_lookup(equals_m.group(2), expression_attribute_values)
                    expected[attribute_name] = {
                        'AttributeValueList': [attribute_value],
                        'ComparisonOperator': 'EQ' if not reverse else 'NEQ'}

    return expected


def expression_attribute_names_lookup(attribute_name, expression_attribute_names):
    if attribute_name.startswith('#') and attribute_name in expression_attribute_names:
        return expression_attribute_names[attribute_name]
    else:
        return attribute_name


def expression_attribute_values_lookup(attribute_value, expression_attribute_values):
    if isinstance(attribute_value, six.string_types) and \
            attribute_value.startswith(':') and \
            attribute_value in expression_attribute_values:
        return expression_attribute_values[attribute_value]
    else:
        return attribute_value
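The three helpers above translate a small subset of `ConditionExpression` syntax into the legacy `Expected` format the backend already knows how to enforce. A sketch of the mapping, using invented attribute names and values:

```python
from moto.dynamodb2.responses import condition_expression_to_expected

# Hypothetical expression with one name alias and one value alias:
expected = condition_expression_to_expected(
    'attribute_not_exists(#owner) AND #status = :active',
    {'#owner': 'owner', '#status': 'status'},
    {':active': {'S': 'active'}})

assert expected == {
    'owner': {'Exists': False},
    'status': {'AttributeValueList': [{'S': 'active'}],
               'ComparisonOperator': 'EQ'},
}
```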
class DynamoHandler(BaseResponse):

    def get_endpoint_name(self, headers):

@ -104,13 +165,16 @@ class DynamoHandler(BaseResponse):
        # getting the indexes
        global_indexes = body.get("GlobalSecondaryIndexes", [])
        local_secondary_indexes = body.get("LocalSecondaryIndexes", [])
        # get the stream specification
        streams = body.get("StreamSpecification")

        table = self.dynamodb_backend.create_table(table_name,
                                                   schema=key_schema,
                                                   throughput=throughput,
                                                   attr=attr,
                                                   global_indexes=global_indexes,
                                                   indexes=local_secondary_indexes)
                                                   indexes=local_secondary_indexes,
                                                   streams=streams)
        if table is not None:
            return dynamo_json_dump(table.describe())
        else:

@ -163,12 +227,20 @@ class DynamoHandler(BaseResponse):

    def update_table(self):
        name = self.body['TableName']
        table = self.dynamodb_backend.get_table(name)
        if 'GlobalSecondaryIndexUpdates' in self.body:
            table = self.dynamodb_backend.update_table_global_indexes(
                name, self.body['GlobalSecondaryIndexUpdates'])
        if 'ProvisionedThroughput' in self.body:
            throughput = self.body["ProvisionedThroughput"]
            table = self.dynamodb_backend.update_table_throughput(name, throughput)
        if 'StreamSpecification' in self.body:
            try:
                table = self.dynamodb_backend.update_table_streams(name, self.body['StreamSpecification'])
            except ValueError:
                er = 'com.amazonaws.dynamodb.v20111205#ResourceInUseException'
                return self.error(er, 'Cannot enable stream')

        return dynamo_json_dump(table.describe())

    def describe_table(self):

@ -183,6 +255,11 @@ class DynamoHandler(BaseResponse):
    def put_item(self):
        name = self.body['TableName']
        item = self.body['Item']
        return_values = self.body.get('ReturnValues', 'NONE')

        if return_values not in ('ALL_OLD', 'NONE'):
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return self.error(er, 'Return values set to invalid value')

        if has_empty_keys_or_values(item):
            return get_empty_str_error()

@ -193,28 +270,24 @@ class DynamoHandler(BaseResponse):
        else:
            expected = None

        if return_values == 'ALL_OLD':
            existing_item = self.dynamodb_backend.get_item(name, item)
            if existing_item:
                existing_attributes = existing_item.to_json()['Attributes']
            else:
                existing_attributes = {}

        # Attempt to parse simple ConditionExpressions into an Expected
        # expression
        if not expected:
            condition_expression = self.body.get('ConditionExpression')
            if condition_expression and 'OR' not in condition_expression:
                cond_items = [c.strip()
                              for c in condition_expression.split('AND')]

                if cond_items:
                    expected = {}
                    overwrite = False
                    exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
                    not_exists_re = re.compile(
                        '^attribute_not_exists\s*\((.*)\)$')

                    for cond in cond_items:
                        exists_m = exists_re.match(cond)
                        not_exists_m = not_exists_re.match(cond)
                        if exists_m:
                            expected[exists_m.group(1)] = {'Exists': True}
                        elif not_exists_m:
                            expected[not_exists_m.group(1)] = {'Exists': False}
            expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
            expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
            expected = condition_expression_to_expected(condition_expression,
                                                        expression_attribute_names,
                                                        expression_attribute_values)
            if expected:
                overwrite = False

        try:
            result = self.dynamodb_backend.put_item(name, item, expected, overwrite)

@ -228,6 +301,10 @@ class DynamoHandler(BaseResponse):
                'TableName': name,
                'CapacityUnits': 1
            }
            if return_values == 'ALL_OLD':
                item_dict['Attributes'] = existing_attributes
            else:
                item_dict.pop('Attributes', None)
            return dynamo_json_dump(item_dict)
        else:
            er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'

@ -385,7 +462,7 @@ class DynamoHandler(BaseResponse):
                range_values = [value_alias_map[
                    range_key_expression_components[2]]]
        else:
            hash_key_expression = key_condition_expression
            hash_key_expression = key_condition_expression.strip('()')
            range_comparison = None
            range_values = []

@ -512,7 +589,11 @@ class DynamoHandler(BaseResponse):
    def delete_item(self):
        name = self.body['TableName']
        keys = self.body['Key']
        return_values = self.body.get('ReturnValues', '')
        return_values = self.body.get('ReturnValues', 'NONE')
        if return_values not in ('ALL_OLD', 'NONE'):
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return self.error(er, 'Return values set to invalid value')

        table = self.dynamodb_backend.get_table(name)
        if not table:
            er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'

@ -527,9 +608,9 @@ class DynamoHandler(BaseResponse):
        return dynamo_json_dump(item_dict)

    def update_item(self):

        name = self.body['TableName']
        key = self.body['Key']
        return_values = self.body.get('ReturnValues', 'NONE')
        update_expression = self.body.get('UpdateExpression')
        attribute_updates = self.body.get('AttributeUpdates')
        expression_attribute_names = self.body.get(

@ -537,6 +618,15 @@ class DynamoHandler(BaseResponse):
        expression_attribute_values = self.body.get(
            'ExpressionAttributeValues', {})
        existing_item = self.dynamodb_backend.get_item(name, key)
        if existing_item:
            existing_attributes = existing_item.to_json()['Attributes']
        else:
            existing_attributes = {}

        if return_values not in ('NONE', 'ALL_OLD', 'ALL_NEW', 'UPDATED_OLD',
                                 'UPDATED_NEW'):
            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
            return self.error(er, 'Return values set to invalid value')

        if has_empty_keys_or_values(expression_attribute_values):
            return get_empty_str_error()

@ -550,23 +640,11 @@ class DynamoHandler(BaseResponse):
        # expression
        if not expected:
            condition_expression = self.body.get('ConditionExpression')
            if condition_expression and 'OR' not in condition_expression:
                cond_items = [c.strip()
                              for c in condition_expression.split('AND')]

                if cond_items:
                    expected = {}
                    exists_re = re.compile('^attribute_exists\s*\((.*)\)$')
                    not_exists_re = re.compile(
                        '^attribute_not_exists\s*\((.*)\)$')

                    for cond in cond_items:
                        exists_m = exists_re.match(cond)
                        not_exists_m = not_exists_re.match(cond)
                        if exists_m:
                            expected[exists_m.group(1)] = {'Exists': True}
                        elif not_exists_m:
                            expected[not_exists_m.group(1)] = {'Exists': False}
            expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
            expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
            expected = condition_expression_to_expected(condition_expression,
                                                        expression_attribute_names,
                                                        expression_attribute_values)

        # Support spaces between operators in an update expression
        # E.g. `a = b + c` -> `a=b+c`

@ -591,8 +669,26 @@ class DynamoHandler(BaseResponse):
            'TableName': name,
            'CapacityUnits': 0.5
        }
        if not existing_item:
        unchanged_attributes = {
            k for k in existing_attributes.keys()
            if existing_attributes[k] == item_dict['Attributes'].get(k)
        }
        changed_attributes = set(existing_attributes.keys()).union(item_dict['Attributes'].keys()).difference(unchanged_attributes)

        if return_values == 'NONE':
            item_dict['Attributes'] = {}
        elif return_values == 'ALL_OLD':
            item_dict['Attributes'] = existing_attributes
        elif return_values == 'UPDATED_OLD':
            item_dict['Attributes'] = {
                k: v for k, v in existing_attributes.items()
                if k in changed_attributes
            }
        elif return_values == 'UPDATED_NEW':
            item_dict['Attributes'] = {
                k: v for k, v in item_dict['Attributes'].items()
                if k in changed_attributes
            }

        return dynamo_json_dump(item_dict)
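The `changed_attributes` bookkeeping above is what lets `UPDATED_OLD`/`UPDATED_NEW` return only the attributes an update actually touched. A hedged boto3 sketch (table and attribute names invented):

```python
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def test_update_item_updated_new():
    conn = boto3.client('dynamodb', region_name='us-east-1')
    conn.create_table(
        TableName='users',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})
    conn.put_item(TableName='users',
                  Item={'id': {'S': 'u1'}, 'plan': {'S': 'free'},
                        'name': {'S': 'Ada'}})

    resp = conn.update_item(
        TableName='users',
        Key={'id': {'S': 'u1'}},
        UpdateExpression='SET plan = :pro',
        ExpressionAttributeValues={':pro': {'S': 'pro'}},
        ReturnValues='UPDATED_NEW')

    # Only the attribute that changed comes back, not 'id' or 'name':
    assert resp['Attributes'] == {'plan': {'S': 'pro'}}
```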
6 moto/dynamodbstreams/__init__.py Normal file

@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import dynamodbstreams_backends
from ..core.models import base_decorator

dynamodbstreams_backend = dynamodbstreams_backends['us-east-1']
mock_dynamodbstreams = base_decorator(dynamodbstreams_backends)

129 moto/dynamodbstreams/models.py Normal file

@ -0,0 +1,129 @@
from __future__ import unicode_literals

import os
import json
import boto3
import base64

from moto.core import BaseBackend, BaseModel
from moto.dynamodb2.models import dynamodb_backends


class ShardIterator(BaseModel):
    def __init__(self, streams_backend, stream_shard, shard_iterator_type, sequence_number=None):
        self.id = base64.b64encode(os.urandom(472)).decode('utf-8')
        self.streams_backend = streams_backend
        self.stream_shard = stream_shard
        self.shard_iterator_type = shard_iterator_type
        if shard_iterator_type == 'TRIM_HORIZON':
            self.sequence_number = stream_shard.starting_sequence_number
        elif shard_iterator_type == 'LATEST':
            self.sequence_number = stream_shard.starting_sequence_number + len(stream_shard.items)
        elif shard_iterator_type == 'AT_SEQUENCE_NUMBER':
            self.sequence_number = sequence_number
        elif shard_iterator_type == 'AFTER_SEQUENCE_NUMBER':
            self.sequence_number = sequence_number + 1

    @property
    def arn(self):
        return '{}/stream/{}|1|{}'.format(
            self.stream_shard.table.table_arn,
            self.stream_shard.table.latest_stream_label,
            self.id)

    def to_json(self):
        return {
            'ShardIterator': self.arn
        }

    def get(self, limit=1000):
        items = self.stream_shard.get(self.sequence_number, limit)
        try:
            last_sequence_number = max(i['dynamodb']['SequenceNumber'] for i in items)
            new_shard_iterator = ShardIterator(self.streams_backend,
                                               self.stream_shard,
                                               'AFTER_SEQUENCE_NUMBER',
                                               last_sequence_number)
        except ValueError:
            new_shard_iterator = ShardIterator(self.streams_backend,
                                               self.stream_shard,
                                               'AT_SEQUENCE_NUMBER',
                                               self.sequence_number)

        self.streams_backend.shard_iterators[new_shard_iterator.arn] = new_shard_iterator
        return {
            'NextShardIterator': new_shard_iterator.arn,
            'Records': items
        }


class DynamoDBStreamsBackend(BaseBackend):
    def __init__(self, region):
        self.region = region
        self.shard_iterators = {}

    def reset(self):
        region = self.region
        self.__dict__ = {}
        self.__init__(region)

    @property
    def dynamodb(self):
        return dynamodb_backends[self.region]

    def _get_table_from_arn(self, arn):
        table_name = arn.split(':', 6)[5].split('/')[1]
        return self.dynamodb.get_table(table_name)

    def describe_stream(self, arn):
        table = self._get_table_from_arn(arn)
        resp = {'StreamDescription': {
            'StreamArn': arn,
            'StreamLabel': table.latest_stream_label,
            'StreamStatus': ('ENABLED' if table.latest_stream_label
                             else 'DISABLED'),
            'StreamViewType': table.stream_specification['StreamViewType'],
            'CreationRequestDateTime': table.stream_shard.created_on.isoformat(),
            'TableName': table.name,
            'KeySchema': table.schema,
            'Shards': ([table.stream_shard.to_json()] if table.stream_shard
                       else [])
        }}

        return json.dumps(resp)

    def list_streams(self, table_name=None):
        streams = []
        for table in self.dynamodb.tables.values():
            if table_name is not None and table.name != table_name:
                continue
            if table.latest_stream_label:
                d = table.describe(base_key='Table')
                streams.append({
                    'StreamArn': d['Table']['LatestStreamArn'],
                    'TableName': d['Table']['TableName'],
                    'StreamLabel': d['Table']['LatestStreamLabel']
                })

        return json.dumps({'Streams': streams})

    def get_shard_iterator(self, arn, shard_id, shard_iterator_type, sequence_number=None):
        table = self._get_table_from_arn(arn)
        assert table.stream_shard.id == shard_id

        shard_iterator = ShardIterator(self, table.stream_shard,
                                       shard_iterator_type,
                                       sequence_number)
        self.shard_iterators[shard_iterator.arn] = shard_iterator

        return json.dumps(shard_iterator.to_json())

    def get_records(self, iterator_arn, limit):
        shard_iterator = self.shard_iterators[iterator_arn]
        return json.dumps(shard_iterator.get(limit))


available_regions = boto3.session.Session().get_available_regions(
    'dynamodbstreams')
dynamodbstreams_backends = {region: DynamoDBStreamsBackend(region=region)
                            for region in available_regions}

34 moto/dynamodbstreams/responses.py Normal file

@ -0,0 +1,34 @@
from __future__ import unicode_literals

from moto.core.responses import BaseResponse

from .models import dynamodbstreams_backends


class DynamoDBStreamsHandler(BaseResponse):

    @property
    def backend(self):
        return dynamodbstreams_backends[self.region]

    def describe_stream(self):
        arn = self._get_param('StreamArn')
        return self.backend.describe_stream(arn)

    def list_streams(self):
        table_name = self._get_param('TableName')
        return self.backend.list_streams(table_name)

    def get_shard_iterator(self):
        arn = self._get_param('StreamArn')
        shard_id = self._get_param('ShardId')
        shard_iterator_type = self._get_param('ShardIteratorType')
        return self.backend.get_shard_iterator(arn, shard_id,
                                               shard_iterator_type)

    def get_records(self):
        arn = self._get_param('ShardIterator')
        limit = self._get_param('Limit')
        if limit is None:
            limit = 1000
        return self.backend.get_records(arn, limit)

10 moto/dynamodbstreams/urls.py Normal file

@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import DynamoDBStreamsHandler

url_bases = [
    "https?://streams.dynamodb.(.+).amazonaws.com"
]

url_paths = {
    "{0}/$": DynamoDBStreamsHandler.dispatch,
}
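With models, responses and URL dispatch in place, the mocked `streams.dynamodb.<region>.amazonaws.com` endpoint can be driven with a regular boto3 streams client. A hedged end-to-end sketch, mirroring the shape of the new package:

```python
import boto3
from moto import mock_dynamodb2, mock_dynamodbstreams

@mock_dynamodb2
@mock_dynamodbstreams
def test_read_stream_records():
    ddb = boto3.client('dynamodb', region_name='us-east-1')
    ddb.create_table(
        TableName='test-streams',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
        StreamSpecification={'StreamEnabled': True,
                             'StreamViewType': 'NEW_AND_OLD_IMAGES'})
    ddb.put_item(TableName='test-streams', Item={'id': {'S': 'row1'}})

    streams = boto3.client('dynamodbstreams', region_name='us-east-1')
    stream_arn = streams.list_streams()['Streams'][0]['StreamArn']
    shard_id = streams.describe_stream(
        StreamArn=stream_arn)['StreamDescription']['Shards'][0]['ShardId']
    iterator = streams.get_shard_iterator(
        StreamArn=stream_arn, ShardId=shard_id,
        ShardIteratorType='TRIM_HORIZON')['ShardIterator']

    records = streams.get_records(ShardIterator=iterator)['Records']
    assert records[0]['eventName'] == 'INSERT'
```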
72 moto/ec2/models.py Executable file → Normal file

@ -134,6 +134,8 @@ def utc_date_and_time():


def validate_resource_ids(resource_ids):
    if not resource_ids:
        raise MissingParameterError(parameter='resourceIdSet')
    for resource_id in resource_ids:
        if not is_valid_resource_id(resource_id):
            raise InvalidID(resource_id=resource_id)

@ -189,7 +191,7 @@ class NetworkInterface(TaggedEC2Resource):
        self.ec2_backend = ec2_backend
        self.id = random_eni_id()
        self.device_index = device_index
        self.private_ip_address = private_ip_address
        self.private_ip_address = private_ip_address or random_private_ip()
        self.subnet = subnet
        self.instance = None
        self.attachment_id = None

@ -388,6 +390,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
        self.ebs_optimized = kwargs.get("ebs_optimized", False)
        self.source_dest_check = "true"
        self.launch_time = utc_date_and_time()
        self.ami_launch_index = kwargs.get("ami_launch_index", 0)
        self.disable_api_termination = kwargs.get("disable_api_termination", False)
        self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
        associate_public_ip = kwargs.get("associate_public_ip", False)

@ -719,6 +722,7 @@ class InstanceBackend(object):
        instance_tags = tags.get('instance', {})

        for index in range(count):
            kwargs["ami_launch_index"] = index
            new_instance = Instance(
                self,
                image_id,

@ -1115,7 +1119,7 @@ class Ami(TaggedEC2Resource):
        elif filter_name == 'image-id':
            return self.id
        elif filter_name == 'is-public':
            return str(self.is_public)
            return self.is_public_string
        elif filter_name == 'state':
            return self.state
        elif filter_name == 'name':

@ -2230,6 +2234,10 @@ class VPCPeeringConnectionStatus(object):
        self.code = code
        self.message = message

    def deleted(self):
        self.code = 'deleted'
        self.message = 'Deleted by {deleter ID}'

    def initiating(self):
        self.code = 'initiating-request'
        self.message = 'Initiating Request to {accepter ID}'

@ -2292,9 +2300,8 @@ class VPCPeeringConnectionBackend(object):
        return self.vpc_pcxs.get(vpc_pcx_id)

    def delete_vpc_peering_connection(self, vpc_pcx_id):
        deleted = self.vpc_pcxs.pop(vpc_pcx_id, None)
        if not deleted:
            raise InvalidVPCPeeringConnectionIdError(vpc_pcx_id)
        deleted = self.get_vpc_peering_connection(vpc_pcx_id)
        deleted._status.deleted()
        return deleted

    def accept_vpc_peering_connection(self, vpc_pcx_id):

@ -2461,7 +2468,7 @@ class SubnetBackend(object):
                        default_for_az, map_public_ip_on_launch)

        # AWS associates a new subnet with the default Network ACL
        self.associate_default_network_acl_with_subnet(subnet_id)
        self.associate_default_network_acl_with_subnet(subnet_id, vpc_id)
        self.subnets[availability_zone][subnet_id] = subnet
        return subnet

@ -2876,7 +2883,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
    def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
                 valid_from, valid_until, launch_group, availability_zone_group,
                 key_name, security_groups, user_data, instance_type, placement,
                 kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id,
                 kernel_id, ramdisk_id, monitoring_enabled, subnet_id, tags, spot_fleet_id,
                 **kwargs):
        super(SpotInstanceRequest, self).__init__(**kwargs)
        ls = LaunchSpecification()

@ -2900,6 +2907,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
        ls.monitored = monitoring_enabled
        ls.subnet_id = subnet_id
        self.spot_fleet_id = spot_fleet_id
        self.tags = tags

        if security_groups:
            for group_name in security_groups:

@ -2933,6 +2941,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
            security_group_names=[],
            security_group_ids=self.launch_specification.groups,
            spot_fleet_id=self.spot_fleet_id,
            tags=self.tags,
        )
        instance = reservation.instances[0]
        return instance

@ -2948,15 +2957,16 @@ class SpotRequestBackend(object):
                              valid_until, launch_group, availability_zone_group,
                              key_name, security_groups, user_data,
                              instance_type, placement, kernel_id, ramdisk_id,
                              monitoring_enabled, subnet_id, spot_fleet_id=None):
                              monitoring_enabled, subnet_id, tags=None, spot_fleet_id=None):
        requests = []
        tags = tags or {}
        for _ in range(count):
            spot_request_id = random_spot_request_id()
            request = SpotInstanceRequest(self,
                                          spot_request_id, price, image_id, type, valid_from, valid_until,
                                          launch_group, availability_zone_group, key_name, security_groups,
                                          user_data, instance_type, placement, kernel_id, ramdisk_id,
                                          monitoring_enabled, subnet_id, spot_fleet_id)
                                          monitoring_enabled, subnet_id, tags, spot_fleet_id)
            self.spot_instance_requests[spot_request_id] = request
            requests.append(request)
        return requests

@ -2976,8 +2986,8 @@ class SpotRequestBackend(object):

class SpotFleetLaunchSpec(object):
    def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
                 instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
                 weighted_capacity):
                 instance_type, key_name, monitoring, spot_price, subnet_id, tag_specifications,
                 user_data, weighted_capacity):
        self.ebs_optimized = ebs_optimized
        self.group_set = group_set
        self.iam_instance_profile = iam_instance_profile

@ -2987,6 +2997,7 @@ class SpotFleetLaunchSpec(object):
        self.monitoring = monitoring
        self.spot_price = spot_price
        self.subnet_id = subnet_id
        self.tag_specifications = tag_specifications
        self.user_data = user_data
        self.weighted_capacity = float(weighted_capacity)

@ -3017,6 +3028,7 @@ class SpotFleetRequest(TaggedEC2Resource):
                monitoring=spec.get('monitoring._enabled'),
                spot_price=spec.get('spot_price', self.spot_price),
                subnet_id=spec['subnet_id'],
                tag_specifications=self._parse_tag_specifications(spec),
                user_data=spec.get('user_data'),
                weighted_capacity=spec['weighted_capacity'],
            )

@ -3099,6 +3111,7 @@ class SpotFleetRequest(TaggedEC2Resource):
                monitoring_enabled=launch_spec.monitoring,
                subnet_id=launch_spec.subnet_id,
                spot_fleet_id=self.id,
                tags=launch_spec.tag_specifications,
            )
            self.spot_requests.extend(requests)
        self.fulfilled_capacity += added_weight

@ -3121,6 +3134,25 @@ class SpotFleetRequest(TaggedEC2Resource):
        self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids]
        self.ec2_backend.terminate_instances(instance_ids)

    def _parse_tag_specifications(self, spec):
        try:
            tag_spec_num = max([int(key.split('.')[1]) for key in spec if key.startswith("tag_specification_set")])
        except ValueError:  # no tag specifications
            return {}

        tag_specifications = {}
        for si in range(1, tag_spec_num + 1):
            resource_type = spec["tag_specification_set.{si}._resource_type".format(si=si)]

            tags = [key for key in spec if key.startswith("tag_specification_set.{si}._tag".format(si=si))]
            tag_num = max([int(key.split('.')[3]) for key in tags])
            tag_specifications[resource_type] = dict((
                spec["tag_specification_set.{si}._tag.{ti}._key".format(si=si, ti=ti)],
                spec["tag_specification_set.{si}._tag.{ti}._value".format(si=si, ti=ti)],
            ) for ti in range(1, tag_num + 1))

        return tag_specifications
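`_parse_tag_specifications` re-nests the flattened querystring keys moto's parameter parser produces for spot-fleet tag specifications. Since the helper never touches `self`, it can be exercised unbound; key names follow moto's parsed-parameter convention, values are invented:

```python
from moto.ec2.models import SpotFleetRequest

spec = {
    'tag_specification_set.1._resource_type': 'instance',
    'tag_specification_set.1._tag.1._key': 'project',
    'tag_specification_set.1._tag.1._value': 'moto',
    'tag_specification_set.1._tag.2._key': 'env',
    'tag_specification_set.1._tag.2._value': 'test',
}
# The method ignores `self`, so passing None is enough for a demo (Python 3):
result = SpotFleetRequest._parse_tag_specifications(None, spec)
assert result == {'instance': {'project': 'moto', 'env': 'test'}}
```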
class SpotFleetBackend(object):
    def __init__(self):

@ -3557,8 +3589,22 @@ class NetworkAclBackend(object):
        self.get_vpc(vpc_id)
        network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
        self.network_acls[network_acl_id] = network_acl
        if default:
            self.add_default_entries(network_acl_id)
        return network_acl

    def add_default_entries(self, network_acl_id):
        default_acl_entries = [
            {'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'},
            {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'},
            {'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'},
            {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'}
        ]
        for entry in default_acl_entries:
            self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1',
                                          rule_action=entry['rule_action'], egress=entry['egress'], cidr_block='0.0.0.0/0',
                                          icmp_code=None, icmp_type=None, port_range_from=None, port_range_to=None)
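`add_default_entries` gives every default network ACL the same four rules a real VPC starts with: allow-all at rule 100 and the catch-all deny at 32767, for both egress and ingress. A hedged boto3 check:

```python
import boto3
from moto import mock_ec2

@mock_ec2
def test_new_vpc_default_acl_entries():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']

    acls = ec2.describe_network_acls()['NetworkAcls']
    default_acl = [a for a in acls if a['VpcId'] == vpc_id][0]
    rule_numbers = sorted(e['RuleNumber'] for e in default_acl['Entries'])
    assert rule_numbers == [100, 100, 32767, 32767]
```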
    def get_all_network_acls(self, network_acl_ids=None, filters=None):
        network_acls = self.network_acls.values()

@ -3633,9 +3679,9 @@ class NetworkAclBackend(object):
        new_acl.associations[new_assoc_id] = association
        return association

    def associate_default_network_acl_with_subnet(self, subnet_id):
    def associate_default_network_acl_with_subnet(self, subnet_id, vpc_id):
        association_id = random_network_acl_subnet_association_id()
        acl = next(acl for acl in self.network_acls.values() if acl.default)
        acl = next(acl for acl in self.network_acls.values() if acl.default and acl.vpc_id == vpc_id)
        acl.associations[association_id] = NetworkAclAssociation(self, association_id,
                                                                 subnet_id, acl.id)

moto/ec2/responses/elastic_block_store.py

@ -150,16 +150,18 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
   <availabilityZone>{{ volume.zone.name }}</availabilityZone>
   <status>creating</status>
   <createTime>{{ volume.create_time}}</createTime>
   <tagSet>
     {% for tag in volume.get_tags() %}
       <item>
         <resourceId>{{ tag.resource_id }}</resourceId>
         <resourceType>{{ tag.resource_type }}</resourceType>
         <key>{{ tag.key }}</key>
         <value>{{ tag.value }}</value>
       </item>
     {% endfor %}
   </tagSet>
   {% if volume.get_tags() %}
     <tagSet>
       {% for tag in volume.get_tags() %}
         <item>
           <resourceId>{{ tag.resource_id }}</resourceId>
           <resourceType>{{ tag.resource_type }}</resourceType>
           <key>{{ tag.key }}</key>
           <value>{{ tag.value }}</value>
         </item>
       {% endfor %}
     </tagSet>
   {% endif %}
   <volumeType>standard</volumeType>
</CreateVolumeResponse>"""

@ -191,16 +193,18 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
            </item>
          {% endif %}
        </attachmentSet>
        <tagSet>
          {% for tag in volume.get_tags() %}
            <item>
              <resourceId>{{ tag.resource_id }}</resourceId>
              <resourceType>{{ tag.resource_type }}</resourceType>
              <key>{{ tag.key }}</key>
              <value>{{ tag.value }}</value>
            </item>
          {% endfor %}
        </tagSet>
        {% if volume.get_tags() %}
          <tagSet>
            {% for tag in volume.get_tags() %}
              <item>
                <resourceId>{{ tag.resource_id }}</resourceId>
                <resourceType>{{ tag.resource_type }}</resourceType>
                <key>{{ tag.key }}</key>
                <value>{{ tag.value }}</value>
              </item>
            {% endfor %}
          </tagSet>
        {% endif %}
        <volumeType>standard</volumeType>
      </item>
    {% endfor %}

moto/ec2/responses/instances.py

@ -244,7 +244,7 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
          <reason/>
          <keyName>{{ instance.key_name }}</keyName>
          <ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
          <amiLaunchIndex>0</amiLaunchIndex>
          <amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
          <instanceType>{{ instance.instance_type }}</instanceType>
          <launchTime>{{ instance.launch_time }}</launchTime>
          <placement>

@ -384,7 +384,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
                    <reason>{{ instance._reason }}</reason>
                    <keyName>{{ instance.key_name }}</keyName>
                    <ebsOptimized>{{ instance.ebs_optimized }}</ebsOptimized>
                    <amiLaunchIndex>0</amiLaunchIndex>
                    <amiLaunchIndex>{{ instance.ami_launch_index }}</amiLaunchIndex>
                    <productCodes/>
                    <instanceType>{{ instance.instance_type }}</instanceType>
                    <launchTime>{{ instance.launch_time }}</launchTime>

@ -450,6 +450,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
                    </blockDeviceMapping>
                    <virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
                    <clientToken>ABCDE1234567890123</clientToken>
                    {% if instance.get_tags() %}
                    <tagSet>
                      {% for tag in instance.get_tags() %}
                        <item>

@ -460,6 +461,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazona
                        </item>
                      {% endfor %}
                    </tagSet>
                    {% endif %}
                    <hypervisor>xen</hypervisor>
                    <networkInterfaceSet>
                      {% for nic in instance.nics.values() %}

moto/ec2/responses/spot_fleets.py

@ -107,6 +107,21 @@ DESCRIBE_SPOT_FLEET_TEMPLATE = """<DescribeSpotFleetRequestsResponse xmlns="http
                    </item>
                  {% endfor %}
                </groupSet>
                <tagSpecificationSet>
                  {% for resource_type in launch_spec.tag_specifications %}
                    <item>
                      <resourceType>{{ resource_type }}</resourceType>
                      <tag>
                        {% for key, value in launch_spec.tag_specifications[resource_type].items() %}
                          <item>
                            <key>{{ key }}</key>
                            <value>{{ value }}</value>
                          </item>
                        {% endfor %}
                      </tag>
                    </item>
                  {% endfor %}
                </tagSpecificationSet>
              </item>
            {% endfor %}
          </launchSpecifications>

moto/ecr/urls.py

@ -3,6 +3,7 @@ from .responses import ECRResponse

url_bases = [
    "https?://ecr.(.+).amazonaws.com",
    "https?://api.ecr.(.+).amazonaws.com",
]

url_paths = {

moto/ecs/models.py

@ -94,6 +94,12 @@ class Cluster(BaseObject):
            # no-op when nothing changed between old and new resources
            return original_resource

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            return self.arn
        raise UnformattedGetAttTemplateException()


class TaskDefinition(BaseObject):

@ -271,6 +277,12 @@ class Service(BaseObject):
        else:
            return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count)

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Name':
            return self.name
        raise UnformattedGetAttTemplateException()


class ContainerInstance(BaseObject):

@ -358,6 +370,20 @@ class ContainerInstance(BaseObject):
        return formatted_attr


class ClusterFailure(BaseObject):
    def __init__(self, reason, cluster_name):
        self.reason = reason
        self.arn = "arn:aws:ecs:us-east-1:012345678910:cluster/{0}".format(
            cluster_name)

    @property
    def response_object(self):
        response_object = self.gen_response_object()
        response_object['reason'] = self.reason
        response_object['arn'] = self.arn
        return response_object


class ContainerInstanceFailure(BaseObject):

    def __init__(self, reason, container_instance_id):

@ -419,6 +445,7 @@ class EC2ContainerServiceBackend(BaseBackend):

    def describe_clusters(self, list_clusters_name=None):
        list_clusters = []
        failures = []
        if list_clusters_name is None:
            if 'default' in self.clusters:
                list_clusters.append(self.clusters['default'].response_object)

@ -429,9 +456,8 @@ class EC2ContainerServiceBackend(BaseBackend):
                    list_clusters.append(
                        self.clusters[cluster_name].response_object)
                else:
                    raise Exception(
                        "{0} is not a cluster".format(cluster_name))
        return list_clusters
                    failures.append(ClusterFailure('MISSING', cluster_name))
        return list_clusters, failures

    def delete_cluster(self, cluster_str):
        cluster_name = cluster_str.split('/')[-1]

@ -769,6 +795,8 @@ class EC2ContainerServiceBackend(BaseBackend):
                Container instances status should be one of [ACTIVE,DRAINING]")
        failures = []
        container_instance_objects = []
        list_container_instance_ids = [x.split('/')[-1]
                                       for x in list_container_instance_ids]
        for container_instance_id in list_container_instance_ids:
            container_instance = self.container_instances[cluster_name].get(container_instance_id, None)
            if container_instance is not None:

moto/ecs/responses.py

@ -45,10 +45,10 @@ class EC2ContainerServiceResponse(BaseResponse):

    def describe_clusters(self):
        list_clusters_name = self._get_param('clusters')
        clusters = self.ecs_backend.describe_clusters(list_clusters_name)
        clusters, failures = self.ecs_backend.describe_clusters(list_clusters_name)
        return json.dumps({
            'clusters': clusters,
            'failures': []
            'failures': [cluster.response_object for cluster in failures]
        })

    def delete_cluster(self):
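`describe_clusters` no longer raises on unknown cluster names; it reports them in the response's `failures` list, as the real ECS API does. A hedged sketch:

```python
import boto3
from moto import mock_ecs

@mock_ecs
def test_describe_missing_cluster():
    ecs = boto3.client('ecs', region_name='us-east-1')
    ecs.create_cluster(clusterName='prod')

    resp = ecs.describe_clusters(clusters=['prod', 'missing'])
    assert [c['clusterName'] for c in resp['clusters']] == ['prod']
    assert resp['failures'][0]['reason'] == 'MISSING'
    assert resp['failures'][0]['arn'].endswith('cluster/missing')
```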
moto/emr/responses.py

@ -613,13 +613,11 @@ DESCRIBE_STEP_TEMPLATE = """<DescribeStepResponse xmlns="http://elasticmapreduce
      <Id>{{ step.id }}</Id>
      <Name>{{ step.name | escape }}</Name>
      <Status>
<!-- does not exist for botocore 1.4.28
        <FailureDetails>
          <Reason/>
          <Message/>
          <LogFile/>
        </FailureDetails>
-->
        <State>{{ step.state }}</State>
        <StateChangeReason>{{ step.state_change_reason }}</StateChangeReason>
        <Timeline>

moto/iam/exceptions.py

@ -24,3 +24,56 @@ class IAMReportNotPresentException(RESTError):
    def __init__(self, message):
        super(IAMReportNotPresentException, self).__init__(
            "ReportNotPresent", message)


class MalformedCertificate(RESTError):
    code = 400

    def __init__(self, cert):
        super(MalformedCertificate, self).__init__(
            'MalformedCertificate', 'Certificate {cert} is malformed'.format(cert=cert))


class DuplicateTags(RESTError):
    code = 400

    def __init__(self):
        super(DuplicateTags, self).__init__(
            'InvalidInput', 'Duplicate tag keys found. Please note that Tag keys are case insensitive.')


class TagKeyTooBig(RESTError):
    code = 400

    def __init__(self, tag, param='tags.X.member.key'):
        super(TagKeyTooBig, self).__init__(
            'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy "
            "constraint: Member must have length less than or equal to 128.".format(tag, param))


class TagValueTooBig(RESTError):
    code = 400

    def __init__(self, tag):
        super(TagValueTooBig, self).__init__(
            'ValidationError', "1 validation error detected: Value '{}' at 'tags.X.member.value' failed to satisfy "
            "constraint: Member must have length less than or equal to 256.".format(tag))


class InvalidTagCharacters(RESTError):
    code = 400

    def __init__(self, tag, param='tags.X.member.key'):
        message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(tag, param)
        message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"

        super(InvalidTagCharacters, self).__init__('ValidationError', message)


class TooManyTags(RESTError):
    code = 400

    def __init__(self, tags, param='tags'):
        super(TooManyTags, self).__init__(
            'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy "
            "constraint: Member must have length less than or equal to 50.".format(tags, param))

moto/iam/models.py

@ -1,14 +1,20 @@
from __future__ import unicode_literals
import base64
import sys
from datetime import datetime
import json
import re

from cryptography import x509
from cryptography.hazmat.backends import default_backend

import pytz
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds

from .aws_managed_policies import aws_managed_policies_data
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException
from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException, MalformedCertificate, \
    DuplicateTags, TagKeyTooBig, InvalidTagCharacters, TooManyTags, TagValueTooBig
from .utils import random_access_key, random_alphanumeric, random_resource_id, random_policy_id

ACCOUNT_ID = 123456789012

@ -28,7 +34,6 @@ class MFADevice(object):


class Policy(BaseModel):

    is_attachable = False

    def __init__(self,

@ -43,13 +48,29 @@ class Policy(BaseModel):
        self.description = description or ''
        self.id = random_policy_id()
        self.path = path or '/'
        self.default_version_id = default_version_id or 'v1'

        if default_version_id:
            self.default_version_id = default_version_id
            self.next_version_num = int(default_version_id.lstrip('v')) + 1
        else:
            self.default_version_id = 'v1'
            self.next_version_num = 2
        self.versions = [PolicyVersion(self.arn, document, True)]

        self.create_datetime = datetime.now(pytz.utc)
        self.update_datetime = datetime.now(pytz.utc)


class SAMLProvider(BaseModel):
    def __init__(self, name, saml_metadata_document=None):
        self.name = name
        self.saml_metadata_document = saml_metadata_document

    @property
    def arn(self):
        return "arn:aws:iam::{0}:saml-provider/{1}".format(ACCOUNT_ID, self.name)


class PolicyVersion(object):

    def __init__(self,

@ -114,9 +135,12 @@ class Role(BaseModel):
        self.id = role_id
        self.name = name
        self.assume_role_policy_document = assume_role_policy_document
        self.path = path
        self.path = path or '/'
        self.policies = {}
        self.managed_policies = {}
        self.create_date = datetime.now(pytz.utc)
        self.tags = {}
        self.description = ""

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):

@ -160,14 +184,18 @@ class Role(BaseModel):
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
        raise UnformattedGetAttTemplateException()

    def get_tags(self):
        return [self.tags[tag] for tag in self.tags]


class InstanceProfile(BaseModel):

    def __init__(self, instance_profile_id, name, path, roles):
        self.id = instance_profile_id
        self.name = name
        self.path = path
        self.path = path or '/'
        self.roles = roles if roles else []
        self.create_date = datetime.now(pytz.utc)

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):

@ -191,7 +219,7 @@ class InstanceProfile(BaseModel):
    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'Arn':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
            return self.arn
        raise UnformattedGetAttTemplateException()


@ -213,6 +241,16 @@ class Certificate(BaseModel):
        return "arn:aws:iam::{0}:server-certificate{1}{2}".format(ACCOUNT_ID, self.path, self.cert_name)


class SigningCertificate(BaseModel):

    def __init__(self, id, user_name, body):
        self.id = id
        self.user_name = user_name
        self.body = body
        self.upload_date = datetime.strftime(datetime.utcnow(), "%Y-%m-%d-%H-%M-%S")
        self.status = 'Active'


class AccessKey(BaseModel):

    def __init__(self, user_name):

@ -224,6 +262,10 @@ class AccessKey(BaseModel):
            datetime.utcnow(),
            "%Y-%m-%dT%H:%M:%SZ"
        )
        self.last_used = datetime.strftime(
            datetime.utcnow(),
            "%Y-%m-%dT%H:%M:%SZ"
        )

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException

@ -297,6 +339,7 @@ class User(BaseModel):
        self.access_keys = []
        self.password = None
        self.password_reset_required = False
        self.signing_certificates = {}

    @property
    def arn(self):

@ -352,21 +395,20 @@ class User(BaseModel):
        return self.access_keys

    def delete_access_key(self, access_key_id):
        for key in self.access_keys:
            if key.access_key_id == access_key_id:
                self.access_keys.remove(key)
                break
        else:
            raise IAMNotFoundException(
                "Key {0} not found".format(access_key_id))
        key = self.get_access_key_by_id(access_key_id)
        self.access_keys.remove(key)

    def update_access_key(self, access_key_id, status):
        key = self.get_access_key_by_id(access_key_id)
        key.status = status

    def get_access_key_by_id(self, access_key_id):
        for key in self.access_keys:
            if key.access_key_id == access_key_id:
                key.status = status
                break
                return key
        else:
            raise IAMNotFoundException("The Access Key with id {0} cannot be found".format(access_key_id))
            raise IAMNotFoundException(
                "The Access Key with id {0} cannot be found".format(access_key_id))

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException

@ -427,6 +469,7 @@ class IAMBackend(BaseBackend):
        self.credential_report = None
        self.managed_policies = self._init_managed_policies()
        self.account_aliases = []
        self.saml_providers = {}
        super(IAMBackend, self).__init__()

    def _init_managed_policies(self):

@ -437,6 +480,16 @@ class IAMBackend(BaseBackend):
        policy = arns[policy_arn]
        policy.attach_to(self.get_role(role_name))

    def update_role_description(self, role_name, role_description):
        role = self.get_role(role_name)
        role.description = role_description
        return role

    def update_role(self, role_name, role_description):
        role = self.get_role(role_name)
        role.description = role_description
        return role

    def detach_role_policy(self, policy_arn, role_name):
        arns = dict((p.arn, p) for p in self.managed_policies.values())
        try:

@ -583,13 +636,94 @@ class IAMBackend(BaseBackend):
        role = self.get_role(role_name)
        return role.policies.keys()

    def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'):
        """Validates the tag key.

        :param all_tags: Dict to check if there is a duplicate tag.
        :param tag_key: The tag key to check against.
        :param exception_param: The exception parameter to send over to help format the message. This is to reflect
                                the difference between the tag and untag APIs.
        :return:
        """
        # Validate that the key length is correct:
        if len(tag_key) > 128:
            raise TagKeyTooBig(tag_key, param=exception_param)

        # Validate that the tag key fits the proper Regex:
        # [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
        match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
        # Kudos if you can come up with a better way of doing a global search :)
        if not len(match) or len(match[0]) < len(tag_key):
            raise InvalidTagCharacters(tag_key, param=exception_param)

    def _check_tag_duplicate(self, all_tags, tag_key):
        """Validates that a tag key is not a duplicate

        :param all_tags: Dict to check if there is a duplicate tag.
        :param tag_key: The tag key to check against.
        :return:
        """
        if tag_key in all_tags:
            raise DuplicateTags()

    def list_role_tags(self, role_name, marker, max_items=100):
        role = self.get_role(role_name)

        max_items = int(max_items)
        tag_index = sorted(role.tags)
        start_idx = int(marker) if marker else 0

        tag_index = tag_index[start_idx:start_idx + max_items]

        if len(role.tags) <= (start_idx + max_items):
            marker = None
        else:
            marker = str(start_idx + max_items)

        # Make the tag list of dict's:
        tags = [role.tags[tag] for tag in tag_index]

        return tags, marker

    def tag_role(self, role_name, tags):
        if len(tags) > 50:
            raise TooManyTags(tags)

        role = self.get_role(role_name)

        tag_keys = {}
        for tag in tags:
            # Need to index by the lowercase tag key since the keys are case insensitive, but their case is retained.
            ref_key = tag['Key'].lower()
            self._check_tag_duplicate(tag_keys, ref_key)
            self._validate_tag_key(tag['Key'])
            if len(tag['Value']) > 256:
                raise TagValueTooBig(tag['Value'])

            tag_keys[ref_key] = tag

        role.tags.update(tag_keys)

    def untag_role(self, role_name, tag_keys):
        if len(tag_keys) > 50:
            raise TooManyTags(tag_keys, param='tagKeys')

        role = self.get_role(role_name)

        for key in tag_keys:
            ref_key = key.lower()
            self._validate_tag_key(key, exception_param='tagKeys')

            role.tags.pop(ref_key, None)
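Taken together these methods implement role tagging with the same validation real IAM applies: key length and charset, case-insensitive duplicate detection, a 256-character value limit and a 50-tag cap. A hedged boto3 sketch (it assumes the matching `tag_role`/`list_role_tags` response handlers that accompany this change):

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_iam

@mock_iam
def test_role_tagging():
    iam = boto3.client('iam', region_name='us-east-1')
    iam.create_role(RoleName='my-role', AssumeRolePolicyDocument='{}')

    iam.tag_role(RoleName='my-role', Tags=[{'Key': 'env', 'Value': 'test'}])
    assert iam.list_role_tags(RoleName='my-role')['Tags'] == [
        {'Key': 'env', 'Value': 'test'}]

    # 51 tags exceeds the 50-tag limit and raises a ValidationError:
    too_many = [{'Key': 'k%d' % i, 'Value': 'v'} for i in range(51)]
    try:
        iam.tag_role(RoleName='my-role', Tags=too_many)
    except ClientError as exc:
        assert exc.response['Error']['Code'] == 'ValidationError'
```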
    def create_policy_version(self, policy_arn, policy_document, set_as_default):
        policy = self.get_policy(policy_arn)
        if not policy:
            raise IAMNotFoundException("Policy not found")
        version = PolicyVersion(policy_arn, policy_document, set_as_default)
        policy.versions.append(version)
        version.version_id = 'v{0}'.format(len(policy.versions))
        version.version_id = 'v{0}'.format(policy.next_version_num)
        policy.next_version_num += 1
        if set_as_default:
            policy.default_version_id = version.version_id
        return version
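`next_version_num` makes policy version ids monotonically increasing rather than derived from the current list length, so a deleted version's id is never reused. A hedged sketch:

```python
import boto3
from moto import mock_iam

@mock_iam
def test_policy_version_ids_not_reused():
    iam = boto3.client('iam', region_name='us-east-1')
    doc = '{"Version": "2012-10-17", "Statement": []}'
    arn = iam.create_policy(PolicyName='p',
                            PolicyDocument=doc)['Policy']['Arn']

    v2 = iam.create_policy_version(
        PolicyArn=arn, PolicyDocument=doc)['PolicyVersion']['VersionId']
    iam.delete_policy_version(PolicyArn=arn, VersionId=v2)

    v3 = iam.create_policy_version(
        PolicyArn=arn, PolicyDocument=doc)['PolicyVersion']['VersionId']
    assert (v2, v3) == ('v2', 'v3')   # 'v2' is not handed out again
```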
@ -765,6 +899,70 @@ class IAMBackend(BaseBackend):
|
||||
|
||||
        return users

    def update_user(self, user_name, new_path=None, new_user_name=None):
        try:
            user = self.users[user_name]
        except KeyError:
            raise IAMNotFoundException("User {0} not found".format(user_name))

        if new_path:
            user.path = new_path
        if new_user_name:
            user.name = new_user_name
            self.users[new_user_name] = self.users.pop(user_name)

    def list_roles(self, path_prefix, marker, max_items):
        # path_prefix, marker and max_items are accepted but not yet applied here.
        return self.roles.values()
    def upload_signing_certificate(self, user_name, body):
        user = self.get_user(user_name)
        cert_id = random_resource_id(size=32)

        # Validate the signing cert:
        try:
            if sys.version_info < (3, 0):
                data = bytes(body)
            else:
                data = bytes(body, 'utf8')

            x509.load_pem_x509_certificate(data, default_backend())

        except Exception:
            raise MalformedCertificate(body)

        user.signing_certificates[cert_id] = SigningCertificate(cert_id, user_name, body)

        return user.signing_certificates[cert_id]

    def delete_signing_certificate(self, user_name, cert_id):
        user = self.get_user(user_name)

        try:
            del user.signing_certificates[cert_id]
        except KeyError:
            raise IAMNotFoundException("The Certificate with id {id} cannot be found.".format(id=cert_id))
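Because the body is fed straight into cryptography's PEM loader, tests need a real certificate. A minimal sketch of producing one with the `cryptography` package (the helper name and subject are illustrative, not part of the diff):

```python
import datetime

from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

def make_pem_cert(common_name=u'moto-test'):
    # Self-signed throwaway certificate, good enough for upload_signing_certificate.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                                   backend=default_backend())
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, common_name)])
    cert = (x509.CertificateBuilder()
            .subject_name(name)
            .issuer_name(name)
            .public_key(key.public_key())
            .serial_number(x509.random_serial_number())
            .not_valid_before(datetime.datetime.utcnow())
            .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1))
            .sign(key, hashes.SHA256(), default_backend()))
    return cert.public_bytes(serialization.Encoding.PEM).decode('utf-8')
```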
    def list_signing_certificates(self, user_name):
        user = self.get_user(user_name)

        return list(user.signing_certificates.values())

    def update_signing_certificate(self, user_name, cert_id, status):
        user = self.get_user(user_name)

        try:
            user.signing_certificates[cert_id].status = status

        except KeyError:
            raise IAMNotFoundException("The Certificate with id {id} cannot be found.".format(id=cert_id))

    def create_login_profile(self, user_name, password):
        # This does not currently deal with PasswordPolicyViolation.
        user = self.get_user(user_name)

@ -838,6 +1036,24 @@ class IAMBackend(BaseBackend):
        user = self.get_user(user_name)
        user.update_access_key(access_key_id, status)

    def get_access_key_last_used(self, access_key_id):
        access_keys_list = self.get_all_access_keys_for_all_users()
        for key in access_keys_list:
            if key.access_key_id == access_key_id:
                return {
                    'user_name': key.user_name,
                    'last_used': key.last_used
                }
        else:
            raise IAMNotFoundException(
                "The Access Key with id {0} cannot be found".format(access_key_id))

    def get_all_access_keys_for_all_users(self):
        access_keys_list = []
        for user_name in self.users:
            access_keys_list += self.get_all_access_keys(user_name)
        return access_keys_list
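A hedged sketch of exercising the lookup above through boto3 — the backend scans every user's keys for the matching id:

```python
import boto3
from moto import mock_iam

@mock_iam
def last_used_demo():
    client = boto3.client('iam', region_name='us-east-1')
    client.create_user(UserName='alice')
    key_id = client.create_access_key(UserName='alice')['AccessKey']['AccessKeyId']
    resp = client.get_access_key_last_used(AccessKeyId=key_id)
    assert resp['UserName'] == 'alice'
```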
    def get_all_access_keys(self, user_name, marker=None, max_items=None):
        user = self.get_user(user_name)
        keys = user.get_all_access_keys()

@ -937,5 +1153,33 @@ class IAMBackend(BaseBackend):
            'managed_policies': returned_policies
        }
    def create_saml_provider(self, name, saml_metadata_document):
        saml_provider = SAMLProvider(name, saml_metadata_document)
        self.saml_providers[name] = saml_provider
        return saml_provider

    def update_saml_provider(self, saml_provider_arn, saml_metadata_document):
        saml_provider = self.get_saml_provider(saml_provider_arn)
        saml_provider.saml_metadata_document = saml_metadata_document
        return saml_provider

    def delete_saml_provider(self, saml_provider_arn):
        try:
            for saml_provider in list(self.list_saml_providers()):
                if saml_provider.arn == saml_provider_arn:
                    del self.saml_providers[saml_provider.name]
        except KeyError:
            raise IAMNotFoundException(
                "SAMLProvider {0} not found".format(saml_provider_arn))

    def list_saml_providers(self):
        return self.saml_providers.values()

    def get_saml_provider(self, saml_provider_arn):
        for saml_provider in self.list_saml_providers():
            if saml_provider.arn == saml_provider_arn:
                return saml_provider
        raise IAMNotFoundException("SamlProvider {0} not found".format(saml_provider_arn))


iam_backend = IAMBackend()
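The SAML provider store is a plain name-to-object dict; ARNs are only resolved by scanning it. A minimal sketch of the round trip (metadata content is a placeholder; botocore enforces a minimum document length):

```python
import boto3
from moto import mock_iam

@mock_iam
def saml_roundtrip():
    client = boto3.client('iam', region_name='us-east-1')
    arn = client.create_saml_provider(
        Name='my-idp', SAMLMetadataDocument='a' * 1024)['SAMLProviderArn']
    client.update_saml_provider(SAMLProviderArn=arn,
                                SAMLMetadataDocument='b' * 1024)
    client.delete_saml_provider(SAMLProviderArn=arn)
    assert client.list_saml_providers()['SAMLProviderList'] == []
```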
@ -107,6 +107,69 @@ class IamResponse(BaseResponse):
        template = self.response_template(LIST_POLICIES_TEMPLATE)
        return template.render(policies=policies, marker=marker)

    def list_entities_for_policy(self):
        policy_arn = self._get_param('PolicyArn')

        # Options: 'User'|'Role'|'Group'|'LocalManagedPolicy'|'AWSManagedPolicy'
        entity = self._get_param('EntityFilter')
        path_prefix = self._get_param('PathPrefix')
        # policy_usage_filter = self._get_param('PolicyUsageFilter')
        marker = self._get_param('Marker')
        max_items = self._get_param('MaxItems')

        entity_roles = []
        entity_groups = []
        entity_users = []

        if entity == 'User':
            users = iam_backend.list_users(path_prefix, marker, max_items)
            if users:
                for user in users:
                    for p in user.managed_policies:
                        if p == policy_arn:
                            entity_users.append(user.name)

        elif entity == 'Role':
            roles = iam_backend.list_roles(path_prefix, marker, max_items)
            if roles:
                for role in roles:
                    for p in role.managed_policies:
                        if p == policy_arn:
                            entity_roles.append(role.name)

        elif entity == 'Group':
            groups = iam_backend.list_groups()
            if groups:
                for group in groups:
                    for p in group.managed_policies:
                        if p == policy_arn:
                            entity_groups.append(group.name)

        elif entity == 'LocalManagedPolicy' or entity == 'AWSManagedPolicy':
            users = iam_backend.list_users(path_prefix, marker, max_items)
            if users:
                for user in users:
                    for p in user.managed_policies:
                        if p == policy_arn:
                            entity_users.append(user.name)

            roles = iam_backend.list_roles(path_prefix, marker, max_items)
            if roles:
                for role in roles:
                    for p in role.managed_policies:
                        if p == policy_arn:
                            entity_roles.append(role.name)

            groups = iam_backend.list_groups()
            if groups:
                for group in groups:
                    for p in group.managed_policies:
                        if p == policy_arn:
                            entity_groups.append(group.name)

        template = self.response_template(LIST_ENTITIES_FOR_POLICY_TEMPLATE)
        return template.render(roles=entity_roles, users=entity_users, groups=entity_groups)
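A hedged sketch of the filter in action — only entities whose attached policies include the matching ARN are reported:

```python
import json

import boto3
from moto import mock_iam

@mock_iam
def entities_demo():
    client = boto3.client('iam', region_name='us-east-1')
    doc = json.dumps({'Version': '2012-10-17', 'Statement': []})
    arn = client.create_policy(PolicyName='p', PolicyDocument=doc)['Policy']['Arn']
    client.create_user(UserName='alice')
    client.attach_user_policy(UserName='alice', PolicyArn=arn)
    resp = client.list_entities_for_policy(PolicyArn=arn, EntityFilter='User')
    assert [u['UserName'] for u in resp['PolicyUsers']] == ['alice']
```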
    def create_role(self):
        role_name = self._get_param('RoleName')
        path = self._get_param('Path')

@ -169,6 +232,20 @@ class IamResponse(BaseResponse):
        template = self.response_template(GENERIC_EMPTY_TEMPLATE)
        return template.render(name="UpdateAssumeRolePolicyResponse")

    def update_role_description(self):
        role_name = self._get_param('RoleName')
        description = self._get_param('Description')
        role = iam_backend.update_role_description(role_name, description)
        template = self.response_template(UPDATE_ROLE_DESCRIPTION_TEMPLATE)
        return template.render(role=role)

    def update_role(self):
        role_name = self._get_param('RoleName')
        description = self._get_param('Description')
        role = iam_backend.update_role(role_name, description)
        template = self.response_template(UPDATE_ROLE_TEMPLATE)
        return template.render(role=role)

    def create_policy_version(self):
        policy_arn = self._get_param('PolicyArn')
        policy_document = self._get_param('PolicyDocument')

@ -201,7 +278,7 @@ class IamResponse(BaseResponse):

    def create_instance_profile(self):
        profile_name = self._get_param('InstanceProfileName')
        path = self._get_param('Path')
        path = self._get_param('Path', '/')

        profile = iam_backend.create_instance_profile(
            profile_name, path, role_ids=[])

@ -363,6 +440,18 @@ class IamResponse(BaseResponse):
        template = self.response_template(LIST_USERS_TEMPLATE)
        return template.render(action='List', users=users)
    def update_user(self):
        user_name = self._get_param('UserName')
        new_path = self._get_param('NewPath')
        new_user_name = self._get_param('NewUserName')
        iam_backend.update_user(user_name, new_path, new_user_name)
        if new_user_name:
            user = iam_backend.get_user(new_user_name)
        else:
            user = iam_backend.get_user(user_name)
        template = self.response_template(USER_TEMPLATE)
        return template.render(action='Update', user=user)

    def create_login_profile(self):
        user_name = self._get_param('UserName')
        password = self._get_param('Password')

@ -454,9 +543,14 @@ class IamResponse(BaseResponse):
        template = self.response_template(GENERIC_EMPTY_TEMPLATE)
        return template.render(name='UpdateAccessKey')

    def get_access_key_last_used(self):
        access_key_id = self._get_param('AccessKeyId')
        last_used_response = iam_backend.get_access_key_last_used(access_key_id)
        template = self.response_template(GET_ACCESS_KEY_LAST_USED_TEMPLATE)
        return template.render(user_name=last_used_response["user_name"], last_used=last_used_response["last_used"])

    def list_access_keys(self):
        user_name = self._get_param('UserName')

        keys = iam_backend.get_all_access_keys(user_name)
        template = self.response_template(LIST_ACCESS_KEYS_TEMPLATE)
        return template.render(user_name=user_name, keys=keys)

@ -549,9 +643,137 @@ class IamResponse(BaseResponse):
            policies=account_details['managed_policies'],
            users=account_details['users'],
            groups=account_details['groups'],
            roles=account_details['roles']
            roles=account_details['roles'],
            get_groups_for_user=iam_backend.get_groups_for_user
        )
    def create_saml_provider(self):
        saml_provider_name = self._get_param('Name')
        saml_metadata_document = self._get_param('SAMLMetadataDocument')
        saml_provider = iam_backend.create_saml_provider(saml_provider_name, saml_metadata_document)

        template = self.response_template(CREATE_SAML_PROVIDER_TEMPLATE)
        return template.render(saml_provider=saml_provider)

    def update_saml_provider(self):
        saml_provider_arn = self._get_param('SAMLProviderArn')
        saml_metadata_document = self._get_param('SAMLMetadataDocument')
        saml_provider = iam_backend.update_saml_provider(saml_provider_arn, saml_metadata_document)

        template = self.response_template(UPDATE_SAML_PROVIDER_TEMPLATE)
        return template.render(saml_provider=saml_provider)

    def delete_saml_provider(self):
        saml_provider_arn = self._get_param('SAMLProviderArn')
        iam_backend.delete_saml_provider(saml_provider_arn)

        template = self.response_template(DELETE_SAML_PROVIDER_TEMPLATE)
        return template.render()

    def list_saml_providers(self):
        saml_providers = iam_backend.list_saml_providers()

        template = self.response_template(LIST_SAML_PROVIDERS_TEMPLATE)
        return template.render(saml_providers=saml_providers)

    def get_saml_provider(self):
        saml_provider_arn = self._get_param('SAMLProviderArn')
        saml_provider = iam_backend.get_saml_provider(saml_provider_arn)

        template = self.response_template(GET_SAML_PROVIDER_TEMPLATE)
        return template.render(saml_provider=saml_provider)

    def upload_signing_certificate(self):
        user_name = self._get_param('UserName')
        cert_body = self._get_param('CertificateBody')

        cert = iam_backend.upload_signing_certificate(user_name, cert_body)
        template = self.response_template(UPLOAD_SIGNING_CERTIFICATE_TEMPLATE)
        return template.render(cert=cert)

    def update_signing_certificate(self):
        user_name = self._get_param('UserName')
        cert_id = self._get_param('CertificateId')
        status = self._get_param('Status')

        iam_backend.update_signing_certificate(user_name, cert_id, status)
        template = self.response_template(UPDATE_SIGNING_CERTIFICATE_TEMPLATE)
        return template.render()

    def delete_signing_certificate(self):
        user_name = self._get_param('UserName')
        cert_id = self._get_param('CertificateId')

        iam_backend.delete_signing_certificate(user_name, cert_id)
        template = self.response_template(DELETE_SIGNING_CERTIFICATE_TEMPLATE)
        return template.render()

    def list_signing_certificates(self):
        user_name = self._get_param('UserName')

        certs = iam_backend.list_signing_certificates(user_name)
        template = self.response_template(LIST_SIGNING_CERTIFICATES_TEMPLATE)
        return template.render(user_name=user_name, certificates=certs)

    def list_role_tags(self):
        role_name = self._get_param('RoleName')
        marker = self._get_param('Marker')
        max_items = self._get_param('MaxItems', 100)

        tags, marker = iam_backend.list_role_tags(role_name, marker, max_items)

        template = self.response_template(LIST_ROLE_TAG_TEMPLATE)
        return template.render(tags=tags, marker=marker)

    def tag_role(self):
        role_name = self._get_param('RoleName')
        tags = self._get_multi_param('Tags.member')

        iam_backend.tag_role(role_name, tags)

        template = self.response_template(TAG_ROLE_TEMPLATE)
        return template.render()

    def untag_role(self):
        role_name = self._get_param('RoleName')
        tag_keys = self._get_multi_param('TagKeys.member')

        iam_backend.untag_role(role_name, tag_keys)

        template = self.response_template(UNTAG_ROLE_TEMPLATE)
        return template.render()
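`_get_multi_param('Tags.member')` collects the indexed members that the AWS query protocol flattens into the request body. A hedged illustration of the raw parameters moto reassembles for `TagRole` (values are made up):

```python
# What boto3 sends on the wire for TagRole; _get_multi_param('Tags.member')
# rebuilds it into [{'Key': 'Stage', 'Value': 'prod'}, {'Key': 'Team', 'Value': 'infra'}].
request_params = {
    'Action': 'TagRole',
    'RoleName': 'demo',
    'Tags.member.1.Key': 'Stage',
    'Tags.member.1.Value': 'prod',
    'Tags.member.2.Key': 'Team',
    'Tags.member.2.Value': 'infra',
}
```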
LIST_ENTITIES_FOR_POLICY_TEMPLATE = """<ListEntitiesForPolicyResponse>
  <ListEntitiesForPolicyResult>
    <PolicyRoles>
      {% for role in roles %}
      <member>
        <RoleName>{{ role }}</RoleName>
      </member>
      {% endfor %}
    </PolicyRoles>
    <PolicyGroups>
      {% for group in groups %}
      <member>
        <GroupName>{{ group }}</GroupName>
      </member>
      {% endfor %}
    </PolicyGroups>
    <IsTruncated>false</IsTruncated>
    <PolicyUsers>
      {% for user in users %}
      <member>
        <UserName>{{ user }}</UserName>
      </member>
      {% endfor %}
    </PolicyUsers>
  </ListEntitiesForPolicyResult>
  <ResponseMetadata>
    <RequestId>eb358e22-9d1f-11e4-93eb-190ecEXAMPLE</RequestId>
  </ResponseMetadata>
</ListEntitiesForPolicyResponse>"""


ATTACH_ROLE_POLICY_TEMPLATE = """<AttachRolePolicyResponse>
  <ResponseMetadata>
@ -734,7 +956,7 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """<CreateInstanceProfileResponse xmlns="http
    <InstanceProfileName>{{ profile.name }}</InstanceProfileName>
    <Path>{{ profile.path }}</Path>
    <Arn>{{ profile.arn }}</Arn>
    <CreateDate>2012-05-09T16:11:10.222Z</CreateDate>
    <CreateDate>{{ profile.create_date }}</CreateDate>
  </InstanceProfile>
</CreateInstanceProfileResult>
<ResponseMetadata>

@ -753,7 +975,7 @@ GET_INSTANCE_PROFILE_TEMPLATE = """<GetInstanceProfileResponse xmlns="https://ia
      <Arn>{{ role.arn }}</Arn>
      <RoleName>{{ role.name }}</RoleName>
      <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
      <CreateDate>2012-05-09T15:45:35Z</CreateDate>
      <CreateDate>{{ role.create_date }}</CreateDate>
      <RoleId>{{ role.id }}</RoleId>
    </member>
    {% endfor %}

@ -761,7 +983,7 @@ GET_INSTANCE_PROFILE_TEMPLATE = """<GetInstanceProfileResponse xmlns="https://ia
    <InstanceProfileName>{{ profile.name }}</InstanceProfileName>
    <Path>{{ profile.path }}</Path>
    <Arn>{{ profile.arn }}</Arn>
    <CreateDate>2012-05-09T16:11:10Z</CreateDate>
    <CreateDate>{{ profile.create_date }}</CreateDate>
  </InstanceProfile>
</GetInstanceProfileResult>
<ResponseMetadata>

@ -776,7 +998,7 @@ CREATE_ROLE_TEMPLATE = """<CreateRoleResponse xmlns="https://iam.amazonaws.com/d
    <Arn>{{ role.arn }}</Arn>
    <RoleName>{{ role.name }}</RoleName>
    <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
    <CreateDate>2012-05-08T23:34:01.495Z</CreateDate>
    <CreateDate>{{ role.create_date }}</CreateDate>
    <RoleId>{{ role.id }}</RoleId>
  </Role>
</CreateRoleResult>

@ -796,6 +1018,40 @@ GET_ROLE_POLICY_TEMPLATE = """<GetRolePolicyResponse xmlns="https://iam.amazonaw
  </ResponseMetadata>
</GetRolePolicyResponse>"""

UPDATE_ROLE_TEMPLATE = """<UpdateRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <UpdateRoleResult>
  </UpdateRoleResult>
  <ResponseMetadata>
    <RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
  </ResponseMetadata>
</UpdateRoleResponse>"""
UPDATE_ROLE_DESCRIPTION_TEMPLATE = """<UpdateRoleDescriptionResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <UpdateRoleDescriptionResult>
    <Role>
      <Path>{{ role.path }}</Path>
      <Arn>{{ role.arn }}</Arn>
      <RoleName>{{ role.name }}</RoleName>
      <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
      <CreateDate>{{ role.create_date.isoformat() }}</CreateDate>
      <RoleId>{{ role.id }}</RoleId>
      {% if role.tags %}
      <Tags>
        {% for tag in role.get_tags() %}
        <member>
          <Key>{{ tag['Key'] }}</Key>
          <Value>{{ tag['Value'] }}</Value>
        </member>
        {% endfor %}
      </Tags>
      {% endif %}
    </Role>
  </UpdateRoleDescriptionResult>
  <ResponseMetadata>
    <RequestId>df37e965-9967-11e1-a4c3-270EXAMPLE04</RequestId>
  </ResponseMetadata>
</UpdateRoleDescriptionResponse>"""

GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <GetRoleResult>
    <Role>

@ -803,8 +1059,18 @@ GET_ROLE_TEMPLATE = """<GetRoleResponse xmlns="https://iam.amazonaws.com/doc/201
      <Arn>{{ role.arn }}</Arn>
      <RoleName>{{ role.name }}</RoleName>
      <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
      <CreateDate>2012-05-08T23:34:01Z</CreateDate>
      <CreateDate>{{ role.create_date }}</CreateDate>
      <RoleId>{{ role.id }}</RoleId>
      {% if role.tags %}
      <Tags>
        {% for tag in role.get_tags() %}
        <member>
          <Key>{{ tag['Key'] }}</Key>
          <Value>{{ tag['Value'] }}</Value>
        </member>
        {% endfor %}
      </Tags>
      {% endif %}
    </Role>
  </GetRoleResult>
  <ResponseMetadata>
@ -834,7 +1100,7 @@ LIST_ROLES_TEMPLATE = """<ListRolesResponse xmlns="https://iam.amazonaws.com/doc
      <Arn>{{ role.arn }}</Arn>
      <RoleName>{{ role.name }}</RoleName>
      <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
      <CreateDate>2012-05-09T15:45:35Z</CreateDate>
      <CreateDate>{{ role.create_date }}</CreateDate>
      <RoleId>{{ role.id }}</RoleId>
    </member>
    {% endfor %}

@ -865,7 +1131,7 @@ CREATE_POLICY_VERSION_TEMPLATE = """<CreatePolicyVersionResponse xmlns="https://
    <Document>{{ policy_version.document }}</Document>
    <VersionId>{{ policy_version.version_id }}</VersionId>
    <IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
    <CreateDate>2012-05-09T15:45:35Z</CreateDate>
    <CreateDate>{{ policy_version.create_datetime }}</CreateDate>
  </PolicyVersion>
</CreatePolicyVersionResult>
<ResponseMetadata>

@ -879,7 +1145,7 @@ GET_POLICY_VERSION_TEMPLATE = """<GetPolicyVersionResponse xmlns="https://iam.am
    <Document>{{ policy_version.document }}</Document>
    <VersionId>{{ policy_version.version_id }}</VersionId>
    <IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
    <CreateDate>2012-05-09T15:45:35Z</CreateDate>
    <CreateDate>{{ policy_version.create_datetime }}</CreateDate>
  </PolicyVersion>
</GetPolicyVersionResult>
<ResponseMetadata>

@ -896,7 +1162,7 @@ LIST_POLICY_VERSIONS_TEMPLATE = """<ListPolicyVersionsResponse xmlns="https://ia
      <Document>{{ policy_version.document }}</Document>
      <VersionId>{{ policy_version.version_id }}</VersionId>
      <IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
      <CreateDate>2012-05-09T15:45:35Z</CreateDate>
      <CreateDate>{{ policy_version.create_datetime }}</CreateDate>
    </member>
    {% endfor %}
  </Versions>

@ -912,7 +1178,7 @@ LIST_INSTANCE_PROFILES_TEMPLATE = """<ListInstanceProfilesResponse xmlns="https:
  <InstanceProfiles>
    {% for instance in instance_profiles %}
    <member>
      <Id>{{ instance.id }}</Id>
      <InstanceProfileId>{{ instance.id }}</InstanceProfileId>
      <Roles>
        {% for role in instance.roles %}
        <member>

@ -920,7 +1186,7 @@ LIST_INSTANCE_PROFILES_TEMPLATE = """<ListInstanceProfilesResponse xmlns="https:
          <Arn>{{ role.arn }}</Arn>
          <RoleName>{{ role.name }}</RoleName>
          <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
          <CreateDate>2012-05-09T15:45:35Z</CreateDate>
          <CreateDate>{{ role.create_date }}</CreateDate>
          <RoleId>{{ role.id }}</RoleId>
        </member>
        {% endfor %}

@ -928,7 +1194,7 @@ LIST_INSTANCE_PROFILES_TEMPLATE = """<ListInstanceProfilesResponse xmlns="https:
      <InstanceProfileName>{{ instance.name }}</InstanceProfileName>
      <Path>{{ instance.path }}</Path>
      <Arn>{{ instance.arn }}</Arn>
      <CreateDate>2012-05-09T16:27:03Z</CreateDate>
      <CreateDate>{{ instance.create_date }}</CreateDate>
    </member>
    {% endfor %}
  </InstanceProfiles>
@ -1199,8 +1465,8 @@ LIST_USER_POLICIES_TEMPLATE = """<ListUserPoliciesResponse>
    <member>{{ policy }}</member>
    {% endfor %}
  </PolicyNames>
  <IsTruncated>false</IsTruncated>
</ListUserPoliciesResult>
<IsTruncated>false</IsTruncated>
<ResponseMetadata>
  <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
</ResponseMetadata>

@ -1240,11 +1506,23 @@ LIST_ACCESS_KEYS_TEMPLATE = """<ListAccessKeysResponse>
  </ResponseMetadata>
</ListAccessKeysResponse>"""
GET_ACCESS_KEY_LAST_USED_TEMPLATE = """
<GetAccessKeyLastUsedResponse>
  <GetAccessKeyLastUsedResult>
    <UserName>{{ user_name }}</UserName>
    <AccessKeyLastUsed>
      <LastUsedDate>{{ last_used }}</LastUsedDate>
    </AccessKeyLastUsed>
  </GetAccessKeyLastUsedResult>
</GetAccessKeyLastUsedResponse>
"""

CREDENTIAL_REPORT_GENERATING = """
<GenerateCredentialReportResponse>
  <GenerateCredentialReportResult>
    <state>STARTED</state>
    <description>No report exists. Starting a new report generation task</description>
    <State>STARTED</State>
    <Description>No report exists. Starting a new report generation task</Description>
  </GenerateCredentialReportResult>
  <ResponseMetadata>
    <RequestId>fa788a82-aa8a-11e4-a278-1786c418872b</RequestId>

@ -1253,7 +1531,7 @@ CREDENTIAL_REPORT_GENERATING = """

CREDENTIAL_REPORT_GENERATED = """<GenerateCredentialReportResponse>
  <GenerateCredentialReportResult>
    <state>COMPLETE</state>
    <State>COMPLETE</State>
  </GenerateCredentialReportResult>
  <ResponseMetadata>
    <RequestId>fa788a82-aa8a-11e4-a278-1786c418872b</RequestId>
@ -1262,7 +1540,7 @@ CREDENTIAL_REPORT_GENERATED = """<GenerateCredentialReportResponse>

CREDENTIAL_REPORT = """<GetCredentialReportResponse>
  <GetCredentialReportResult>
    <content>{{ report }}</content>
    <Content>{{ report }}</Content>
    <GeneratedTime>2015-02-02T20:02:02Z</GeneratedTime>
    <ReportFormat>text/csv</ReportFormat>
  </GetCredentialReportResult>
@ -1277,23 +1555,23 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """<ListInstanceProfilesForRoleRespon
  <InstanceProfiles>
    {% for profile in instance_profiles %}
    <member>
      <Id>{{ profile.id }}</Id>
      <Roles>
        {% for role in profile.roles %}
        <member>
          <Path>{{ role.path }}</Path>
          <Arn>{{ role.arn }}</Arn>
          <RoleName>{{ role.name }}</RoleName>
          <AssumeRolePolicyDocument>{{ role.assume_policy_document }}</AssumeRolePolicyDocument>
          <CreateDate>2012-05-09T15:45:35Z</CreateDate>
          <RoleId>{{ role.id }}</RoleId>
        </member>
        {% endfor %}
      </Roles>
      <InstanceProfileName>{{ profile.name }}</InstanceProfileName>
      <Path>{{ profile.path }}</Path>
      <Arn>{{ profile.arn }}</Arn>
      <CreateDate>2012-05-09T16:27:11Z</CreateDate>
      <InstanceProfileId>{{ profile.id }}</InstanceProfileId>
      <Roles>
        {% for role in profile.roles %}
        <member>
          <Path>{{ role.path }}</Path>
          <Arn>{{ role.arn }}</Arn>
          <RoleName>{{ role.name }}</RoleName>
          <AssumeRolePolicyDocument>{{ role.assume_policy_document }}</AssumeRolePolicyDocument>
          <CreateDate>{{ role.create_date }}</CreateDate>
          <RoleId>{{ role.id }}</RoleId>
        </member>
        {% endfor %}
      </Roles>
      <InstanceProfileName>{{ profile.name }}</InstanceProfileName>
      <Path>{{ profile.path }}</Path>
      <Arn>{{ profile.arn }}</Arn>
      <CreateDate>{{ profile.create_date }}</CreateDate>
    </member>
    {% endfor %}
  </InstanceProfiles>
@ -1376,13 +1654,24 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
  <UserDetailList>
    {% for user in users %}
    <member>
      <GroupList />
      <AttachedManagedPolicies/>
      <GroupList>
        {% for group in get_groups_for_user(user.name) %}
        <member>{{ group.name }}</member>
        {% endfor %}
      </GroupList>
      <AttachedManagedPolicies>
        {% for policy in user.managed_policies %}
        <member>
          <PolicyName>{{ user.managed_policies[policy].name }}</PolicyName>
          <PolicyArn>{{ policy }}</PolicyArn>
        </member>
        {% endfor %}
      </AttachedManagedPolicies>
      <UserId>{{ user.id }}</UserId>
      <Path>{{ user.path }}</Path>
      <UserName>{{ user.name }}</UserName>
      <Arn>{{ user.arn }}</Arn>
      <CreateDate>2012-05-09T15:45:35Z</CreateDate>
      <CreateDate>{{ user.created_iso_8601 }}</CreateDate>
    </member>
    {% endfor %}
  </UserDetailList>
@ -1391,53 +1680,75 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
    <member>
      <GroupId>{{ group.id }}</GroupId>
      <AttachedManagedPolicies>
        {% for policy in group.managed_policies %}
        <member>
          <PolicyName>{{ policy.name }}</PolicyName>
          <PolicyArn>{{ policy.arn }}</PolicyArn>
        </member>
        {% for policy_arn in group.managed_policies %}
        <member>
          <PolicyName>{{ group.managed_policies[policy_arn].name }}</PolicyName>
          <PolicyArn>{{ policy_arn }}</PolicyArn>
        </member>
        {% endfor %}
      </AttachedManagedPolicies>
      <GroupName>{{ group.name }}</GroupName>
      <Path>{{ group.path }}</Path>
      <Arn>{{ group.arn }}</Arn>
      <CreateDate>2012-05-09T16:27:11Z</CreateDate>
      <GroupPolicyList/>
      <CreateDate>{{ group.create_date }}</CreateDate>
      <GroupPolicyList>
        {% for policy in group.policies %}
        <member>
          <PolicyName>{{ policy }}</PolicyName>
          <PolicyDocument>{{ group.get_policy(policy) }}</PolicyDocument>
        </member>
        {% endfor %}
      </GroupPolicyList>
    </member>
    {% endfor %}
  </GroupDetailList>
  <RoleDetailList>
    {% for role in roles %}
    <member>
      <RolePolicyList/>
      <AttachedManagedPolicies>
        {% for policy in role.managed_policies %}
      <RolePolicyList>
        {% for inline_policy in role.policies %}
        <member>
          <PolicyName>{{ policy.name }}</PolicyName>
          <PolicyArn>{{ policy.arn }}</PolicyArn>
          <PolicyName>{{ inline_policy }}</PolicyName>
          <PolicyDocument>{{ role.policies[inline_policy] }}</PolicyDocument>
        </member>
        {% endfor %}
      </RolePolicyList>
      <AttachedManagedPolicies>
        {% for policy_arn in role.managed_policies %}
        <member>
          <PolicyName>{{ role.managed_policies[policy_arn].name }}</PolicyName>
          <PolicyArn>{{ policy_arn }}</PolicyArn>
        </member>
        {% endfor %}
      </AttachedManagedPolicies>
      <Tags>
        {% for tag in role.get_tags() %}
        <member>
          <Key>{{ tag['Key'] }}</Key>
          <Value>{{ tag['Value'] }}</Value>
        </member>
        {% endfor %}
      </Tags>
      <InstanceProfileList>
        {% for profile in instance_profiles %}
        <member>
          <Id>{{ profile.id }}</Id>
          <Roles>
            {% for role in profile.roles %}
            <member>
              <Path>{{ role.path }}</Path>
              <Arn>{{ role.arn }}</Arn>
              <RoleName>{{ role.name }}</RoleName>
              <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
              <CreateDate>2012-05-09T15:45:35Z</CreateDate>
              <RoleId>{{ role.id }}</RoleId>
            </member>
            {% endfor %}
          </Roles>
          <InstanceProfileName>{{ profile.name }}</InstanceProfileName>
          <Path>{{ profile.path }}</Path>
          <Arn>{{ profile.arn }}</Arn>
          <CreateDate>2012-05-09T16:27:11Z</CreateDate>
          <InstanceProfileId>{{ profile.id }}</InstanceProfileId>
          <Roles>
            {% for role in profile.roles %}
            <member>
              <Path>{{ role.path }}</Path>
              <Arn>{{ role.arn }}</Arn>
              <RoleName>{{ role.name }}</RoleName>
              <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
              <CreateDate>{{ role.create_date }}</CreateDate>
              <RoleId>{{ role.id }}</RoleId>
            </member>
            {% endfor %}
          </Roles>
          <InstanceProfileName>{{ profile.name }}</InstanceProfileName>
          <Path>{{ profile.path }}</Path>
          <Arn>{{ profile.arn }}</Arn>
          <CreateDate>{{ profile.create_date }}</CreateDate>
        </member>
        {% endfor %}
      </InstanceProfileList>
@ -1445,7 +1756,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
      <Arn>{{ role.arn }}</Arn>
      <RoleName>{{ role.name }}</RoleName>
      <AssumeRolePolicyDocument>{{ role.assume_role_policy_document }}</AssumeRolePolicyDocument>
      <CreateDate>2014-07-30T17:09:20Z</CreateDate>
      <CreateDate>{{ role.create_date }}</CreateDate>
      <RoleId>{{ role.id }}</RoleId>
    </member>
    {% endfor %}

@ -1458,25 +1769,20 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
      <PolicyId>{{ policy.id }}</PolicyId>
      <Path>{{ policy.path }}</Path>
      <PolicyVersionList>
        {% for policy_version in policy.versions %}
        <member>
          <Document>
            {"Version":"2012-10-17","Statement":{"Effect":"Allow",
            "Action":["iam:CreatePolicy","iam:CreatePolicyVersion",
            "iam:DeletePolicy","iam:DeletePolicyVersion","iam:GetPolicy",
            "iam:GetPolicyVersion","iam:ListPolicies",
            "iam:ListPolicyVersions","iam:SetDefaultPolicyVersion"],
            "Resource":"*"}}
          </Document>
          <IsDefaultVersion>true</IsDefaultVersion>
          <VersionId>v1</VersionId>
          <CreateDate>2012-05-09T16:27:11Z</CreateDate>
          <Document>{{ policy_version.document }}</Document>
          <IsDefaultVersion>{{ policy_version.is_default }}</IsDefaultVersion>
          <VersionId>{{ policy_version.version_id }}</VersionId>
          <CreateDate>{{ policy_version.create_datetime }}</CreateDate>
        </member>
        {% endfor %}
      </PolicyVersionList>
      <Arn>{{ policy.arn }}</Arn>
      <AttachmentCount>1</AttachmentCount>
      <CreateDate>2012-05-09T16:27:11Z</CreateDate>
      <CreateDate>{{ policy.create_datetime }}</CreateDate>
      <IsAttachable>true</IsAttachable>
      <UpdateDate>2012-05-09T16:27:11Z</UpdateDate>
      <UpdateDate>{{ policy.update_datetime }}</UpdateDate>
    </member>
    {% endfor %}
  </Policies>

@ -1485,3 +1791,139 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """<GetAccountAuthorizationDetailsR
    <RequestId>92e79ae7-7399-11e4-8c85-4b53eEXAMPLE</RequestId>
  </ResponseMetadata>
</GetAccountAuthorizationDetailsResponse>"""
CREATE_SAML_PROVIDER_TEMPLATE = """<CreateSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <CreateSAMLProviderResult>
    <SAMLProviderArn>{{ saml_provider.arn }}</SAMLProviderArn>
  </CreateSAMLProviderResult>
  <ResponseMetadata>
    <RequestId>29f47818-99f5-11e1-a4c3-27EXAMPLE804</RequestId>
  </ResponseMetadata>
</CreateSAMLProviderResponse>"""

LIST_SAML_PROVIDERS_TEMPLATE = """<ListSAMLProvidersResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ListSAMLProvidersResult>
    <SAMLProviderList>
      {% for saml_provider in saml_providers %}
      <member>
        <Arn>{{ saml_provider.arn }}</Arn>
        <ValidUntil>2032-05-09T16:27:11Z</ValidUntil>
        <CreateDate>2012-05-09T16:27:03Z</CreateDate>
      </member>
      {% endfor %}
    </SAMLProviderList>
  </ListSAMLProvidersResult>
  <ResponseMetadata>
    <RequestId>fd74fa8d-99f3-11e1-a4c3-27EXAMPLE804</RequestId>
  </ResponseMetadata>
</ListSAMLProvidersResponse>"""

GET_SAML_PROVIDER_TEMPLATE = """<GetSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <GetSAMLProviderResult>
    <CreateDate>2012-05-09T16:27:11Z</CreateDate>
    <ValidUntil>2015-12-31T21:59:59Z</ValidUntil>
    <SAMLMetadataDocument>{{ saml_provider.saml_metadata_document }}</SAMLMetadataDocument>
  </GetSAMLProviderResult>
  <ResponseMetadata>
    <RequestId>29f47818-99f5-11e1-a4c3-27EXAMPLE804</RequestId>
  </ResponseMetadata>
</GetSAMLProviderResponse>"""

DELETE_SAML_PROVIDER_TEMPLATE = """<DeleteSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ResponseMetadata>
    <RequestId>c749ee7f-99ef-11e1-a4c3-27EXAMPLE804</RequestId>
  </ResponseMetadata>
</DeleteSAMLProviderResponse>"""

UPDATE_SAML_PROVIDER_TEMPLATE = """<UpdateSAMLProviderResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <UpdateSAMLProviderResult>
    <SAMLProviderArn>{{ saml_provider.arn }}</SAMLProviderArn>
  </UpdateSAMLProviderResult>
  <ResponseMetadata>
    <RequestId>29f47818-99f5-11e1-a4c3-27EXAMPLE804</RequestId>
  </ResponseMetadata>
</UpdateSAMLProviderResponse>"""

UPLOAD_SIGNING_CERTIFICATE_TEMPLATE = """<UploadSigningCertificateResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <UploadSigningCertificateResult>
    <Certificate>
      <UserName>{{ cert.user_name }}</UserName>
      <CertificateId>{{ cert.id }}</CertificateId>
      <CertificateBody>{{ cert.body }}</CertificateBody>
      <Status>{{ cert.status }}</Status>
    </Certificate>
  </UploadSigningCertificateResult>
  <ResponseMetadata>
    <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
  </ResponseMetadata>
</UploadSigningCertificateResponse>"""


UPDATE_SIGNING_CERTIFICATE_TEMPLATE = """<UpdateSigningCertificateResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ResponseMetadata>
    <RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
  </ResponseMetadata>
</UpdateSigningCertificateResponse>"""


DELETE_SIGNING_CERTIFICATE_TEMPLATE = """<DeleteSigningCertificateResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ResponseMetadata>
    <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
  </ResponseMetadata>
</DeleteSigningCertificateResponse>"""


LIST_SIGNING_CERTIFICATES_TEMPLATE = """<ListSigningCertificatesResponse>
  <ListSigningCertificatesResult>
    <UserName>{{ user_name }}</UserName>
    <Certificates>
      {% for cert in certificates %}
      <member>
        <UserName>{{ user_name }}</UserName>
        <CertificateId>{{ cert.id }}</CertificateId>
        <CertificateBody>{{ cert.body }}</CertificateBody>
        <Status>{{ cert.status }}</Status>
      </member>
      {% endfor %}
    </Certificates>
    <IsTruncated>false</IsTruncated>
  </ListSigningCertificatesResult>
  <ResponseMetadata>
    <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
  </ResponseMetadata>
</ListSigningCertificatesResponse>"""


TAG_ROLE_TEMPLATE = """<TagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ResponseMetadata>
    <RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
  </ResponseMetadata>
</TagRoleResponse>"""


LIST_ROLE_TAG_TEMPLATE = """<ListRoleTagsResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ListRoleTagsResult>
    <IsTruncated>{{ 'true' if marker else 'false' }}</IsTruncated>
    {% if marker %}
    <Marker>{{ marker }}</Marker>
    {% endif %}
    <Tags>
      {% for tag in tags %}
      <member>
        <Key>{{ tag['Key'] }}</Key>
        <Value>{{ tag['Value'] }}</Value>
      </member>
      {% endfor %}
    </Tags>
  </ListRoleTagsResult>
  <ResponseMetadata>
    <RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
  </ResponseMetadata>
</ListRoleTagsResponse>"""


UNTAG_ROLE_TEMPLATE = """<UntagRoleResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
  <ResponseMetadata>
    <RequestId>EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE</RequestId>
  </ResponseMetadata>
</UntagRoleResponse>"""
@ -12,8 +12,7 @@ def random_alphanumeric(length):
    )


def random_resource_id():
    size = 20
def random_resource_id(size=20):
    chars = list(range(10)) + list(string.ascii_lowercase)

    return ''.join(six.text_type(random.choice(chars)) for x in range(size))
@ -1,42 +1,50 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError


class IoTClientError(JsonRESTError):
    code = 400


class ResourceNotFoundException(IoTClientError):
    def __init__(self):
        self.code = 404
        super(ResourceNotFoundException, self).__init__(
            "ResourceNotFoundException",
            "The specified resource does not exist"
        )


class InvalidRequestException(IoTClientError):
    def __init__(self, msg=None):
        self.code = 400
        super(InvalidRequestException, self).__init__(
            "InvalidRequestException",
            msg or "The request is not valid."
        )


class InvalidStateTransitionException(IoTClientError):
    def __init__(self, msg=None):
        self.code = 409
        super(InvalidStateTransitionException, self).__init__(
            "InvalidStateTransitionException",
            msg or "An attempt was made to change to an invalid state."
        )


class VersionConflictException(IoTClientError):
    def __init__(self, name):
        self.code = 409
        super(VersionConflictException, self).__init__(
            'VersionConflictException',
            'The version for thing %s does not match the expected version.' % name
        )
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError


class IoTClientError(JsonRESTError):
    code = 400


class ResourceNotFoundException(IoTClientError):
    def __init__(self):
        self.code = 404
        super(ResourceNotFoundException, self).__init__(
            "ResourceNotFoundException",
            "The specified resource does not exist"
        )


class InvalidRequestException(IoTClientError):
    def __init__(self, msg=None):
        self.code = 400
        super(InvalidRequestException, self).__init__(
            "InvalidRequestException",
            msg or "The request is not valid."
        )


class VersionConflictException(IoTClientError):
    def __init__(self, name):
        self.code = 409
        super(VersionConflictException, self).__init__(
            'VersionConflictException',
            'The version for thing %s does not match the expected version.' % name
        )


class CertificateStateException(IoTClientError):
    def __init__(self, msg, cert_id):
        self.code = 406
        super(CertificateStateException, self).__init__(
            'CertificateStateException',
            '%s Id: %s' % (msg, cert_id)
        )


class DeleteConflictException(IoTClientError):
    def __init__(self, msg):
        self.code = 409
        super(DeleteConflictException, self).__init__(
            'DeleteConflictException', msg
        )
36
moto/kms/exceptions.py
Normal file
@ -0,0 +1,36 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError


class NotFoundException(JsonRESTError):
    code = 400

    def __init__(self, message):
        super(NotFoundException, self).__init__(
            "NotFoundException", message)


class ValidationException(JsonRESTError):
    code = 400

    def __init__(self, message):
        super(ValidationException, self).__init__(
            "ValidationException", message)


class AlreadyExistsException(JsonRESTError):
    code = 400

    def __init__(self, message):
        super(AlreadyExistsException, self).__init__(
            "AlreadyExistsException", message)


class NotAuthorizedException(JsonRESTError):
    code = 400

    def __init__(self):
        super(NotAuthorizedException, self).__init__(
            "NotAuthorizedException", None)

        self.description = '{"__type":"NotAuthorizedException"}'
@ -1,5 +1,6 @@
from __future__ import unicode_literals

import os
import boto.kms
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_without_milliseconds

@ -21,6 +22,7 @@ class Key(BaseModel):
        self.account_id = "0123456789012"
        self.key_rotation_status = False
        self.deletion_date = None
        self.tags = {}

    @property
    def physical_resource_id(self):

@ -35,7 +37,7 @@ class Key(BaseModel):
            "KeyMetadata": {
                "AWSAccountId": self.account_id,
                "Arn": self.arn,
                "CreationDate": "2015-01-01 00:00:00",
                "CreationDate": datetime.strftime(datetime.utcnow(), "%s"),
                "Description": self.description,
                "Enabled": self.enabled,
                "KeyId": self.id,

@ -63,7 +65,6 @@ class Key(BaseModel):
        )
        key.key_rotation_status = properties['EnableKeyRotation']
        key.enabled = properties['Enabled']

        return key

    def get_cfn_attribute(self, attribute_name):

@ -84,6 +85,18 @@ class KmsBackend(BaseBackend):
        self.keys[key.id] = key
        return key
    def update_key_description(self, key_id, description):
        key = self.keys[self.get_key_id(key_id)]
        key.description = description

    def tag_resource(self, key_id, tags):
        key = self.keys[self.get_key_id(key_id)]
        key.tags = tags

    def list_resource_tags(self, key_id):
        key = self.keys[self.get_key_id(key_id)]
        return key.tags

    def delete_key(self, key_id):
        if key_id in self.keys:
            if key_id in self.key_to_aliases:
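Note that `tag_resource` assigns `key.tags` outright rather than merging, so a second call replaces any earlier tags. A hedged sketch of the round trip (KMS tags use `TagKey`/`TagValue` rather than `Key`/`Value`):

```python
import boto3
from moto import mock_kms

@mock_kms
def kms_tag_demo():
    client = boto3.client('kms', region_name='us-east-1')
    key_id = client.create_key(Description='demo')['KeyMetadata']['KeyId']
    client.tag_resource(KeyId=key_id,
                        Tags=[{'TagKey': 'env', 'TagValue': 'test'}])
    # A second tag_resource call would replace, not merge with, this set.
    return client.list_resource_tags(KeyId=key_id)['Tags']
```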
@ -147,27 +160,38 @@
        return self.keys[self.get_key_id(key_id)].policy

    def disable_key(self, key_id):
        if key_id in self.keys:
            self.keys[key_id].enabled = False
            self.keys[key_id].key_state = 'Disabled'
        self.keys[key_id].enabled = False
        self.keys[key_id].key_state = 'Disabled'

    def enable_key(self, key_id):
        if key_id in self.keys:
            self.keys[key_id].enabled = True
            self.keys[key_id].key_state = 'Enabled'
        self.keys[key_id].enabled = True
        self.keys[key_id].key_state = 'Enabled'

    def cancel_key_deletion(self, key_id):
        if key_id in self.keys:
            self.keys[key_id].key_state = 'Disabled'
            self.keys[key_id].deletion_date = None
        self.keys[key_id].key_state = 'Disabled'
        self.keys[key_id].deletion_date = None

    def schedule_key_deletion(self, key_id, pending_window_in_days):
        if key_id in self.keys:
            if 7 <= pending_window_in_days <= 30:
                self.keys[key_id].enabled = False
                self.keys[key_id].key_state = 'PendingDeletion'
                self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)
                return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)
        if 7 <= pending_window_in_days <= 30:
            self.keys[key_id].enabled = False
            self.keys[key_id].key_state = 'PendingDeletion'
            self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days)
            return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date)

    def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens):
        key = self.keys[self.get_key_id(key_id)]

        if key_spec:
            if key_spec == 'AES_128':
                bytes = 16
            else:
                bytes = 32
        else:
            bytes = number_of_bytes

        plaintext = os.urandom(bytes)

        return plaintext, key.arn


kms_backends = {}
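A hedged sketch of the backend above through boto3 — the mock hands back random plaintext bytes sized by `KeySpec` (16 for AES_128, otherwise 32), and the ARN it returns as the key id:

```python
import boto3
from moto import mock_kms

@mock_kms
def data_key_demo():
    client = boto3.client('kms', region_name='us-east-1')
    key_id = client.create_key(Description='demo')['KeyMetadata']['KeyId']
    resp = client.generate_data_key(KeyId=key_id, KeySpec='AES_256')
    assert len(resp['Plaintext']) == 32   # os.urandom output, sized by KeySpec
    return resp
```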
@ -5,11 +5,9 @@ import json
import re
import six

from boto.exception import JSONResponseError
from boto.kms.exceptions import AlreadyExistsException, NotFoundException

from moto.core.responses import BaseResponse
from .models import kms_backends
from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException

reserved_aliases = [
    'alias/aws/ebs',

@ -38,6 +36,28 @@ class KmsResponse(BaseResponse):
            policy, key_usage, description, self.region)
        return json.dumps(key.to_dict())
    def update_key_description(self):
        key_id = self.parameters.get('KeyId')
        description = self.parameters.get('Description')

        self.kms_backend.update_key_description(key_id, description)
        return json.dumps(None)

    def tag_resource(self):
        key_id = self.parameters.get('KeyId')
        tags = self.parameters.get('Tags')
        self.kms_backend.tag_resource(key_id, tags)
        return json.dumps({})

    def list_resource_tags(self):
        key_id = self.parameters.get('KeyId')
        tags = self.kms_backend.list_resource_tags(key_id)
        return json.dumps({
            "Tags": tags,
            "NextMarker": None,
            "Truncated": False,
        })
    def describe_key(self):
        key_id = self.parameters.get('KeyId')
        try:

@ -66,36 +86,28 @@ class KmsResponse(BaseResponse):
    def create_alias(self):
        alias_name = self.parameters['AliasName']
        target_key_id = self.parameters['TargetKeyId']
        region = self.region

        if not alias_name.startswith('alias/'):
            raise JSONResponseError(400, 'Bad Request',
                                    body={'message': 'Invalid identifier', '__type': 'ValidationException'})
            raise ValidationException('Invalid identifier')

        if alias_name in reserved_aliases:
            raise JSONResponseError(400, 'Bad Request', body={
                '__type': 'NotAuthorizedException'})
            raise NotAuthorizedException()

        if ':' in alias_name:
            raise JSONResponseError(400, 'Bad Request', body={
                'message': '{alias_name} contains invalid characters for an alias'.format(**locals()),
                '__type': 'ValidationException'})
            raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name))

        if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name):
            raise JSONResponseError(400, 'Bad Request', body={
                'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$"
                .format(**locals()),
                '__type': 'ValidationException'})
            raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' "
                                      "failed to satisfy constraint: Member must satisfy regular "
                                      "expression pattern: ^[a-zA-Z0-9:/_-]+$"
                                      .format(alias_name=alias_name))

        if self.kms_backend.alias_exists(target_key_id):
            raise JSONResponseError(400, 'Bad Request', body={
                'message': 'Aliases must refer to keys. Not aliases',
                '__type': 'ValidationException'})
            raise ValidationException('Aliases must refer to keys. Not aliases')

        if self.kms_backend.alias_exists(alias_name):
            raise AlreadyExistsException(400, 'Bad Request', body={
                'message': 'An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} already exists'
                .format(**locals()), '__type': 'AlreadyExistsException'})
            raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} '
                                         'already exists'.format(region=self.region, alias_name=alias_name))

        self.kms_backend.add_alias(target_key_id, alias_name)
@ -103,16 +115,13 @@ class KmsResponse(BaseResponse):

    def delete_alias(self):
        alias_name = self.parameters['AliasName']
        region = self.region

        if not alias_name.startswith('alias/'):
            raise JSONResponseError(400, 'Bad Request',
                                    body={'message': 'Invalid identifier', '__type': 'ValidationException'})
            raise ValidationException('Invalid identifier')

        if not self.kms_backend.alias_exists(alias_name):
            raise NotFoundException(400, 'Bad Request', body={
                'message': 'Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(**locals()),
                '__type': 'NotFoundException'})
            raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:'
                                    '{alias_name} is not found.'.format(region=self.region, alias_name=alias_name))

        self.kms_backend.delete_alias(alias_name)
@ -150,9 +159,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.enable_key_rotation(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))

        return json.dumps(None)

@ -162,9 +170,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.disable_key_rotation(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
        return json.dumps(None)

    def get_key_rotation_status(self):

@ -173,9 +180,8 @@ class KmsResponse(BaseResponse):
        try:
            rotation_enabled = self.kms_backend.get_key_rotation_status(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
        return json.dumps({'KeyRotationEnabled': rotation_enabled})

    def put_key_policy(self):

@ -188,9 +194,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.put_key_policy(key_id, policy)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))

        return json.dumps(None)
@ -203,9 +208,8 @@ class KmsResponse(BaseResponse):
        try:
            return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)})
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))

    def list_key_policies(self):
        key_id = self.parameters.get('KeyId')

@ -213,9 +217,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.describe_key(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))

        return json.dumps({'Truncated': False, 'PolicyNames': ['default']})

@ -227,11 +230,17 @@ class KmsResponse(BaseResponse):
        value = self.parameters.get("Plaintext")
        if isinstance(value, six.text_type):
            value = value.encode('utf-8')
        return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8")})
        return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'})

    def decrypt(self):
        # TODO: refuse to decode if EncryptionContext is not the same as when it was encrypted / generated

        value = self.parameters.get("CiphertextBlob")
        return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")})
        try:
            return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")})
        except UnicodeDecodeError:
            # generate_data_key produces random bytes, which when "decrypted" are still returned as base64
            return json.dumps({"Plaintext": value})
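Since the mock's "encryption" is plain base64, decrypting an encrypted blob round-trips the plaintext losslessly; the `UnicodeDecodeError` branch only exists for ciphertext that originated as `generate_data_key`'s random bytes. A conceptual sketch of what the pair boils down to (not the wire format boto3 sees):

```python
import base64

# The mock's cipher, in miniature: encrypt base64-encodes the plaintext,
# and decrypt simply reverses it.
ciphertext = base64.b64encode(b'secret')          # "encrypt"
assert base64.b64decode(ciphertext) == b'secret'  # "decrypt"
```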
    def disable_key(self):
        key_id = self.parameters.get('KeyId')

@ -239,9 +248,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.disable_key(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
        return json.dumps(None)

    def enable_key(self):

@ -250,9 +258,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.enable_key(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
        return json.dumps(None)

    def cancel_key_deletion(self):

@ -261,9 +268,8 @@ class KmsResponse(BaseResponse):
        try:
            self.kms_backend.cancel_key_deletion(key_id)
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
        return json.dumps({'KeyId': key_id})

    def schedule_key_deletion(self):

@ -279,19 +285,62 @@ class KmsResponse(BaseResponse):
                'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days)
            })
        except KeyError:
            raise JSONResponseError(404, 'Not Found', body={
                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
                '__type': 'NotFoundException'})
            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))

    def generate_data_key(self):
        key_id = self.parameters.get('KeyId')
        encryption_context = self.parameters.get('EncryptionContext')
        number_of_bytes = self.parameters.get('NumberOfBytes')
        key_spec = self.parameters.get('KeySpec')
        grant_tokens = self.parameters.get('GrantTokens')

        # Param validation
        if key_id.startswith('alias'):
            if self.kms_backend.get_key_id_from_alias(key_id) is None:
                raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(
                    region=self.region, alias_name=key_id))
        else:
            if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys:
                raise NotFoundException('Invalid keyId')

        if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0):
            raise ValidationException("1 validation error detected: Value '2048' at 'numberOfBytes' failed "
                                      "to satisfy constraint: Member must have value less than or "
                                      "equal to 1024")

        if key_spec and key_spec not in ('AES_256', 'AES_128'):
            raise ValidationException("1 validation error detected: Value 'AES_257' at 'keySpec' failed "
                                      "to satisfy constraint: Member must satisfy enum value set: "
                                      "[AES_256, AES_128]")
|
||||
if not key_spec and not number_of_bytes:
|
||||
raise ValidationException("Please specify either number of bytes or key spec.")
|
||||
if key_spec and number_of_bytes:
|
||||
raise ValidationException("Please specify either number of bytes or key spec.")
|
||||
|
||||
plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context,
|
||||
number_of_bytes, key_spec, grant_tokens)
|
||||
|
||||
plaintext = base64.b64encode(plaintext).decode()
|
||||
|
||||
return json.dumps({
|
||||
'CiphertextBlob': plaintext,
|
||||
'Plaintext': plaintext,
|
||||
'KeyId': key_arn # not alias
|
||||
})
|
||||
|
||||
def generate_data_key_without_plaintext(self):
|
||||
result = json.loads(self.generate_data_key())
|
||||
del result['Plaintext']
|
||||
|
||||
return json.dumps(result)
|
||||
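Reviewer note (not part of this commit): the new generate_data_key handler rejects requests unless exactly one of KeySpec or NumberOfBytes is supplied, and resolves aliases before validating the key id. A minimal sketch of how this could be exercised through boto3, assuming the @mock_kms decorator from this repository (test name and assertions are illustrative):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_kms

    @mock_kms
    def test_generate_data_key_sketch():
        client = boto3.client('kms', region_name='us-east-1')
        key_id = client.create_key(Description='sketch')['KeyMetadata']['KeyId']
        # Exactly one of KeySpec / NumberOfBytes must be supplied.
        resp = client.generate_data_key(KeyId=key_id, KeySpec='AES_256')
        assert resp['Plaintext']  # the same payload is echoed back as CiphertextBlob
        try:
            client.generate_data_key(KeyId=key_id, KeySpec='AES_256', NumberOfBytes=32)
        except ClientError as err:
            # Expected to surface the ValidationException raised above.
            assert err.response['Error']['Code'] == 'ValidationException'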


def _assert_valid_key_id(key_id):
    if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE):
        raise JSONResponseError(404, 'Not Found', body={
            'message': ' Invalid keyId', '__type': 'NotFoundException'})
        raise NotFoundException('Invalid keyId')


def _assert_default_policy(policy_name):
    if policy_name != 'default':
        raise JSONResponseError(404, 'Not Found', body={
            'message': "No such policy exists",
            '__type': 'NotFoundException'})
        raise NotFoundException("No such policy exists")

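Reviewer note (not part of this commit): every JSONResponseError(404, ...) in this file is swapped for the typed NotFoundException, so the error now flows through moto's standard exception handling. A hedged sketch of the client-visible behaviour (illustrative test, assumes @mock_kms):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_kms

    @mock_kms
    def test_missing_key_raises_not_found_sketch():
        client = boto3.client('kms', region_name='us-east-1')
        try:
            client.get_key_rotation_status(KeyId='00000000-0000-0000-0000-000000000000')
        except ClientError as err:
            assert err.response['Error']['Code'] == 'NotFoundException'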
@ -242,7 +242,8 @@ class LogsBackend(BaseBackend):
        if next_token is None:
            next_token = 0

        groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix))
        groups = [group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix)]
        groups = sorted(groups, key=lambda x: x['creationTime'], reverse=True)
        groups_page = groups[next_token:next_token + limit]

        next_token += limit
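Reviewer note (not part of this commit): describe_log_groups now sorts by creationTime, newest first, and treats next_token as a plain list offset. A self-contained illustration of the pagination arithmetic (data is made up):

    groups = [{'logGroupName': 'g1', 'creationTime': 100},
              {'logGroupName': 'g2', 'creationTime': 300},
              {'logGroupName': 'g3', 'creationTime': 200}]
    groups = sorted(groups, key=lambda x: x['creationTime'], reverse=True)
    limit, next_token = 2, 0
    page = groups[next_token:next_token + limit]  # first page; then next_token += limit
    assert [g['logGroupName'] for g in page] == ['g2', 'g3']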
@ -1113,4 +1113,4 @@ def httprettified(test):

    if isinstance(test, ClassTypes):
        return decorate_class(test)
    return decorate_callable(test)
    return decorate_callable(test)
@ -29,7 +29,6 @@ import re
from .compat import BaseClass
from .utils import decode_utf8


STATUSES = {
    100: "Continue",
    101: "Switching Protocols",

@ -24,7 +24,7 @@ class HealthCheck(BaseModel):
        self.id = health_check_id
        self.ip_address = health_check_args.get("ip_address")
        self.port = health_check_args.get("port", 80)
        self._type = health_check_args.get("type")
        self.type_ = health_check_args.get("type")
        self.resource_path = health_check_args.get("resource_path")
        self.fqdn = health_check_args.get("fqdn")
        self.search_string = health_check_args.get("search_string")
@ -58,7 +58,7 @@ class HealthCheck(BaseModel):
            <HealthCheckConfig>
                <IPAddress>{{ health_check.ip_address }}</IPAddress>
                <Port>{{ health_check.port }}</Port>
                <Type>{{ health_check._type }}</Type>
                <Type>{{ health_check.type_ }}</Type>
                <ResourcePath>{{ health_check.resource_path }}</ResourcePath>
                <FullyQualifiedDomainName>{{ health_check.fqdn }}</FullyQualifiedDomainName>
                <RequestInterval>{{ health_check.request_interval }}</RequestInterval>
@ -76,7 +76,7 @@ class RecordSet(BaseModel):

    def __init__(self, kwargs):
        self.name = kwargs.get('Name')
        self._type = kwargs.get('Type')
        self.type_ = kwargs.get('Type')
        self.ttl = kwargs.get('TTL')
        self.records = kwargs.get('ResourceRecords', [])
        self.set_identifier = kwargs.get('SetIdentifier')
@ -130,7 +130,7 @@ class RecordSet(BaseModel):
    def to_xml(self):
        template = Template("""<ResourceRecordSet>
                <Name>{{ record_set.name }}</Name>
                <Type>{{ record_set._type }}</Type>
                <Type>{{ record_set.type_ }}</Type>
                {% if record_set.set_identifier %}
                    <SetIdentifier>{{ record_set.set_identifier }}</SetIdentifier>
                {% endif %}
@ -183,7 +183,7 @@ class FakeZone(BaseModel):
    def upsert_rrset(self, record_set):
        new_rrset = RecordSet(record_set)
        for i, rrset in enumerate(self.rrsets):
            if rrset.name == new_rrset.name:
            if rrset.name == new_rrset.name and rrset.type_ == new_rrset.type_:
                self.rrsets[i] = new_rrset
                break
        else:
@ -202,7 +202,7 @@ class FakeZone(BaseModel):
        record_sets = list(self.rrsets)  # Copy the list
        if start_type:
            record_sets = [
                record_set for record_set in record_sets if record_set._type >= start_type]
                record_set for record_set in record_sets if record_set.type_ >= start_type]
        if start_name:
            record_sets = [
                record_set for record_set in record_sets if record_set.name >= start_name]

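Reviewer note (not part of this commit): upsert_rrset now matches on name and type, so an UPSERT of one record type no longer clobbers a different type at the same name. A sketch under @mock_route53 (test body is illustrative):

    import boto3
    from moto import mock_route53

    @mock_route53
    def test_upsert_matches_name_and_type_sketch():
        client = boto3.client('route53', region_name='us-east-1')
        zone_id = client.create_hosted_zone(
            Name='example.com', CallerReference='ref')['HostedZone']['Id']
        for rtype, value in [('A', '192.0.2.1'), ('TXT', '"hello"')]:
            client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch={'Changes': [{
                    'Action': 'UPSERT',
                    'ResourceRecordSet': {
                        'Name': 'www.example.com',
                        'Type': rtype,
                        'TTL': 60,
                        'ResourceRecords': [{'Value': value}],
                    }}]})
        records = client.list_resource_record_sets(HostedZoneId=zone_id)
        pairs = [(r['Name'], r['Type']) for r in records['ResourceRecordSets']]
        # Before this fix the TXT upsert would have replaced the A record.
        assert ('www.example.com.', 'A') in pairs
        assert ('www.example.com.', 'TXT') in pairs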
@ -123,6 +123,9 @@ class Route53(BaseResponse):
                """ % (record_set['Name'], the_zone.name)
                return 400, headers, error_msg

            if not record_set['Name'].endswith('.'):
                record_set['Name'] += '.'

            if action in ('CREATE', 'UPSERT'):
                if 'ResourceRecords' in record_set:
                    resource_records = list(

@ -178,3 +178,24 @@ class InvalidStorageClass(S3ClientError):
            "InvalidStorageClass",
            "The storage class you specified is not valid",
            *args, **kwargs)


class InvalidBucketName(S3ClientError):
    code = 400

    def __init__(self, *args, **kwargs):
        super(InvalidBucketName, self).__init__(
            "InvalidBucketName",
            "The specified bucket is not valid.",
            *args, **kwargs
        )


class DuplicateTagKeys(S3ClientError):
    code = 400

    def __init__(self, *args, **kwargs):
        super(DuplicateTagKeys, self).__init__(
            "InvalidTag",
            "Cannot provide multiple Tags with the same key",
            *args, **kwargs)

@ -8,19 +8,26 @@ import itertools
import codecs
import random
import string
import tempfile
import sys
import uuid

import six

from bisect import insort
from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \
    InvalidNotificationDestination, MalformedXML, InvalidStorageClass
from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \
    EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys
from .utils import clean_key_name, _VersionedKeyStore

MAX_BUCKET_NAME_LENGTH = 63
MIN_BUCKET_NAME_LENGTH = 3
UPLOAD_ID_BYTES = 43
UPLOAD_PART_MIN_SIZE = 5242880
STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA"]
DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024
DEFAULT_TEXT_ENCODING = sys.getdefaultencoding()


class FakeDeleteMarker(BaseModel):
@ -29,7 +36,7 @@ class FakeDeleteMarker(BaseModel):
        self.key = key
        self.name = key.name
        self.last_modified = datetime.datetime.utcnow()
        self._version_id = key.version_id + 1
        self._version_id = str(uuid.uuid4())

    @property
    def last_modified_ISO8601(self):
@ -42,9 +49,9 @@ class FakeDeleteMarker(BaseModel):

class FakeKey(BaseModel):

    def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0):
    def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0,
                 max_buffer_size=DEFAULT_KEY_BUFFER_SIZE):
        self.name = name
        self.value = value
        self.last_modified = datetime.datetime.utcnow()
        self.acl = get_canned_acl('private')
        self.website_redirect_location = None
@ -56,14 +63,37 @@ class FakeKey(BaseModel):
        self._is_versioned = is_versioned
        self._tagging = FakeTagging()

        self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
        self._max_buffer_size = max_buffer_size
        self.value = value

    @property
    def version_id(self):
        return self._version_id

    def copy(self, new_name=None):
    @property
    def value(self):
        self._value_buffer.seek(0)
        return self._value_buffer.read()

    @value.setter
    def value(self, new_value):
        self._value_buffer.seek(0)
        self._value_buffer.truncate()

        # Hack for working around moto's own unit tests; this probably won't
        # actually get hit in normal use.
        if isinstance(new_value, six.text_type):
            new_value = new_value.encode(DEFAULT_TEXT_ENCODING)
        self._value_buffer.write(new_value)

    def copy(self, new_name=None, new_is_versioned=None):
        r = copy.deepcopy(self)
        if new_name is not None:
            r.name = new_name
        if new_is_versioned is not None:
            r._is_versioned = new_is_versioned
            r.refresh_version()
        return r

    def set_metadata(self, metadata, replace=False):
@ -83,29 +113,34 @@ class FakeKey(BaseModel):
        self.acl = acl

    def append_to_value(self, value):
        self.value += value
        self._value_buffer.seek(0, os.SEEK_END)
        self._value_buffer.write(value)

        self.last_modified = datetime.datetime.utcnow()
        self._etag = None  # must recalculate etag
        if self._is_versioned:
            self._version_id += 1
            self._version_id = str(uuid.uuid4())
        else:
            self._is_versioned = 0
            self._version_id = None

    def restore(self, days):
        self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)

    def increment_version(self):
        self._version_id += 1
    def refresh_version(self):
        self._version_id = str(uuid.uuid4())
        self.last_modified = datetime.datetime.utcnow()

    @property
    def etag(self):
        if self._etag is None:
            value_md5 = hashlib.md5()
            if isinstance(self.value, six.text_type):
                value = self.value.encode("utf-8")
            else:
                value = self.value
            value_md5.update(value)
            self._value_buffer.seek(0)
            while True:
                block = self._value_buffer.read(DEFAULT_KEY_BUFFER_SIZE)
                if not block:
                    break
                value_md5.update(block)

            self._etag = value_md5.hexdigest()
        return '"{0}"'.format(self._etag)

@ -132,7 +167,7 @@ class FakeKey(BaseModel):
        res = {
            'ETag': self.etag,
            'last-modified': self.last_modified_RFC1123,
            'content-length': str(len(self.value)),
            'content-length': str(self.size),
        }
        if self._storage_class != 'STANDARD':
            res['x-amz-storage-class'] = self._storage_class
@ -150,7 +185,8 @@ class FakeKey(BaseModel):

    @property
    def size(self):
        return len(self.value)
        self._value_buffer.seek(0, os.SEEK_END)
        return self._value_buffer.tell()

    @property
    def storage_class(self):
@ -161,6 +197,26 @@ class FakeKey(BaseModel):
        if self._expiry is not None:
            return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")

    # Keys need to be pickleable due to some implementation details of boto3.
    # Since file objects aren't pickleable, we need to override the default
    # behavior. The following is adapted from the Python docs:
    # https://docs.python.org/3/library/pickle.html#handling-stateful-objects
    def __getstate__(self):
        state = self.__dict__.copy()
        state['value'] = self.value
        del state['_value_buffer']
        return state

    def __setstate__(self, state):
        self.__dict__.update({
            k: v for k, v in six.iteritems(state)
            if k != 'value'
        })

        self._value_buffer = \
            tempfile.SpooledTemporaryFile(max_size=self._max_buffer_size)
        self.value = state['value']


class FakeMultipart(BaseModel):

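Reviewer note (not part of this commit): the __getstate__/__setstate__ pair follows the standard recipe from the Python docs, materialising the spooled buffer into bytes on the way out and rebuilding it on the way in. A self-contained sketch of the same pattern (class name is made up):

    import pickle
    import tempfile

    class Spooled(object):
        def __init__(self, data=b'', max_size=1024):
            self._max = max_size
            self._buf = tempfile.SpooledTemporaryFile(max_size=max_size)
            self._buf.write(data)

        def read(self):
            self._buf.seek(0)
            return self._buf.read()

        def __getstate__(self):
            state = self.__dict__.copy()
            state['data'] = self.read()  # materialise the unpicklable buffer
            del state['_buf']
            return state

        def __setstate__(self, state):
            self.__dict__.update({k: v for k, v in state.items() if k != 'data'})
            self._buf = tempfile.SpooledTemporaryFile(max_size=self._max)
            self._buf.write(state['data'])

    clone = pickle.loads(pickle.dumps(Spooled(b'payload')))
    assert clone.read() == b'payload'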
@ -634,6 +690,8 @@ class S3Backend(BaseBackend):
    def create_bucket(self, bucket_name, region_name):
        if bucket_name in self.buckets:
            raise BucketAlreadyExists(bucket=bucket_name)
        if not MIN_BUCKET_NAME_LENGTH <= len(bucket_name) <= MAX_BUCKET_NAME_LENGTH:
            raise InvalidBucketName()
        new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
        self.buckets[bucket_name] = new_bucket
        return new_bucket
@ -663,17 +721,18 @@ class S3Backend(BaseBackend):

    def get_bucket_latest_versions(self, bucket_name):
        versions = self.get_bucket_versions(bucket_name)
        maximum_version_per_key = {}
        latest_modified_per_key = {}
        latest_versions = {}

        for version in versions:
            name = version.name
            last_modified = version.last_modified
            version_id = version.version_id
            maximum_version_per_key[name] = max(
                version_id,
                maximum_version_per_key.get(name, -1)
            latest_modified_per_key[name] = max(
                last_modified,
                latest_modified_per_key.get(name, datetime.datetime.min)
            )
            if version_id == maximum_version_per_key[name]:
            if last_modified == latest_modified_per_key[name]:
                latest_versions[name] = version_id

        return latest_versions
@ -721,20 +780,19 @@ class S3Backend(BaseBackend):

        bucket = self.get_bucket(bucket_name)

        old_key = bucket.keys.get(key_name, None)
        if old_key is not None and bucket.is_versioned:
            new_version_id = old_key._version_id + 1
        else:
            new_version_id = 0

        new_key = FakeKey(
            name=key_name,
            value=value,
            storage=storage,
            etag=etag,
            is_versioned=bucket.is_versioned,
            version_id=new_version_id)
        bucket.keys[key_name] = new_key
            version_id=str(uuid.uuid4()) if bucket.is_versioned else None)

        keys = [
            key for key in bucket.keys.getlist(key_name, [])
            if key.version_id != new_key.version_id
        ] + [new_key]
        bucket.keys.setlist(key_name, keys)

        return new_key

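Reviewer note (not part of this commit): versioned puts now mint opaque uuid4 version ids and keep every version in the key store instead of incrementing an integer. A hedged sketch of the observable behaviour (assumes @mock_s3; names illustrative):

    import boto3
    from moto import mock_s3

    @mock_s3
    def test_uuid_version_ids_sketch():
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='versioned-bucket')
        client.put_bucket_versioning(
            Bucket='versioned-bucket',
            VersioningConfiguration={'Status': 'Enabled'})
        client.put_object(Bucket='versioned-bucket', Key='k', Body=b'one')
        client.put_object(Bucket='versioned-bucket', Key='k', Body=b'two')
        versions = client.list_object_versions(Bucket='versioned-bucket')['Versions']
        assert len(versions) == 2
        # Version ids are now distinct opaque strings rather than 0, 1, 2...
        assert len({v['VersionId'] for v in versions}) == 2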
@ -773,6 +831,9 @@ class S3Backend(BaseBackend):
        return key

    def put_bucket_tagging(self, bucket_name, tagging):
        tag_keys = [tag.key for tag in tagging.tag_set.tags]
        if len(tag_keys) != len(set(tag_keys)):
            raise DuplicateTagKeys()
        bucket = self.get_bucket(bucket_name)
        bucket.set_tags(tagging)

@ -915,17 +976,15 @@ class S3Backend(BaseBackend):
        dest_bucket = self.get_bucket(dest_bucket_name)
        key = self.get_key(src_bucket_name, src_key_name,
                           version_id=src_version_id)
        if dest_key_name != src_key_name:
            key = key.copy(dest_key_name)
        dest_bucket.keys[dest_key_name] = key

        # By this point, the destination key must exist, or KeyError
        if dest_bucket.is_versioned:
            dest_bucket.keys[dest_key_name].increment_version()
        new_key = key.copy(dest_key_name, dest_bucket.is_versioned)

        if storage is not None:
            key.set_storage_class(storage)
            new_key.set_storage_class(storage)
        if acl is not None:
            key.set_acl(acl)
            new_key.set_acl(acl)

        dest_bucket.keys[dest_key_name] = new_key

    def set_bucket_acl(self, bucket_name, acl):
        bucket = self.get_bucket(bucket_name)

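Reviewer note (not part of this commit): copy_object now always builds a fresh destination key via key.copy(), so storage class and ACL changes land on the copy and never mutate the source. A sketch (assumes @mock_s3; bucket and key names illustrative):

    import boto3
    from moto import mock_s3

    @mock_s3
    def test_copy_leaves_source_untouched_sketch():
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='b')
        client.put_object(Bucket='b', Key='src', Body=b'data')
        client.copy_object(Bucket='b', Key='dst',
                           CopySource={'Bucket': 'b', 'Key': 'src'},
                           StorageClass='REDUCED_REDUNDANCY')
        src = client.head_object(Bucket='b', Key='src')
        dst = client.head_object(Bucket='b', Key='dst')
        # STANDARD is implied when the storage-class header is absent.
        assert src.get('StorageClass', 'STANDARD') == 'STANDARD'
        assert dst['StorageClass'] == 'REDUCED_REDUNDANCY'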
@ -19,7 +19,7 @@ from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, Missi
    MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \
    FakeTag
from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url
from .utils import bucket_name_from_url, clean_key_name, metadata_from_headers, parse_region_from_url
from xml.dom import minidom


@ -193,7 +193,13 @@ class ResponseObject(_TemplateEnvironmentMixin):
        elif 'location' in querystring:
            bucket = self.backend.get_bucket(bucket_name)
            template = self.response_template(S3_BUCKET_LOCATION)
            return template.render(location=bucket.location)

            location = bucket.location
            # us-east-1 is different - returns a None location
            if location == DEFAULT_REGION_NAME:
                location = None

            return template.render(location=location)
        elif 'lifecycle' in querystring:
            bucket = self.backend.get_bucket(bucket_name)
            if not bucket.rules:
@ -338,9 +344,15 @@ class ResponseObject(_TemplateEnvironmentMixin):

        if continuation_token or start_after:
            limit = continuation_token or start_after
            result_keys = self._get_results_from_token(result_keys, limit)
            if not delimiter:
                result_keys = self._get_results_from_token(result_keys, limit)
            else:
                result_folders = self._get_results_from_token(result_folders, limit)

        result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys)
        if not delimiter:
            result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys)
        else:
            result_folders, is_truncated, next_continuation_token = self._truncate_result(result_folders, max_keys)

        return template.render(
            bucket=bucket,
@ -358,7 +370,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
    def _get_results_from_token(self, result_keys, token):
        continuation_index = 0
        for key in result_keys:
            if key.name > token:
            if (key.name if isinstance(key, FakeKey) else key) > token:
                break
            continuation_index += 1
        return result_keys[continuation_index:]
@ -367,7 +379,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
        if len(result_keys) > max_keys:
            is_truncated = 'true'
            result_keys = result_keys[:max_keys]
            next_continuation_token = result_keys[-1].name
            item = result_keys[-1]
            next_continuation_token = (item.name if isinstance(item, FakeKey) else item)
        else:
            is_truncated = 'false'
            next_continuation_token = None
@ -432,8 +445,19 @@ class ResponseObject(_TemplateEnvironmentMixin):

        else:
            if body:
                # us-east-1, the default AWS region behaves a bit differently
                # - you should not use it as a location constraint --> it fails
                # - querying the location constraint returns None
                try:
                    region_name = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint']
                    forced_region = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint']

                    if forced_region == DEFAULT_REGION_NAME:
                        raise S3ClientError(
                            'InvalidLocationConstraint',
                            'The specified location-constraint is not valid'
                        )
                    else:
                        region_name = forced_region
                except KeyError:
                    pass

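Reviewer note (not part of this commit): us-east-1 is special-cased twice here: passing it as an explicit LocationConstraint on bucket creation is rejected, and querying the location of a us-east-1 bucket reports a null constraint, mirroring real S3. A sketch (assumes @mock_s3):

    import boto3
    from moto import mock_s3

    @mock_s3
    def test_default_region_location_sketch():
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='default-region-bucket')
        resp = client.get_bucket_location(Bucket='default-region-bucket')
        # Mirrors real S3: buckets in the default region report a null constraint.
        assert resp['LocationConstraint'] is None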
@ -709,7 +733,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
            # Copy key
            # you can have a quoted ?version=abc with a version Id, so work on
            # we need to parse the unquoted string first
            src_key = request.headers.get("x-amz-copy-source")
            src_key = clean_key_name(request.headers.get("x-amz-copy-source"))
            if isinstance(src_key, six.binary_type):
                src_key = src_key.decode('utf-8')
            src_key_parsed = urlparse(src_key)
@ -1176,7 +1200,7 @@ S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
</Error>"""

S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{{ location }}</LocationConstraint>"""
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""

S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@ -1279,7 +1303,7 @@ S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
    {% for key in key_list %}
    <Version>
        <Key>{{ key.name }}</Key>
        <VersionId>{{ key.version_id }}</VersionId>
        <VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
        <IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
        <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
        <ETag>{{ key.etag }}</ETag>

@ -27,3 +27,10 @@ class InvalidParameterException(SecretsManagerClientError):
        super(InvalidParameterException, self).__init__(
            'InvalidParameterException',
            message)


class InvalidRequestException(SecretsManagerClientError):
    def __init__(self, message):
        super(InvalidRequestException, self).__init__(
            'InvalidRequestException',
            message)

@ -2,6 +2,8 @@ from __future__ import unicode_literals

import time
import json
import uuid
import datetime

import boto3

@ -9,6 +11,7 @@ from moto.core import BaseBackend, BaseModel
from .exceptions import (
    ResourceNotFoundException,
    InvalidParameterException,
    InvalidRequestException,
    ClientError
)
from .utils import random_password, secret_arn
@ -18,10 +21,6 @@ class SecretsManager(BaseModel):

    def __init__(self, region_name, **kwargs):
        self.region = region_name
        self.secret_id = kwargs.get('secret_id', '')
        self.version_id = kwargs.get('version_id', '')
        self.version_stage = kwargs.get('version_stage', '')
        self.secret_string = ''


class SecretsManagerBackend(BaseBackend):
@ -29,14 +28,7 @@ class SecretsManagerBackend(BaseBackend):
    def __init__(self, region_name=None, **kwargs):
        super(SecretsManagerBackend, self).__init__()
        self.region = region_name
        self.secret_id = kwargs.get('secret_id', '')
        self.name = kwargs.get('name', '')
        self.createdate = int(time.time())
        self.secret_string = ''
        self.rotation_enabled = False
        self.rotation_lambda_arn = ''
        self.auto_rotate_after_days = 0
        self.version_id = ''
        self.secrets = {}

    def reset(self):
        region_name = self.region
@ -44,36 +36,60 @@ class SecretsManagerBackend(BaseBackend):
        self.__init__(region_name)

    def _is_valid_identifier(self, identifier):
        return identifier in (self.name, self.secret_id)
        return identifier in self.secrets

    def _unix_time_secs(self, dt):
        epoch = datetime.datetime.utcfromtimestamp(0)
        return (dt - epoch).total_seconds()

    def get_secret_value(self, secret_id, version_id, version_stage):

        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException()

        if 'deleted_date' in self.secrets[secret_id]:
            raise InvalidRequestException(
                "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
                perform the operation on a secret that's currently marked deleted."
            )

        secret = self.secrets[secret_id]

        response = json.dumps({
            "ARN": secret_arn(self.region, self.secret_id),
            "Name": self.name,
            "VersionId": "A435958A-D821-4193-B719-B7769357AER4",
            "SecretString": self.secret_string,
            "ARN": secret_arn(self.region, secret['secret_id']),
            "Name": secret['name'],
            "VersionId": secret['version_id'],
            "SecretString": secret['secret_string'],
            "VersionStages": [
                "AWSCURRENT",
            ],
            "CreatedDate": "2018-05-23 13:16:57.198000"
            "CreatedDate": secret['createdate']
        })

        return response

    def create_secret(self, name, secret_string, **kwargs):
    def create_secret(self, name, secret_string, tags, **kwargs):

        self.secret_string = secret_string
        self.secret_id = name
        self.name = name
        generated_version_id = str(uuid.uuid4())

        secret = {
            'secret_string': secret_string,
            'secret_id': name,
            'name': name,
            'createdate': int(time.time()),
            'rotation_enabled': False,
            'rotation_lambda_arn': '',
            'auto_rotate_after_days': 0,
            'version_id': generated_version_id,
            'tags': tags
        }

        self.secrets[name] = secret

        response = json.dumps({
            "ARN": secret_arn(self.region, name),
            "Name": self.name,
            "VersionId": "A435958A-D821-4193-B719-B7769357AER4",
            "Name": name,
            "VersionId": generated_version_id,
        })

        return response
@ -82,26 +98,23 @@ class SecretsManagerBackend(BaseBackend):
        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        secret = self.secrets[secret_id]

        response = json.dumps({
            "ARN": secret_arn(self.region, self.secret_id),
            "Name": self.name,
            "ARN": secret_arn(self.region, secret['secret_id']),
            "Name": secret['name'],
            "Description": "",
            "KmsKeyId": "",
            "RotationEnabled": self.rotation_enabled,
            "RotationLambdaARN": self.rotation_lambda_arn,
            "RotationEnabled": secret['rotation_enabled'],
            "RotationLambdaARN": secret['rotation_lambda_arn'],
            "RotationRules": {
                "AutomaticallyAfterDays": self.auto_rotate_after_days
                "AutomaticallyAfterDays": secret['auto_rotate_after_days']
            },
            "LastRotatedDate": None,
            "LastChangedDate": None,
            "LastAccessedDate": None,
            "DeletedDate": None,
            "Tags": [
                {
                    "Key": "",
                    "Value": ""
                },
            ]
            "DeletedDate": secret.get('deleted_date', None),
            "Tags": secret['tags']
        })

        return response
@ -114,6 +127,12 @@ class SecretsManagerBackend(BaseBackend):
        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        if 'deleted_date' in self.secrets[secret_id]:
            raise InvalidRequestException(
                "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
                perform the operation on a secret that's currently marked deleted."
            )

        if client_request_token:
            token_length = len(client_request_token)
            if token_length < 32 or token_length > 64:
@ -141,17 +160,19 @@ class SecretsManagerBackend(BaseBackend):
                )
                raise InvalidParameterException(msg)

        self.version_id = client_request_token or ''
        self.rotation_lambda_arn = rotation_lambda_arn or ''
        secret = self.secrets[secret_id]

        secret['version_id'] = client_request_token or ''
        secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
        if rotation_rules:
            self.auto_rotate_after_days = rotation_rules.get(rotation_days, 0)
        if self.auto_rotate_after_days > 0:
            self.rotation_enabled = True
            secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
        if secret['auto_rotate_after_days'] > 0:
            secret['rotation_enabled'] = True

        response = json.dumps({
            "ARN": secret_arn(self.region, self.secret_id),
            "Name": self.name,
            "VersionId": self.version_id
            "ARN": secret_arn(self.region, secret['secret_id']),
            "Name": secret['name'],
            "VersionId": secret['version_id']
        })

        return response
@ -185,6 +206,85 @@ class SecretsManagerBackend(BaseBackend):

        return response

    def list_secrets(self, max_results, next_token):
        # TODO implement pagination and limits

        secret_list = [{
            "ARN": secret_arn(self.region, secret['secret_id']),
            "DeletedDate": secret.get('deleted_date', None),
            "Description": "",
            "KmsKeyId": "",
            "LastAccessedDate": None,
            "LastChangedDate": None,
            "LastRotatedDate": None,
            "Name": secret['name'],
            "RotationEnabled": secret['rotation_enabled'],
            "RotationLambdaARN": secret['rotation_lambda_arn'],
            "RotationRules": {
                "AutomaticallyAfterDays": secret['auto_rotate_after_days']
            },
            "SecretVersionsToStages": {
                secret['version_id']: ["AWSCURRENT"]
            },
            "Tags": secret['tags']
        } for secret in self.secrets.values()]

        return secret_list, None

    def delete_secret(self, secret_id, recovery_window_in_days, force_delete_without_recovery):

        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        if 'deleted_date' in self.secrets[secret_id]:
            raise InvalidRequestException(
                "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \
                perform the operation on a secret that's currently marked deleted."
            )

        if recovery_window_in_days and force_delete_without_recovery:
            raise InvalidParameterException(
                "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \
                use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays."
            )

        if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30):
            raise InvalidParameterException(
                "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \
                RecoveryWindowInDays value must be between 7 and 30 days (inclusive)."
            )

        deletion_date = datetime.datetime.utcnow()

        if force_delete_without_recovery:
            secret = self.secrets.pop(secret_id, None)
        else:
            deletion_date += datetime.timedelta(days=recovery_window_in_days or 30)
            self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date)
            secret = self.secrets.get(secret_id, None)

        if not secret:
            raise ResourceNotFoundException

        arn = secret_arn(self.region, secret['secret_id'])
        name = secret['name']

        return arn, name, self._unix_time_secs(deletion_date)

    def restore_secret(self, secret_id):

        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        self.secrets[secret_id].pop('deleted_date', None)

        secret = self.secrets[secret_id]

        arn = secret_arn(self.region, secret['secret_id'])
        name = secret['name']

        return arn, name


available_regions = (
    boto3.session.Session().get_available_regions("secretsmanager")

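Reviewer note (not part of this commit): with secrets now kept in a dict, the deleted_date marker drives the whole delete/restore lifecycle. A hedged end-to-end sketch (assumes @mock_secretsmanager; names illustrative):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def test_delete_and_restore_sketch():
        client = boto3.client('secretsmanager', region_name='us-west-2')
        client.create_secret(Name='db-password', SecretString='hunter2')
        client.delete_secret(SecretId='db-password', RecoveryWindowInDays=7)
        # While scheduled for deletion the value cannot be read...
        try:
            client.get_secret_value(SecretId='db-password')
        except ClientError as err:
            assert err.response['Error']['Code'] == 'InvalidRequestException'
        # ...but restore_secret pops the deleted_date marker and revives it.
        client.restore_secret(SecretId='db-password')
        value = client.get_secret_value(SecretId='db-password')
        assert value['SecretString'] == 'hunter2'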
@ -4,6 +4,8 @@ from moto.core.responses import BaseResponse

from .models import secretsmanager_backends

import json


class SecretsManagerResponse(BaseResponse):

@ -19,9 +21,11 @@ class SecretsManagerResponse(BaseResponse):
    def create_secret(self):
        name = self._get_param('Name')
        secret_string = self._get_param('SecretString')
        tags = self._get_param('Tags', if_none=[])
        return secretsmanager_backends[self.region].create_secret(
            name=name,
            secret_string=secret_string
            secret_string=secret_string,
            tags=tags
        )

    def get_random_password(self):
@ -62,3 +66,30 @@ class SecretsManagerResponse(BaseResponse):
            rotation_lambda_arn=rotation_lambda_arn,
            rotation_rules=rotation_rules
        )

    def list_secrets(self):
        max_results = self._get_int_param("MaxResults")
        next_token = self._get_param("NextToken")
        secret_list, next_token = secretsmanager_backends[self.region].list_secrets(
            max_results=max_results,
            next_token=next_token,
        )
        return json.dumps(dict(SecretList=secret_list, NextToken=next_token))

    def delete_secret(self):
        secret_id = self._get_param("SecretId")
        recovery_window_in_days = self._get_param("RecoveryWindowInDays")
        force_delete_without_recovery = self._get_param("ForceDeleteWithoutRecovery")
        arn, name, deletion_date = secretsmanager_backends[self.region].delete_secret(
            secret_id=secret_id,
            recovery_window_in_days=recovery_window_in_days,
            force_delete_without_recovery=force_delete_without_recovery,
        )
        return json.dumps(dict(ARN=arn, Name=name, DeletionDate=deletion_date))

    def restore_secret(self):
        secret_id = self._get_param("SecretId")
        arn, name = secretsmanager_backends[self.region].restore_secret(
            secret_id=secret_id,
        )
        return json.dumps(dict(ARN=arn, Name=name))

@ -52,8 +52,9 @@ def random_password(password_length, exclude_characters, exclude_numbers,


def secret_arn(region, secret_id):
    return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-rIjad".format(
        region, secret_id)
    id_string = ''.join(random.choice(string.ascii_letters) for _ in range(5))
    return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-{2}".format(
        region, secret_id, id_string)


def _exclude_characters(password, exclude_characters):

@ -80,10 +80,13 @@ class DomainDispatcherApplication(object):

            region = 'us-east-1'
        if service == 'dynamodb':
            dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0]
            # If Newer API version, use dynamodb2
            if dynamo_api_version > "20111205":
                host = "dynamodb2"
            if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'):
                host = 'dynamodbstreams'
            else:
                dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0]
                # If Newer API version, use dynamodb2
                if dynamo_api_version > "20111205":
                    host = "dynamodb2"
        else:
            host = "{service}.{region}.amazonaws.com".format(
                service=service, region=region)

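Reviewer note (not part of this commit): dispatch is keyed off the X-Amz-Target header, so a target such as 'DynamoDBStreams_20120810.ListStreams' now routes to the dynamodbstreams backend before the API-version check runs. A standalone mirror of the routing logic (function name is made up):

    def pick_dynamo_host(target):
        if target.startswith('DynamoDBStreams'):
            return 'dynamodbstreams'
        api_version = target.split("_")[1].split(".")[0]
        # Newer API versions are served by the dynamodb2 backend.
        return 'dynamodb2' if api_version > "20111205" else 'dynamodb'

    assert pick_dynamo_host('DynamoDBStreams_20120810.ListStreams') == 'dynamodbstreams'
    assert pick_dynamo_host('DynamoDB_20120810.ListTables') == 'dynamodb2'
    assert pick_dynamo_host('DynamoDB_20111205.ListTables') == 'dynamodb'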
@ -534,7 +534,7 @@ class SQSBackend(BaseBackend):
                    break

                import time
                time.sleep(0.001)
                time.sleep(0.01)
                continue

            previous_result_count = len(result)

@ -14,10 +14,12 @@ import itertools


class Parameter(BaseModel):
    def __init__(self, name, value, type, description, keyid, last_modified_date, version):
    def __init__(self, name, value, type, description, allowed_pattern, keyid,
                 last_modified_date, version):
        self.name = name
        self.type = type
        self.description = description
        self.allowed_pattern = allowed_pattern
        self.keyid = keyid
        self.last_modified_date = last_modified_date
        self.version = version
@ -58,6 +60,10 @@ class Parameter(BaseModel):

        if self.keyid:
            r['KeyId'] = self.keyid

        if self.allowed_pattern:
            r['AllowedPattern'] = self.allowed_pattern

        return r


@ -291,7 +297,8 @@ class SimpleSystemManagerBackend(BaseBackend):
            return self._parameters[name]
        return None

    def put_parameter(self, name, description, value, type, keyid, overwrite):
    def put_parameter(self, name, description, value, type, allowed_pattern,
                      keyid, overwrite):
        previous_parameter = self._parameters.get(name)
        version = 1

@ -302,8 +309,8 @@ class SimpleSystemManagerBackend(BaseBackend):
                return

        last_modified_date = time.time()
        self._parameters[name] = Parameter(
            name, value, type, description, keyid, last_modified_date, version)
        self._parameters[name] = Parameter(name, value, type, description,
                                           allowed_pattern, keyid, last_modified_date, version)
        return version

    def add_tags_to_resource(self, resource_type, resource_id, tags):

@ -160,11 +160,12 @@ class SimpleSystemManagerResponse(BaseResponse):
        description = self._get_param('Description')
        value = self._get_param('Value')
        type_ = self._get_param('Type')
        allowed_pattern = self._get_param('AllowedPattern')
        keyid = self._get_param('KeyId')
        overwrite = self._get_param('Overwrite', False)

        result = self.ssm_backend.put_parameter(
            name, description, value, type_, keyid, overwrite)
            name, description, value, type_, allowed_pattern, keyid, overwrite)

        if result is None:
            error = {

@ -3,6 +3,7 @@ from .responses import SimpleSystemManagerResponse

url_bases = [
    "https?://ssm.(.+).amazonaws.com",
    "https?://ssm.(.+).amazonaws.com.cn",
]

url_paths = {

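Reviewer note (not part of this commit): AllowedPattern is threaded from the response handler through put_parameter into the Parameter model and echoed back when the parameter is described; nothing here validates values against the pattern yet. A hedged sketch (assumes @mock_ssm and that describe_parameters surfaces the field):

    import boto3
    from moto import mock_ssm

    @mock_ssm
    def test_allowed_pattern_roundtrip_sketch():
        client = boto3.client('ssm', region_name='us-east-1')
        client.put_parameter(Name='/app/port', Value='8080', Type='String',
                             AllowedPattern='^\\d+$')
        param = client.describe_parameters()['Parameters'][0]
        assert param['AllowedPattern'] == '^\\d+$'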
31
setup.py
@ -1,29 +1,44 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import codecs
import os
import re
import setuptools
from setuptools import setup, find_packages
import sys


# Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15
here = os.path.abspath(os.path.dirname(__file__))

def read(*parts):
    # intentionally *not* adding an encoding option to open, See:
    # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
    with codecs.open(os.path.join(here, *parts), 'r') as fp:
        return fp.read()


install_requires = [
    "Jinja2>=2.7.3",
    "Jinja2>=2.10.1",
    "boto>=2.36.0",
    "boto3>=1.6.16",
    "botocore>=1.12.13",
    "boto3>=1.9.86",
    "botocore>=1.12.86",
    "cryptography>=2.3.0",
    "requests>=2.5",
    "xmltodict",
    "six>1.9",
    "werkzeug",
    "pyaml",
    "PyYAML",
    "pytz",
    "python-dateutil<3.0.0,>=2.1",
    "python-jose<3.0.0",
    "python-jose<4.0.0",
    "mock",
    "docker>=2.5.1",
    "jsondiff==1.1.1",
    "jsondiff==1.1.2",
    "aws-xray-sdk!=0.96,>=0.93",
    "responses>=0.9.0",
    "idna<2.9,>=2.5",
    "cfn-lint",
]

extras_require = {
@ -40,9 +55,11 @@ else:

setup(
    name='moto',
    version='1.3.7',
    version='1.3.8',
    description='A library that allows your python tests to easily'
                ' mock out the boto library',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    author='Steve Pulec',
    author_email='spulec@gmail.com',
    url='https://github.com/spulec/moto',

|
||||
'PropagateAtLaunch': False
|
||||
}],
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=False,
|
||||
)
|
||||
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
|
||||
|
||||
@ -728,13 +729,48 @@ def test_describe_autoscaling_groups_boto3():
|
||||
MaxSize=20,
|
||||
DesiredCapacity=5,
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(
|
||||
AutoScalingGroupNames=["test_asg"]
|
||||
)
|
||||
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
|
||||
response['AutoScalingGroups'][0][
|
||||
'AutoScalingGroupName'].should.equal('test_asg')
|
||||
group = response['AutoScalingGroups'][0]
|
||||
group['AutoScalingGroupName'].should.equal('test_asg')
|
||||
group['NewInstancesProtectedFromScaleIn'].should.equal(True)
|
||||
group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True)
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
def test_describe_autoscaling_instances_boto3():
|
||||
mocked_networking = setup_networking()
|
||||
client = boto3.client('autoscaling', region_name='us-east-1')
|
||||
_ = client.create_launch_configuration(
|
||||
LaunchConfigurationName='test_launch_configuration'
|
||||
)
|
||||
_ = client.create_auto_scaling_group(
|
||||
AutoScalingGroupName='test_asg',
|
||||
LaunchConfigurationName='test_launch_configuration',
|
||||
MinSize=0,
|
||||
MaxSize=20,
|
||||
DesiredCapacity=5,
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(
|
||||
AutoScalingGroupNames=["test_asg"]
|
||||
)
|
||||
instance_ids = [
|
||||
instance['InstanceId']
|
||||
for instance in response['AutoScalingGroups'][0]['Instances']
|
||||
]
|
||||
|
||||
response = client.describe_auto_scaling_instances(InstanceIds=instance_ids)
|
||||
for instance in response['AutoScalingInstances']:
|
||||
instance['AutoScalingGroupName'].should.equal('test_asg')
|
||||
instance['ProtectedFromScaleIn'].should.equal(True)
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
@ -751,17 +787,21 @@ def test_update_autoscaling_group_boto3():
|
||||
MaxSize=20,
|
||||
DesiredCapacity=5,
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
response = client.update_auto_scaling_group(
|
||||
_ = client.update_auto_scaling_group(
|
||||
AutoScalingGroupName='test_asg',
|
||||
MinSize=1,
|
||||
NewInstancesProtectedFromScaleIn=False,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(
|
||||
AutoScalingGroupNames=["test_asg"]
|
||||
)
|
||||
response['AutoScalingGroups'][0]['MinSize'].should.equal(1)
|
||||
group = response['AutoScalingGroups'][0]
|
||||
group['MinSize'].should.equal(1)
|
||||
group['NewInstancesProtectedFromScaleIn'].should.equal(False)
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
@ -992,9 +1032,7 @@ def test_attach_one_instance():
|
||||
'PropagateAtLaunch': True
|
||||
}],
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
)
|
||||
response = client.describe_auto_scaling_groups(
|
||||
AutoScalingGroupNames=['test_asg']
|
||||
NewInstancesProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
ec2 = boto3.resource('ec2', 'us-east-1')
|
||||
@ -1009,7 +1047,11 @@ def test_attach_one_instance():
|
||||
response = client.describe_auto_scaling_groups(
|
||||
AutoScalingGroupNames=['test_asg']
|
||||
)
|
||||
response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3)
|
||||
instances = response['AutoScalingGroups'][0]['Instances']
|
||||
instances.should.have.length_of(3)
|
||||
for instance in instances:
|
||||
instance['ProtectedFromScaleIn'].should.equal(True)
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
@mock_ec2
|
||||
@ -1100,3 +1142,111 @@ def test_suspend_processes():
|
||||
launch_suspended = True
|
||||
|
||||
assert launch_suspended is True
|
||||
|
||||
@mock_autoscaling
|
||||
def test_set_instance_protection():
|
||||
mocked_networking = setup_networking()
|
||||
client = boto3.client('autoscaling', region_name='us-east-1')
|
||||
_ = client.create_launch_configuration(
|
||||
LaunchConfigurationName='test_launch_configuration'
|
||||
)
|
||||
_ = client.create_auto_scaling_group(
|
||||
AutoScalingGroupName='test_asg',
|
||||
LaunchConfigurationName='test_launch_configuration',
|
||||
MinSize=0,
|
||||
MaxSize=20,
|
||||
DesiredCapacity=5,
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=False,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg'])
|
||||
instance_ids = [
|
||||
instance['InstanceId']
|
||||
for instance in response['AutoScalingGroups'][0]['Instances']
|
||||
]
|
||||
protected = instance_ids[:3]
|
||||
|
||||
_ = client.set_instance_protection(
|
||||
AutoScalingGroupName='test_asg',
|
||||
InstanceIds=protected,
|
||||
ProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg'])
|
||||
for instance in response['AutoScalingGroups'][0]['Instances']:
|
||||
instance['ProtectedFromScaleIn'].should.equal(
|
||||
instance['InstanceId'] in protected
|
||||
)
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
def test_set_desired_capacity_up_boto3():
|
||||
mocked_networking = setup_networking()
|
||||
client = boto3.client('autoscaling', region_name='us-east-1')
|
||||
_ = client.create_launch_configuration(
|
||||
LaunchConfigurationName='test_launch_configuration'
|
||||
)
|
||||
_ = client.create_auto_scaling_group(
|
||||
AutoScalingGroupName='test_asg',
|
||||
LaunchConfigurationName='test_launch_configuration',
|
||||
MinSize=0,
|
||||
MaxSize=20,
|
||||
DesiredCapacity=5,
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
_ = client.set_desired_capacity(
|
||||
AutoScalingGroupName='test_asg',
|
||||
DesiredCapacity=10,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg'])
|
||||
instances = response['AutoScalingGroups'][0]['Instances']
|
||||
instances.should.have.length_of(10)
|
||||
for instance in instances:
|
||||
instance['ProtectedFromScaleIn'].should.equal(True)
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
def test_set_desired_capacity_down_boto3():
|
||||
mocked_networking = setup_networking()
|
||||
client = boto3.client('autoscaling', region_name='us-east-1')
|
||||
_ = client.create_launch_configuration(
|
||||
LaunchConfigurationName='test_launch_configuration'
|
||||
)
|
||||
_ = client.create_auto_scaling_group(
|
||||
AutoScalingGroupName='test_asg',
|
||||
LaunchConfigurationName='test_launch_configuration',
|
||||
MinSize=0,
|
||||
MaxSize=20,
|
||||
DesiredCapacity=5,
|
||||
VPCZoneIdentifier=mocked_networking['subnet1'],
|
||||
NewInstancesProtectedFromScaleIn=True,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg'])
|
||||
instance_ids = [
|
||||
instance['InstanceId']
|
||||
for instance in response['AutoScalingGroups'][0]['Instances']
|
||||
]
|
||||
unprotected, protected = instance_ids[:2], instance_ids[2:]
|
||||
|
||||
_ = client.set_instance_protection(
|
||||
AutoScalingGroupName='test_asg',
|
||||
InstanceIds=unprotected,
|
||||
ProtectedFromScaleIn=False,
|
||||
)
|
||||
|
||||
_ = client.set_desired_capacity(
|
||||
AutoScalingGroupName='test_asg',
|
||||
DesiredCapacity=1,
|
||||
)
|
||||
|
||||
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg'])
|
||||
group = response['AutoScalingGroups'][0]
|
||||
group['DesiredCapacity'].should.equal(1)
|
||||
instance_ids = {instance['InstanceId'] for instance in group['Instances']}
|
||||
set(protected).should.equal(instance_ids)
|
||||
set(unprotected).should_not.be.within(instance_ids) # only unprotected killed
|
||||
|
||||
@ -12,6 +12,8 @@ import sure # noqa
|
||||
|
||||
from freezegun import freeze_time
|
||||
from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings
|
||||
from nose.tools import assert_raises
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
_lambda_region = 'us-west-2'
|
||||
|
||||
@ -397,6 +399,11 @@ def test_get_function():
|
||||
result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST')
|
||||
result['Configuration']['Version'].should.equal('$LATEST')
|
||||
|
||||
# Test get function when can't find function name
|
||||
with assert_raises(ClientError):
|
||||
conn.get_function(FunctionName='junk', Qualifier='$LATEST')
|
||||
|
||||
|
||||
|
||||
@mock_lambda
|
||||
@mock_s3
|
||||
@ -464,7 +471,8 @@ def test_publish():
|
||||
function_list['Functions'].should.have.length_of(1)
|
||||
latest_arn = function_list['Functions'][0]['FunctionArn']
|
||||
|
||||
conn.publish_version(FunctionName='testFunction')
|
||||
res = conn.publish_version(FunctionName='testFunction')
|
||||
assert res['ResponseMetadata']['HTTPStatusCode'] == 201
|
||||
|
||||
function_list = conn.list_functions()
|
||||
function_list['Functions'].should.have.length_of(2)
|
||||
@ -819,3 +827,87 @@ def get_function_policy():
|
||||
assert isinstance(response['Policy'], str)
|
||||
res = json.loads(response['Policy'])
|
||||
assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction'
|
||||
|
||||
|
||||
@mock_lambda
|
||||
@mock_s3
|
||||
def test_list_versions_by_function():
|
||||
s3_conn = boto3.client('s3', 'us-west-2')
|
||||
s3_conn.create_bucket(Bucket='test-bucket')
|
||||
|
||||
zip_content = get_test_zip_file2()
|
||||
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
|
||||
conn = boto3.client('lambda', 'us-west-2')
|
||||
|
||||
conn.create_function(
|
||||
FunctionName='testFunction',
|
||||
Runtime='python2.7',
|
||||
Role='test-iam-role',
|
||||
Handler='lambda_function.lambda_handler',
|
||||
Code={
|
||||
'S3Bucket': 'test-bucket',
|
||||
'S3Key': 'test.zip',
|
||||
},
|
||||
Description='test lambda function',
|
||||
Timeout=3,
|
||||
MemorySize=128,
|
||||
Publish=True,
|
||||
)
|
||||
|
||||
res = conn.publish_version(FunctionName='testFunction')
|
||||
assert res['ResponseMetadata']['HTTPStatusCode'] == 201
|
||||
versions = conn.list_versions_by_function(FunctionName='testFunction')
|
||||
|
||||
assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST'
|
||||
|
||||
|
||||
@mock_lambda
|
||||
@mock_s3
|
||||
def test_create_function_with_already_exists():
|
||||
s3_conn = boto3.client('s3', 'us-west-2')
|
||||
s3_conn.create_bucket(Bucket='test-bucket')
|
||||
|
||||
zip_content = get_test_zip_file2()
|
||||
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
|
||||
conn = boto3.client('lambda', 'us-west-2')
|
||||
|
||||
conn.create_function(
|
||||
FunctionName='testFunction',
|
||||
Runtime='python2.7',
|
||||
Role='test-iam-role',
|
||||
Handler='lambda_function.lambda_handler',
|
||||
Code={
|
||||
'S3Bucket': 'test-bucket',
|
||||
'S3Key': 'test.zip',
|
||||
},
|
||||
Description='test lambda function',
|
||||
Timeout=3,
|
||||
MemorySize=128,
|
||||
Publish=True,
|
||||
)
|
||||
|
||||
response = conn.create_function(
|
||||
FunctionName='testFunction',
|
||||
Runtime='python2.7',
|
||||
Role='test-iam-role',
|
||||
Handler='lambda_function.lambda_handler',
|
||||
Code={
|
||||
'S3Bucket': 'test-bucket',
|
||||
'S3Key': 'test.zip',
|
||||
},
|
||||
Description='test lambda function',
|
||||
Timeout=3,
|
||||
MemorySize=128,
|
||||
Publish=True,
|
||||
)
|
||||
|
||||
assert response['FunctionName'] == 'testFunction'
|
||||
|
||||
|
||||
@mock_lambda
|
||||
@mock_s3
|
||||
def test_list_versions_by_function_for_nonexistent_function():
|
||||
conn = boto3.client('lambda', 'us-west-2')
|
||||
versions = conn.list_versions_by_function(FunctionName='testFunction')
|
||||
|
||||
assert len(versions['Versions']) == 0
|
||||
|
||||
@@ -323,6 +323,54 @@ def test_create_job_queue():
    resp.should.contain('jobQueues')
    len(resp['jobQueues']).should.equal(0)

    # Create a job queue that already exists
    try:
        resp = batch_client.create_job_queue(
            jobQueueName='test_job_queue',
            state='ENABLED',
            priority=123,
            computeEnvironmentOrder=[
                {
                    'order': 123,
                    'computeEnvironment': arn
                },
            ]
        )

    except ClientError as err:
        err.response['Error']['Code'].should.equal('ClientException')


    # Create a job queue with an invalid state
    try:
        resp = batch_client.create_job_queue(
            jobQueueName='test_job_queue2',
            state='JUNK',
            priority=123,
            computeEnvironmentOrder=[
                {
                    'order': 123,
                    'computeEnvironment': arn
                },
            ]
        )

    except ClientError as err:
        err.response['Error']['Code'].should.equal('ClientException')

    # Create a job queue with no compute environments
    try:
        resp = batch_client.create_job_queue(
            jobQueueName='test_job_queue3',
            state='ENABLED',
            priority=123,
            computeEnvironmentOrder=[

            ]
        )

    except ClientError as err:
        err.response['Error']['Code'].should.equal('ClientException')

@mock_ec2
@mock_ecs
@@ -397,6 +445,17 @@ def test_update_job_queue():
    len(resp['jobQueues']).should.equal(1)
    resp['jobQueues'][0]['priority'].should.equal(5)

    batch_client.update_job_queue(
        jobQueue='test_job_queue',
        priority=5
    )

    resp = batch_client.describe_job_queues()
    resp.should.contain('jobQueues')
    len(resp['jobQueues']).should.equal(1)
    resp['jobQueues'][0]['priority'].should.equal(5)



@mock_ec2
@mock_ecs

@@ -1,34 +1,38 @@
from __future__ import unicode_literals

template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "VPC ENI Test CloudFormation",
    "Resources": {
        "ENI": {
            "Type": "AWS::EC2::NetworkInterface",
            "Properties": {
                "SubnetId": {"Ref": "Subnet"}
            }
        },
        "Subnet": {
            "Type": "AWS::EC2::Subnet",
            "Properties": {
                "AvailabilityZone": "us-east-1a",
                "VpcId": {"Ref": "VPC"},
                "CidrBlock": "10.0.0.0/24"
            }
        },
        "VPC": {
            "Type": "AWS::EC2::VPC",
            "Properties": {
                "CidrBlock": "10.0.0.0/16"
            }
        }
    },
    "Outputs": {
        "NinjaENI": {
            "Description": "Elastic IP mapping to Auto-Scaling Group",
            "Value": {"Ref": "ENI"}
        },
        "ENIIpAddress": {
            "Description": "ENI's Private IP address",
            "Value": {"Fn::GetAtt": ["ENI", "PrimaryPrivateIpAddress"]}
        }
    }
}
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,471 +1,471 @@
from __future__ import unicode_literals
import json
import yaml

from mock import patch
import sure  # noqa

from moto.cloudformation.exceptions import ValidationError
from moto.cloudformation.models import FakeStack
from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export
from moto.sqs.models import Queue
from moto.s3.models import FakeBucket
from moto.cloudformation.utils import yaml_tag_constructor
from boto.cloudformation.stack import Output


dummy_template = {
    "AWSTemplateFormatVersion": "2010-09-09",

    "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.",

    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "QueueName": "my-queue",
                "VisibilityTimeout": 60,
            }
        },
        "S3Bucket": {
            "Type": "AWS::S3::Bucket",
            "DeletionPolicy": "Retain"
        },
    },
}

name_type_template = {
    "AWSTemplateFormatVersion": "2010-09-09",

    "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.",

    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "VisibilityTimeout": 60,
            }
        },
    },
}

output_dict = {
    "Outputs": {
        "Output1": {
            "Value": {"Ref": "Queue"},
            "Description": "This is a description."
        }
    }
}

bad_output = {
    "Outputs": {
        "Output1": {
            "Value": {"Fn::GetAtt": ["Queue", "InvalidAttribute"]}
        }
    }
}

get_attribute_output = {
    "Outputs": {
        "Output1": {
            "Value": {"Fn::GetAtt": ["Queue", "QueueName"]}
        }
    }
}

get_availability_zones_output = {
    "Outputs": {
        "Output1": {
            "Value": {"Fn::GetAZs": ""}
        }
    }
}

split_select_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "QueueName": {"Fn::Select": ["1", {"Fn::Split": ["-", "123-myqueue"]}]},
                "VisibilityTimeout": 60,
            }
        }
    }
}

sub_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue1": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "QueueName": {"Fn::Sub": '${AWS::StackName}-queue-${!Literal}'},
                "VisibilityTimeout": 60,
            }
        },
        "Queue2": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "QueueName": {"Fn::Sub": '${Queue1.QueueName}'},
                "VisibilityTimeout": 60,
            }
        },
    }
}

export_value_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "QueueName": {"Fn::Sub": '${AWS::StackName}-queue'},
                "VisibilityTimeout": 60,
            }
        }
    },
    "Outputs": {
        "Output1": {
            "Value": "value",
            "Export": {"Name": 'queue-us-west-1'}
        }
    }
}

import_value_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {
                "QueueName": {"Fn::ImportValue": 'queue-us-west-1'},
                "VisibilityTimeout": 60,
            }
        }
    }
}

outputs_template = dict(list(dummy_template.items()) +
                        list(output_dict.items()))
bad_outputs_template = dict(
    list(dummy_template.items()) + list(bad_output.items()))
get_attribute_outputs_template = dict(
    list(dummy_template.items()) + list(get_attribute_output.items()))
get_availability_zones_template = dict(
    list(dummy_template.items()) + list(get_availability_zones_output.items()))

dummy_template_json = json.dumps(dummy_template)
name_type_template_json = json.dumps(name_type_template)
output_type_template_json = json.dumps(outputs_template)
bad_output_template_json = json.dumps(bad_outputs_template)
get_attribute_outputs_template_json = json.dumps(
    get_attribute_outputs_template)
get_availability_zones_template_json = json.dumps(
    get_availability_zones_template)
split_select_template_json = json.dumps(split_select_template)
sub_template_json = json.dumps(sub_template)
export_value_template_json = json.dumps(export_value_template)
import_value_template_json = json.dumps(import_value_template)


def test_parse_stack_resources():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=dummy_template_json,
        parameters={},
        region_name='us-west-1')

    stack.resource_map.should.have.length_of(2)

    queue = stack.resource_map['Queue']
    queue.should.be.a(Queue)
    queue.name.should.equal("my-queue")

    bucket = stack.resource_map['S3Bucket']
    bucket.should.be.a(FakeBucket)
    bucket.physical_resource_id.should.equal(bucket.name)


@patch("moto.cloudformation.parsing.logger")
def test_missing_resource_logs(logger):
    resource_class_from_type("foobar")
    logger.warning.assert_called_with(
        'No Moto CloudFormation support for %s', 'foobar')


def test_parse_stack_with_name_type_resource():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=name_type_template_json,
        parameters={},
        region_name='us-west-1')

    stack.resource_map.should.have.length_of(1)
    list(stack.resource_map.keys())[0].should.equal('Queue')
    queue = list(stack.resource_map.values())[0]
    queue.should.be.a(Queue)


def test_parse_stack_with_yaml_template():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=yaml.dump(name_type_template),
        parameters={},
        region_name='us-west-1')

    stack.resource_map.should.have.length_of(1)
    list(stack.resource_map.keys())[0].should.equal('Queue')
    queue = list(stack.resource_map.values())[0]
    queue.should.be.a(Queue)


def test_parse_stack_with_outputs():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=output_type_template_json,
        parameters={},
        region_name='us-west-1')

    stack.output_map.should.have.length_of(1)
    list(stack.output_map.keys())[0].should.equal('Output1')
    output = list(stack.output_map.values())[0]
    output.should.be.a(Output)
    output.description.should.equal("This is a description.")


def test_parse_stack_with_get_attribute_outputs():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=get_attribute_outputs_template_json,
        parameters={},
        region_name='us-west-1')

    stack.output_map.should.have.length_of(1)
    list(stack.output_map.keys())[0].should.equal('Output1')
    output = list(stack.output_map.values())[0]
    output.should.be.a(Output)
    output.value.should.equal("my-queue")


def test_parse_stack_with_get_attribute_kms():
    from .fixtures.kms_key import template

    template_json = json.dumps(template)
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=template_json,
        parameters={},
        region_name='us-west-1')

    stack.output_map.should.have.length_of(1)
    list(stack.output_map.keys())[0].should.equal('KeyArn')
    output = list(stack.output_map.values())[0]
    output.should.be.a(Output)


def test_parse_stack_with_get_availability_zones():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=get_availability_zones_template_json,
        parameters={},
        region_name='us-east-1')

    stack.output_map.should.have.length_of(1)
    list(stack.output_map.keys())[0].should.equal('Output1')
    output = list(stack.output_map.values())[0]
    output.should.be.a(Output)
    output.value.should.equal(["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"])


def test_parse_stack_with_bad_get_attribute_outputs():
    FakeStack.when.called_with(
        "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError)


def test_parse_equals_condition():
    parse_condition(
        condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
        resources_map={"EnvType": "prod"},
        condition_map={},
    ).should.equal(True)

    parse_condition(
        condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
        resources_map={"EnvType": "staging"},
        condition_map={},
    ).should.equal(False)


def test_parse_not_condition():
    parse_condition(
        condition={
            "Fn::Not": [{
                "Fn::Equals": [{"Ref": "EnvType"}, "prod"]
            }]
        },
        resources_map={"EnvType": "prod"},
        condition_map={},
    ).should.equal(False)

    parse_condition(
        condition={
            "Fn::Not": [{
                "Fn::Equals": [{"Ref": "EnvType"}, "prod"]
            }]
        },
        resources_map={"EnvType": "staging"},
        condition_map={},
    ).should.equal(True)


def test_parse_and_condition():
    parse_condition(
        condition={
            "Fn::And": [
                {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
                {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]},
            ]
        },
        resources_map={"EnvType": "prod"},
        condition_map={},
    ).should.equal(False)

    parse_condition(
        condition={
            "Fn::And": [
                {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
                {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
            ]
        },
        resources_map={"EnvType": "prod"},
        condition_map={},
    ).should.equal(True)


def test_parse_or_condition():
    parse_condition(
        condition={
            "Fn::Or": [
                {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
                {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]},
            ]
        },
        resources_map={"EnvType": "prod"},
        condition_map={},
    ).should.equal(True)

    parse_condition(
        condition={
            "Fn::Or": [
                {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]},
                {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]},
            ]
        },
        resources_map={"EnvType": "prod"},
        condition_map={},
    ).should.equal(False)


def test_reference_other_conditions():
    parse_condition(
        condition={"Fn::Not": [{"Condition": "OtherCondition"}]},
        resources_map={},
        condition_map={"OtherCondition": True},
    ).should.equal(False)


def test_parse_split_and_select():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=split_select_template_json,
        parameters={},
        region_name='us-west-1')

    stack.resource_map.should.have.length_of(1)
    queue = stack.resource_map['Queue']
    queue.name.should.equal("myqueue")


def test_sub():
    stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=sub_template_json,
        parameters={},
        region_name='us-west-1')

    queue1 = stack.resource_map['Queue1']
    queue2 = stack.resource_map['Queue2']
    queue2.name.should.equal(queue1.name)


def test_import():
    export_stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=export_value_template_json,
        parameters={},
        region_name='us-west-1')
    import_stack = FakeStack(
        stack_id="test_id",
        name="test_stack",
        template=import_value_template_json,
        parameters={},
        region_name='us-west-1',
        cross_stack_resources={export_stack.exports[0].value: export_stack.exports[0]})

    queue = import_stack.resource_map['Queue']
    queue.name.should.equal("value")


def test_short_form_func_in_yaml_teamplate():
    template = """---
KeyB64: !Base64 valueToEncode
KeyRef: !Ref foo
KeyAnd: !And
  - A
  - B
KeyEquals: !Equals [A, B]
KeyIf: !If [A, B, C]
KeyNot: !Not [A]
KeyOr: !Or [A, B]
KeyFindInMap: !FindInMap [A, B, C]
KeyGetAtt: !GetAtt A.B
KeyGetAZs: !GetAZs A
KeyImportValue: !ImportValue A
KeyJoin: !Join [ ":", [A, B, C] ]
KeySelect: !Select [A, B]
KeySplit: !Split [A, B]
KeySub: !Sub A
"""
    yaml.add_multi_constructor('', yaml_tag_constructor, Loader=yaml.Loader)
    template_dict = yaml.load(template, Loader=yaml.Loader)
    key_and_expects = [
        ['KeyRef', {'Ref': 'foo'}],
        ['KeyB64', {'Fn::Base64': 'valueToEncode'}],
        ['KeyAnd', {'Fn::And': ['A', 'B']}],
        ['KeyEquals', {'Fn::Equals': ['A', 'B']}],
        ['KeyIf', {'Fn::If': ['A', 'B', 'C']}],
        ['KeyNot', {'Fn::Not': ['A']}],
        ['KeyOr', {'Fn::Or': ['A', 'B']}],
        ['KeyFindInMap', {'Fn::FindInMap': ['A', 'B', 'C']}],
        ['KeyGetAtt', {'Fn::GetAtt': ['A', 'B']}],
        ['KeyGetAZs', {'Fn::GetAZs': 'A'}],
        ['KeyImportValue', {'Fn::ImportValue': 'A'}],
        ['KeyJoin', {'Fn::Join': [":", ['A', 'B', 'C']]}],
        ['KeySelect', {'Fn::Select': ['A', 'B']}],
        ['KeySplit', {'Fn::Split': ['A', 'B']}],
        ['KeySub', {'Fn::Sub': 'A'}],
    ]
    for k, v in key_and_expects:
        template_dict.should.have.key(k).which.should.be.equal(v)
115 tests/test_cloudformation/test_validate.py Normal file
@@ -0,0 +1,115 @@
from collections import OrderedDict
import json
import yaml
import os
import boto3
from nose.tools import raises
import botocore


from moto.cloudformation.exceptions import ValidationError
from moto.cloudformation.models import FakeStack
from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export
from moto.sqs.models import Queue
from moto.s3.models import FakeBucket
from moto.cloudformation.utils import yaml_tag_constructor
from boto.cloudformation.stack import Output
from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2

json_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Stack 1",
    "Resources": {
        "EC2Instance1": {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                "ImageId": "ami-d3adb33f",
                "KeyName": "dummy",
                "InstanceType": "t2.micro",
                "Tags": [
                    {
                        "Key": "Description",
                        "Value": "Test tag"
                    },
                    {
                        "Key": "Name",
                        "Value": "Name tag for tests"
                    }
                ]
            }
        }
    }
}

# One resource is required
json_bad_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Stack 1"
}

dummy_template_json = json.dumps(json_template)
dummy_bad_template_json = json.dumps(json_bad_template)


@mock_cloudformation
def test_boto3_json_validate_successful():
    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
    response = cf_conn.validate_template(
        TemplateBody=dummy_template_json,
    )
    assert response['Description'] == "Stack 1"
    assert response['Parameters'] == []
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200

@mock_cloudformation
def test_boto3_json_invalid_missing_resource():
    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
    try:
        cf_conn.validate_template(
            TemplateBody=dummy_bad_template_json,
        )
        assert False
    except botocore.exceptions.ClientError as e:
        assert str(e) == 'An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack' \
                         ' with id Missing top level item Resources to file module does not exist'
        assert True


yaml_template = """
AWSTemplateFormatVersion: '2010-09-09'
Description: Simple CloudFormation Test Template
Resources:
  S3Bucket:
    Type: AWS::S3::Bucket
    Properties:
      AccessControl: PublicRead
      BucketName: cf-test-bucket-1
"""

yaml_bad_template = """
AWSTemplateFormatVersion: '2010-09-09'
Description: Simple CloudFormation Test Template
"""

@mock_cloudformation
def test_boto3_yaml_validate_successful():
    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
    response = cf_conn.validate_template(
        TemplateBody=yaml_template,
    )
    assert response['Description'] == "Simple CloudFormation Test Template"
    assert response['Parameters'] == []
    assert response['ResponseMetadata']['HTTPStatusCode'] == 200

@mock_cloudformation
def test_boto3_yaml_invalid_missing_resource():
    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
    try:
        cf_conn.validate_template(
            TemplateBody=yaml_bad_template,
        )
        assert False
    except botocore.exceptions.ClientError as e:
        assert str(e) == 'An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack' \
                         ' with id Missing top level item Resources to file module does not exist'
        assert True
File diff suppressed because it is too large
491 tests/test_config/test_config.py Normal file
@ -0,0 +1,491 @@
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
from nose.tools import assert_raises
|
||||
|
||||
from moto.config import mock_config
|
||||
|
||||
|
||||
@mock_config
|
||||
def test_put_configuration_recorder():
|
||||
client = boto3.client('config', region_name='us-west-2')
|
||||
|
||||
# Try without a name supplied:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={'roleARN': 'somearn'})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidConfigurationRecorderNameException'
|
||||
assert 'is not valid, blank string.' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Try with a really long name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={'name': 'a' * 257, 'roleARN': 'somearn'})
|
||||
assert ce.exception.response['Error']['Code'] == 'ValidationException'
|
||||
assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message']
|
||||
|
||||
# With resource types and flags set to True:
|
||||
bad_groups = [
|
||||
{'allSupported': True, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']},
|
||||
{'allSupported': False, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']},
|
||||
{'allSupported': True, 'includeGlobalResourceTypes': False, 'resourceTypes': ['item']},
|
||||
{'allSupported': False, 'includeGlobalResourceTypes': False, 'resourceTypes': []},
|
||||
{'includeGlobalResourceTypes': False, 'resourceTypes': []},
|
||||
{'includeGlobalResourceTypes': True},
|
||||
{'resourceTypes': []},
|
||||
{}
|
||||
]
|
||||
|
||||
for bg in bad_groups:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'default',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': bg
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidRecordingGroupException'
|
||||
assert ce.exception.response['Error']['Message'] == 'The recording group provided is not valid'
|
||||
|
||||
# With an invalid Resource Type:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'default',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
# 2 good, and 2 bad:
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'LOLNO', 'AWS::EC2::VPC', 'LOLSTILLNO']
|
||||
}
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'ValidationException'
|
||||
assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str(ce.exception.response['Error']['Message'])
|
||||
assert 'AWS::EC2::Instance' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Create a proper one:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
|
||||
}
|
||||
})
|
||||
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert not result[0]['recordingGroup']['allSupported']
|
||||
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert len(result[0]['recordingGroup']['resourceTypes']) == 2
|
||||
assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \
|
||||
and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes']
|
||||
|
||||
# Now update the configuration recorder:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': True,
|
||||
'includeGlobalResourceTypes': True
|
||||
}
|
||||
})
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert result[0]['recordingGroup']['allSupported']
|
||||
assert result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert len(result[0]['recordingGroup']['resourceTypes']) == 0
|
||||
|
||||
# With a default recording group (i.e. lacking one)
|
||||
client.put_configuration_recorder(ConfigurationRecorder={'name': 'testrecorder', 'roleARN': 'somearn'})
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert result[0]['recordingGroup']['allSupported']
|
||||
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert not result[0]['recordingGroup'].get('resourceTypes')
|
||||
|
||||
# Can currently only have exactly 1 Config Recorder in an account/region:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'someotherrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
}
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'MaxNumberOfConfigurationRecordersExceededException'
|
||||
assert "maximum number of configuration recorders: 1 is reached." in ce.exception.response['Error']['Message']
|
||||
|
||||
|
||||
@mock_config
|
||||
def test_describe_configurations():
|
||||
client = boto3.client('config', region_name='us-west-2')
|
||||
|
||||
# Without any configurations:
|
||||
result = client.describe_configuration_recorders()
|
||||
assert not result['ConfigurationRecorders']
|
||||
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
|
||||
}
|
||||
})
|
||||
|
||||
result = client.describe_configuration_recorders()['ConfigurationRecorders']
|
||||
assert len(result) == 1
|
||||
assert result[0]['name'] == 'testrecorder'
|
||||
assert result[0]['roleARN'] == 'somearn'
|
||||
assert not result[0]['recordingGroup']['allSupported']
|
||||
assert not result[0]['recordingGroup']['includeGlobalResourceTypes']
|
||||
assert len(result[0]['recordingGroup']['resourceTypes']) == 2
|
||||
assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \
|
||||
and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes']
|
||||
|
||||
# Specify an incorrect name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.describe_configuration_recorders(ConfigurationRecorderNames=['wrong'])
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
|
||||
assert 'wrong' in ce.exception.response['Error']['Message']
|
||||
|
||||
# And with both a good and wrong name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.describe_configuration_recorders(ConfigurationRecorderNames=['testrecorder', 'wrong'])
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
|
||||
assert 'wrong' in ce.exception.response['Error']['Message']
|
||||
|
||||
|
||||
@mock_config
|
||||
def test_delivery_channels():
|
||||
client = boto3.client('config', region_name='us-west-2')
|
||||
|
||||
# Try without a config recorder:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={})
|
||||
assert ce.exception.response['Error']['Code'] == 'NoAvailableConfigurationRecorderException'
|
||||
assert ce.exception.response['Error']['Message'] == 'Configuration recorder is not available to ' \
|
||||
'put delivery channel.'
|
||||
|
||||
# Create a config recorder to continue testing:
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
|
||||
}
|
||||
})
|
||||
|
||||
# Try without a name supplied:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryChannelNameException'
|
||||
assert 'is not valid, blank string.' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Try with a really long name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={'name': 'a' * 257})
|
||||
assert ce.exception.response['Error']['Code'] == 'ValidationException'
|
||||
assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Without specifying a bucket name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel'})
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException'
|
||||
assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.'
|
||||
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': ''})
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException'
|
||||
assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.'
|
||||
|
||||
# With an empty string for the S3 key prefix:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={
|
||||
'name': 'testchannel', 's3BucketName': 'somebucket', 's3KeyPrefix': ''})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidS3KeyPrefixException'
|
||||
assert 'empty s3 key prefix.' in ce.exception.response['Error']['Message']
|
||||
|
||||
# With an empty string for the SNS ARN:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={
|
||||
'name': 'testchannel', 's3BucketName': 'somebucket', 'snsTopicARN': ''})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidSNSTopicARNException'
|
||||
assert 'The sns topic arn' in ce.exception.response['Error']['Message']
|
||||
|
||||
# With an invalid delivery frequency:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={
|
||||
'name': 'testchannel',
|
||||
's3BucketName': 'somebucket',
|
||||
'configSnapshotDeliveryProperties': {'deliveryFrequency': 'WRONG'}
|
||||
})
|
||||
assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryFrequency'
|
||||
assert 'WRONG' in ce.exception.response['Error']['Message']
|
||||
assert 'TwentyFour_Hours' in ce.exception.response['Error']['Message']
|
||||
|
||||
# Create a proper one:
|
||||
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
|
||||
result = client.describe_delivery_channels()['DeliveryChannels']
|
||||
assert len(result) == 1
|
||||
assert len(result[0].keys()) == 2
|
||||
assert result[0]['name'] == 'testchannel'
|
||||
assert result[0]['s3BucketName'] == 'somebucket'
|
||||
|
||||
# Overwrite it with another proper configuration:
|
||||
client.put_delivery_channel(DeliveryChannel={
|
||||
'name': 'testchannel',
|
||||
's3BucketName': 'somebucket',
|
||||
'snsTopicARN': 'sometopicarn',
|
||||
'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'}
|
||||
})
|
||||
result = client.describe_delivery_channels()['DeliveryChannels']
|
||||
assert len(result) == 1
|
||||
assert len(result[0].keys()) == 4
|
||||
assert result[0]['name'] == 'testchannel'
|
||||
assert result[0]['s3BucketName'] == 'somebucket'
|
||||
assert result[0]['snsTopicARN'] == 'sometopicarn'
|
||||
assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours'
|
||||
|
||||
# Can only have 1:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel2', 's3BucketName': 'somebucket'})
|
||||
assert ce.exception.response['Error']['Code'] == 'MaxNumberOfDeliveryChannelsExceededException'
|
||||
assert 'because the maximum number of delivery channels: 1 is reached.' in ce.exception.response['Error']['Message']
|
||||
|
||||
|
||||
@mock_config
|
||||
def test_describe_delivery_channels():
|
||||
client = boto3.client('config', region_name='us-west-2')
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
|
||||
}
|
||||
})
|
||||
|
||||
# Without any channels:
|
||||
result = client.describe_delivery_channels()
|
||||
assert not result['DeliveryChannels']
|
||||
|
||||
client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
|
||||
result = client.describe_delivery_channels()['DeliveryChannels']
|
||||
assert len(result) == 1
|
||||
assert len(result[0].keys()) == 2
|
||||
assert result[0]['name'] == 'testchannel'
|
||||
assert result[0]['s3BucketName'] == 'somebucket'
|
||||
|
||||
# Overwrite it with another proper configuration:
|
||||
client.put_delivery_channel(DeliveryChannel={
|
||||
'name': 'testchannel',
|
||||
's3BucketName': 'somebucket',
|
||||
'snsTopicARN': 'sometopicarn',
|
||||
'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'}
|
||||
})
|
||||
result = client.describe_delivery_channels()['DeliveryChannels']
|
||||
assert len(result) == 1
|
||||
assert len(result[0].keys()) == 4
|
||||
assert result[0]['name'] == 'testchannel'
|
||||
assert result[0]['s3BucketName'] == 'somebucket'
|
||||
assert result[0]['snsTopicARN'] == 'sometopicarn'
|
||||
assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours'
|
||||
|
||||
# Specify an incorrect name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.describe_delivery_channels(DeliveryChannelNames=['wrong'])
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
|
||||
assert 'wrong' in ce.exception.response['Error']['Message']
|
||||
|
||||
# And with both a good and wrong name:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.describe_delivery_channels(DeliveryChannelNames=['testchannel', 'wrong'])
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
|
||||
assert 'wrong' in ce.exception.response['Error']['Message']
|
||||
|
||||
|
||||
@mock_config
|
||||
def test_start_configuration_recorder():
|
||||
client = boto3.client('config', region_name='us-west-2')
|
||||
|
||||
# Without a config recorder:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
|
||||
assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
|
||||
|
||||
# Make the config recorder;
|
||||
client.put_configuration_recorder(ConfigurationRecorder={
|
||||
'name': 'testrecorder',
|
||||
'roleARN': 'somearn',
|
||||
'recordingGroup': {
|
||||
'allSupported': False,
|
||||
'includeGlobalResourceTypes': False,
|
||||
'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
|
||||
}
|
||||
})
|
||||
|
||||
# Without a delivery channel:
|
||||
with assert_raises(ClientError) as ce:
|
||||
client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
|
||||
    assert ce.exception.response['Error']['Code'] == 'NoAvailableDeliveryChannelException'

    # Make the delivery channel:
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})

    # Start it:
    client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Verify it's enabled:
    result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
    lower_bound = (datetime.utcnow() - timedelta(minutes=5))
    assert result[0]['recording']
    assert result[0]['lastStatus'] == 'PENDING'
    assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow()
    assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow()


@mock_config
def test_stop_configuration_recorder():
    client = boto3.client('config', region_name='us-west-2')

    # Without a config recorder:
    with assert_raises(ClientError) as ce:
        client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Make the delivery channel for creation:
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})

    # Start it:
    client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')
    client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Verify it's disabled:
    result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
    lower_bound = (datetime.utcnow() - timedelta(minutes=5))
    assert not result[0]['recording']
    assert result[0]['lastStatus'] == 'PENDING'
    assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow()
    assert lower_bound < result[0]['lastStopTime'].replace(tzinfo=None) <= datetime.utcnow()
    assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow()


@mock_config
def test_describe_configuration_recorder_status():
    client = boto3.client('config', region_name='us-west-2')

    # Without any:
    result = client.describe_configuration_recorder_status()
    assert not result['ConfigurationRecordersStatus']

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Without specifying a config recorder:
    result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus']
    assert len(result) == 1
    assert result[0]['name'] == 'testrecorder'
    assert not result[0]['recording']

    # With a proper name:
    result = client.describe_configuration_recorder_status(
        ConfigurationRecorderNames=['testrecorder'])['ConfigurationRecordersStatus']
    assert len(result) == 1
    assert result[0]['name'] == 'testrecorder'
    assert not result[0]['recording']

    # Invalid name:
    with assert_raises(ClientError) as ce:
        client.describe_configuration_recorder_status(ConfigurationRecorderNames=['testrecorder', 'wrong'])
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'
    assert 'wrong' in ce.exception.response['Error']['Message']


@mock_config
def test_delete_configuration_recorder():
    client = boto3.client('config', region_name='us-west-2')

    # Make the config recorder:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })

    # Delete it:
    client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Try again -- it should be deleted:
    with assert_raises(ClientError) as ce:
        client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder')
    assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException'


@mock_config
def test_delete_delivery_channel():
    client = boto3.client('config', region_name='us-west-2')

    # Need a recorder to test the constraint on recording being enabled:
    client.put_configuration_recorder(ConfigurationRecorder={
        'name': 'testrecorder',
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC']
        }
    })
    client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'})
    client.start_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # With the recorder enabled:
    with assert_raises(ClientError) as ce:
        client.delete_delivery_channel(DeliveryChannelName='testchannel')
    assert ce.exception.response['Error']['Code'] == 'LastDeliveryChannelDeleteFailedException'
    assert 'because there is a running configuration recorder.' in ce.exception.response['Error']['Message']

    # Stop recording:
    client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder')

    # Try again:
    client.delete_delivery_channel(DeliveryChannelName='testchannel')

    # Verify:
    with assert_raises(ClientError) as ce:
        client.delete_delivery_channel(DeliveryChannelName='testchannel')
    assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException'
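

# The ConfigurationRecorder payload above is repeated verbatim across several
# tests. A small factory would keep them in sync -- a minimal sketch, using a
# hypothetical helper name that is not part of the original suite:
def _recorder_config(name='testrecorder'):
    # Build the canned recorder payload exercised throughout this module.
    return {
        'name': name,
        'roleARN': 'somearn',
        'recordingGroup': {
            'allSupported': False,
            'includeGlobalResourceTypes': False,
            'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'],
        },
    }
# Usage: client.put_configuration_recorder(ConfigurationRecorder=_recorder_config())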
File diff suppressed because it is too large
234
tests/test_dynamodbstreams/test_dynamodbstreams.py
Normal file
@@ -0,0 +1,234 @@
from __future__ import unicode_literals, print_function

from nose.tools import assert_raises

import boto3
from moto import mock_dynamodb2, mock_dynamodbstreams


class TestCore():
    stream_arn = None
    mocks = []

    def setup(self):
        self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()]
        for m in self.mocks:
            m.start()

        # create a table with a stream
        conn = boto3.client('dynamodb', region_name='us-east-1')

        resp = conn.create_table(
            TableName='test-streams',
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id',
                                   'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1,
                                   'WriteCapacityUnits': 1},
            StreamSpecification={
                'StreamEnabled': True,
                'StreamViewType': 'NEW_AND_OLD_IMAGES'
            }
        )
        self.stream_arn = resp['TableDescription']['LatestStreamArn']

    def teardown(self):
        conn = boto3.client('dynamodb', region_name='us-east-1')
        conn.delete_table(TableName='test-streams')
        self.stream_arn = None

        for m in self.mocks:
            m.stop()


    def test_verify_stream(self):
        conn = boto3.client('dynamodb', region_name='us-east-1')
        resp = conn.describe_table(TableName='test-streams')
        assert 'LatestStreamArn' in resp['Table']

    def test_describe_stream(self):
        conn = boto3.client('dynamodbstreams', region_name='us-east-1')

        resp = conn.describe_stream(StreamArn=self.stream_arn)
        assert 'StreamDescription' in resp
        desc = resp['StreamDescription']
        assert desc['StreamArn'] == self.stream_arn
        assert desc['TableName'] == 'test-streams'

    def test_list_streams(self):
        conn = boto3.client('dynamodbstreams', region_name='us-east-1')

        resp = conn.list_streams()
        assert resp['Streams'][0]['StreamArn'] == self.stream_arn

        resp = conn.list_streams(TableName='no-stream')
        assert not resp['Streams']

    def test_get_shard_iterator(self):
        conn = boto3.client('dynamodbstreams', region_name='us-east-1')

        resp = conn.describe_stream(StreamArn=self.stream_arn)
        shard_id = resp['StreamDescription']['Shards'][0]['ShardId']

        resp = conn.get_shard_iterator(
            StreamArn=self.stream_arn,
            ShardId=shard_id,
            ShardIteratorType='TRIM_HORIZON'
        )
        assert 'ShardIterator' in resp

    def test_get_records_empty(self):
        conn = boto3.client('dynamodbstreams', region_name='us-east-1')

        resp = conn.describe_stream(StreamArn=self.stream_arn)
        shard_id = resp['StreamDescription']['Shards'][0]['ShardId']

        resp = conn.get_shard_iterator(
            StreamArn=self.stream_arn,
            ShardId=shard_id,
            ShardIteratorType='LATEST'
        )
        iterator_id = resp['ShardIterator']

        resp = conn.get_records(ShardIterator=iterator_id)
        assert 'Records' in resp
        assert len(resp['Records']) == 0

    def test_get_records_seq(self):
        conn = boto3.client('dynamodb', region_name='us-east-1')

        conn.put_item(
            TableName='test-streams',
            Item={
                'id': {'S': 'entry1'},
                'first_col': {'S': 'foo'}
            }
        )
        conn.put_item(
            TableName='test-streams',
            Item={
                'id': {'S': 'entry1'},
                'first_col': {'S': 'bar'},
                'second_col': {'S': 'baz'}
            }
        )
        conn.delete_item(
            TableName='test-streams',
            Key={'id': {'S': 'entry1'}}
        )

        conn = boto3.client('dynamodbstreams', region_name='us-east-1')

        resp = conn.describe_stream(StreamArn=self.stream_arn)
        shard_id = resp['StreamDescription']['Shards'][0]['ShardId']

        resp = conn.get_shard_iterator(
            StreamArn=self.stream_arn,
            ShardId=shard_id,
            ShardIteratorType='TRIM_HORIZON'
        )
        iterator_id = resp['ShardIterator']

        resp = conn.get_records(ShardIterator=iterator_id)
        assert len(resp['Records']) == 3
        assert resp['Records'][0]['eventName'] == 'INSERT'
        assert resp['Records'][1]['eventName'] == 'MODIFY'
        assert resp['Records'][2]['eventName'] == 'DELETE'

        # now try fetching from the next shard iterator, it should be empty
        resp = conn.get_records(ShardIterator=resp['NextShardIterator'])
        assert len(resp['Records']) == 0


class TestEdges():
    mocks = []

    def setup(self):
        self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()]
        for m in self.mocks:
            m.start()

    def teardown(self):
        for m in self.mocks:
            m.stop()


    def test_enable_stream_on_table(self):
        conn = boto3.client('dynamodb', region_name='us-east-1')
        resp = conn.create_table(
            TableName='test-streams',
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id',
                                   'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1,
                                   'WriteCapacityUnits': 1}
        )
        assert 'StreamSpecification' not in resp['TableDescription']

        resp = conn.update_table(
            TableName='test-streams',
            StreamSpecification={
                'StreamViewType': 'KEYS_ONLY'
            }
        )
        assert 'StreamSpecification' in resp['TableDescription']
        assert resp['TableDescription']['StreamSpecification'] == {
            'StreamEnabled': True,
            'StreamViewType': 'KEYS_ONLY'
        }
        assert 'LatestStreamLabel' in resp['TableDescription']

        # now try to enable it again
        with assert_raises(conn.exceptions.ResourceInUseException):
            resp = conn.update_table(
                TableName='test-streams',
                StreamSpecification={
                    'StreamViewType': 'OLD_IMAGES'
                }
            )

    def test_stream_with_range_key(self):
        dyn = boto3.client('dynamodb', region_name='us-east-1')

        resp = dyn.create_table(
            TableName='test-streams',
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'},
                       {'AttributeName': 'color', 'KeyType': 'RANGE'}],
            AttributeDefinitions=[{'AttributeName': 'id',
                                   'AttributeType': 'S'},
                                  {'AttributeName': 'color',
                                   'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1,
                                   'WriteCapacityUnits': 1},
            StreamSpecification={
                'StreamViewType': 'NEW_IMAGES'
            }
        )
        stream_arn = resp['TableDescription']['LatestStreamArn']

        streams = boto3.client('dynamodbstreams', region_name='us-east-1')
        resp = streams.describe_stream(StreamArn=stream_arn)
        shard_id = resp['StreamDescription']['Shards'][0]['ShardId']

        resp = streams.get_shard_iterator(
            StreamArn=stream_arn,
            ShardId=shard_id,
            ShardIteratorType='LATEST'
        )
        iterator_id = resp['ShardIterator']

        dyn.put_item(
            TableName='test-streams',
            Item={'id': {'S': 'row1'}, 'color': {'S': 'blue'}}
        )
        dyn.put_item(
            TableName='test-streams',
            Item={'id': {'S': 'row2'}, 'color': {'S': 'green'}}
        )

        resp = streams.get_records(ShardIterator=iterator_id)
        assert len(resp['Records']) == 2
        assert resp['Records'][0]['eventName'] == 'INSERT'
        assert resp['Records'][1]['eventName'] == 'INSERT'
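

# A consumer typically loops on NextShardIterator until get_records returns no
# records, which is the behaviour the assertions above rely on. A minimal
# sketch of that pattern (hypothetical helper, not part of the original
# module), assuming a dynamodbstreams client and a live stream ARN:
def drain_stream(streams_client, stream_arn):
    # Read every record from the first shard, oldest first.
    desc = streams_client.describe_stream(StreamArn=stream_arn)['StreamDescription']
    shard_id = desc['Shards'][0]['ShardId']
    iterator = streams_client.get_shard_iterator(
        StreamArn=stream_arn,
        ShardId=shard_id,
        ShardIteratorType='TRIM_HORIZON')['ShardIterator']
    records = []
    while iterator:
        resp = streams_client.get_records(ShardIterator=iterator)
        if not resp['Records']:
            break
        records.extend(resp['Records'])
        iterator = resp.get('NextShardIterator')
    return records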
File diff suppressed because it is too large
@@ -1,362 +1,367 @@
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises

import boto3
from botocore.exceptions import ClientError
import boto
import boto.cloudformation
import boto.ec2
from boto.exception import EC2ResponseError
import sure  # noqa

from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated
from tests.helpers import requires_boto_gte
from tests.test_cloudformation.fixtures import vpc_eni
import json


@mock_ec2_deprecated
def test_elastic_network_interfaces():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

    with assert_raises(EC2ResponseError) as ex:
        eni = conn.create_network_interface(subnet.id, dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set')

    eni = conn.create_network_interface(subnet.id)

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(1)
    eni = all_enis[0]
    eni.groups.should.have.length_of(0)
    eni.private_ip_addresses.should.have.length_of(1)
    eni.private_ip_addresses[0].private_ip_address.startswith('10.').should.be.true

    with assert_raises(EC2ResponseError) as ex:
        conn.delete_network_interface(eni.id, dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set')

    conn.delete_network_interface(eni.id)

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(0)

    with assert_raises(EC2ResponseError) as cm:
        conn.delete_network_interface(eni.id)
    cm.exception.error_code.should.equal('InvalidNetworkInterfaceID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none


@mock_ec2_deprecated
def test_elastic_network_interfaces_subnet_validation():
    conn = boto.connect_vpc('the_key', 'the_secret')

    with assert_raises(EC2ResponseError) as cm:
        conn.create_network_interface("subnet-abcd1234")
    cm.exception.error_code.should.equal('InvalidSubnetID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none


@mock_ec2_deprecated
def test_elastic_network_interfaces_with_private_ip():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
    private_ip = "54.0.0.1"
    eni = conn.create_network_interface(subnet.id, private_ip)

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(1)

    eni = all_enis[0]
    eni.groups.should.have.length_of(0)

    eni.private_ip_addresses.should.have.length_of(1)
    eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)


@mock_ec2_deprecated
def test_elastic_network_interfaces_with_groups():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
    security_group1 = conn.create_security_group(
        'test security group #1', 'this is a test security group')
    security_group2 = conn.create_security_group(
        'test security group #2', 'this is a test security group')
    conn.create_network_interface(
        subnet.id, groups=[security_group1.id, security_group2.id])

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(1)

    eni = all_enis[0]
    eni.groups.should.have.length_of(2)
    set([group.id for group in eni.groups]).should.equal(
        set([security_group1.id, security_group2.id]))


@requires_boto_gte("2.12.0")
@mock_ec2_deprecated
def test_elastic_network_interfaces_modify_attribute():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
    security_group1 = conn.create_security_group(
        'test security group #1', 'this is a test security group')
    security_group2 = conn.create_security_group(
        'test security group #2', 'this is a test security group')
    conn.create_network_interface(subnet.id, groups=[security_group1.id])

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(1)

    eni = all_enis[0]
    eni.groups.should.have.length_of(1)
    eni.groups[0].id.should.equal(security_group1.id)

    with assert_raises(EC2ResponseError) as ex:
        conn.modify_network_interface_attribute(
            eni.id, 'groupset', [security_group2.id], dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set')

    conn.modify_network_interface_attribute(
        eni.id, 'groupset', [security_group2.id])

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(1)

    eni = all_enis[0]
    eni.groups.should.have.length_of(1)
    eni.groups[0].id.should.equal(security_group2.id)


@mock_ec2_deprecated
def test_elastic_network_interfaces_filtering():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")

    security_group1 = conn.create_security_group(
        'test security group #1', 'this is a test security group')
    security_group2 = conn.create_security_group(
        'test security group #2', 'this is a test security group')

    eni1 = conn.create_network_interface(
        subnet.id, groups=[security_group1.id, security_group2.id])
    eni2 = conn.create_network_interface(
        subnet.id, groups=[security_group1.id])
    eni3 = conn.create_network_interface(subnet.id)

    all_enis = conn.get_all_network_interfaces()
    all_enis.should.have.length_of(3)

    # Filter by NetworkInterfaceId
    enis_by_id = conn.get_all_network_interfaces([eni1.id])
    enis_by_id.should.have.length_of(1)
    set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))

    # Filter by ENI ID
    enis_by_id = conn.get_all_network_interfaces(
        filters={'network-interface-id': eni1.id})
    enis_by_id.should.have.length_of(1)
    set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))

    # Filter by Security Group
    enis_by_group = conn.get_all_network_interfaces(
        filters={'group-id': security_group1.id})
    enis_by_group.should.have.length_of(2)
    set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id]))

    # Filter by ENI ID and Security Group
    enis_by_group = conn.get_all_network_interfaces(
        filters={'network-interface-id': eni1.id, 'group-id': security_group1.id})
    enis_by_group.should.have.length_of(1)
    set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id]))

    # Unsupported filter
    conn.get_all_network_interfaces.when.called_with(
        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)


@mock_ec2
def test_elastic_network_interfaces_get_by_tag_name():
    ec2 = boto3.resource('ec2', region_name='us-west-2')
    ec2_client = boto3.client('ec2', region_name='us-west-2')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

    eni1 = ec2.create_network_interface(
        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

    with assert_raises(ClientError) as ex:
        eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True)
    ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
    ex.exception.response['ResponseMetadata'][
        'HTTPStatusCode'].should.equal(400)
    ex.exception.response['Error']['Message'].should.equal(
        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')

    eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}])

    # The status of the new interface should be 'available'
    waiter = ec2_client.get_waiter('network_interface_available')
    waiter.wait(NetworkInterfaceIds=[eni1.id])

    filters = [{'Name': 'tag:Name', 'Values': ['eni1']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(1)

    filters = [{'Name': 'tag:Name', 'Values': ['wrong-name']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(0)


@mock_ec2
def test_elastic_network_interfaces_get_by_availability_zone():
    ec2 = boto3.resource('ec2', region_name='us-west-2')
    ec2_client = boto3.client('ec2', region_name='us-west-2')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

    subnet2 = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.1.0/24', AvailabilityZone='us-west-2b')

    eni1 = ec2.create_network_interface(
        SubnetId=subnet1.id, PrivateIpAddress='10.0.0.15')

    eni2 = ec2.create_network_interface(
        SubnetId=subnet2.id, PrivateIpAddress='10.0.1.15')

    # The status of the new interface should be 'available'
    waiter = ec2_client.get_waiter('network_interface_available')
    waiter.wait(NetworkInterfaceIds=[eni1.id, eni2.id])

    filters = [{'Name': 'availability-zone', 'Values': ['us-west-2a']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(1)

    filters = [{'Name': 'availability-zone', 'Values': ['us-west-2c']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(0)


@mock_ec2
def test_elastic_network_interfaces_get_by_private_ip():
    ec2 = boto3.resource('ec2', region_name='us-west-2')
    ec2_client = boto3.client('ec2', region_name='us-west-2')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

    eni1 = ec2.create_network_interface(
        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

    # The status of the new interface should be 'available'
    waiter = ec2_client.get_waiter('network_interface_available')
    waiter.wait(NetworkInterfaceIds=[eni1.id])

    filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.5']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(1)

    filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.10']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(0)

    filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.5']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(1)

    filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.10']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(0)


@mock_ec2
def test_elastic_network_interfaces_get_by_vpc_id():
    ec2 = boto3.resource('ec2', region_name='us-west-2')
    ec2_client = boto3.client('ec2', region_name='us-west-2')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

    eni1 = ec2.create_network_interface(
        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

    # The status of the new interface should be 'available'
    waiter = ec2_client.get_waiter('network_interface_available')
    waiter.wait(NetworkInterfaceIds=[eni1.id])

    filters = [{'Name': 'vpc-id', 'Values': [subnet.vpc_id]}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(1)

    filters = [{'Name': 'vpc-id', 'Values': ['vpc-aaaa1111']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(0)


@mock_ec2
def test_elastic_network_interfaces_get_by_subnet_id():
    ec2 = boto3.resource('ec2', region_name='us-west-2')
    ec2_client = boto3.client('ec2', region_name='us-west-2')

    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a')

    eni1 = ec2.create_network_interface(
        SubnetId=subnet.id, PrivateIpAddress='10.0.10.5')

    # The status of the new interface should be 'available'
    waiter = ec2_client.get_waiter('network_interface_available')
    waiter.wait(NetworkInterfaceIds=[eni1.id])

    filters = [{'Name': 'subnet-id', 'Values': [subnet.id]}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(1)

    filters = [{'Name': 'subnet-id', 'Values': ['subnet-aaaa1111']}]
    enis = list(ec2.network_interfaces.filter(Filters=filters))
    enis.should.have.length_of(0)


@mock_ec2_deprecated
@mock_cloudformation_deprecated
def test_elastic_network_interfaces_cloudformation():
    template = vpc_eni.template
    template_json = json.dumps(template)
    conn = boto.cloudformation.connect_to_region("us-west-1")
    conn.create_stack(
        "test_stack",
        template_body=template_json,
    )
    ec2_conn = boto.ec2.connect_to_region("us-west-1")
    eni = ec2_conn.get_all_network_interfaces()[0]
    eni.private_ip_addresses.should.have.length_of(1)

    stack = conn.describe_stacks()[0]
    resources = stack.describe_resources()
    cfn_eni = [resource for resource in resources if resource.resource_type ==
               'AWS::EC2::NetworkInterface'][0]
    cfn_eni.physical_resource_id.should.equal(eni.id)

    outputs = {output.key: output.value for output in stack.outputs}
    outputs['ENIIpAddress'].should.equal(eni.private_ip_addresses[0].private_ip_address)
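

# The boto3-based tests above repeat one filter-then-count pattern. A tiny
# helper (hypothetical, not part of the original file) captures it:
def count_enis(ec2_resource, filter_name, filter_value):
    # Count ENIs matching a single EC2 filter, e.g. ('subnet-id', subnet.id).
    filters = [{'Name': filter_name, 'Values': [filter_value]}]
    return len(list(ec2_resource.network_interfaces.filter(Filters=filters)))
# Usage: count_enis(ec2, 'vpc-id', subnet.vpc_id).should.equal(1)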
File diff suppressed because it is too large
@@ -1,175 +1,216 @@
from __future__ import unicode_literals
import boto
import boto3
import sure  # noqa

from moto import mock_ec2_deprecated, mock_ec2


@mock_ec2_deprecated
def test_default_network_acl_created_with_vpc():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(2)


@mock_ec2_deprecated
def test_network_acls():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    network_acl = conn.create_network_acl(vpc.id)
    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(3)


@mock_ec2_deprecated
def test_new_subnet_associates_with_default_network_acl():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.get_all_vpcs()[0]

    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(1)

    acl = all_network_acls[0]
    acl.associations.should.have.length_of(4)
    [a.subnet_id for a in acl.associations].should.contain(subnet.id)


@mock_ec2_deprecated
def test_network_acl_entries():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")

    network_acl = conn.create_network_acl(vpc.id)

    network_acl_entry = conn.create_network_acl_entry(
        network_acl.id, 110, 6,
        'ALLOW', '0.0.0.0/0', False,
        port_range_from='443',
        port_range_to='443'
    )

    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(3)

    test_network_acl = next(na for na in all_network_acls
                            if na.id == network_acl.id)
    entries = test_network_acl.network_acl_entries
    entries.should.have.length_of(1)
    entries[0].rule_number.should.equal('110')
    entries[0].protocol.should.equal('6')
    entries[0].rule_action.should.equal('ALLOW')


@mock_ec2_deprecated
def test_delete_network_acl_entry():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")

    network_acl = conn.create_network_acl(vpc.id)

    conn.create_network_acl_entry(
        network_acl.id, 110, 6,
        'ALLOW', '0.0.0.0/0', False,
        port_range_from='443',
        port_range_to='443'
    )
    conn.delete_network_acl_entry(
        network_acl.id, 110, False
    )

    all_network_acls = conn.get_all_network_acls()

    test_network_acl = next(na for na in all_network_acls
                            if na.id == network_acl.id)
    entries = test_network_acl.network_acl_entries
    entries.should.have.length_of(0)


@mock_ec2_deprecated
def test_replace_network_acl_entry():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")

    network_acl = conn.create_network_acl(vpc.id)

    conn.create_network_acl_entry(
        network_acl.id, 110, 6,
        'ALLOW', '0.0.0.0/0', False,
        port_range_from='443',
        port_range_to='443'
    )
    conn.replace_network_acl_entry(
        network_acl.id, 110, -1,
        'DENY', '0.0.0.0/0', False,
        port_range_from='22',
        port_range_to='22'
    )

    all_network_acls = conn.get_all_network_acls()

    test_network_acl = next(na for na in all_network_acls
                            if na.id == network_acl.id)
    entries = test_network_acl.network_acl_entries
    entries.should.have.length_of(1)
    entries[0].rule_number.should.equal('110')
    entries[0].protocol.should.equal('-1')
    entries[0].rule_action.should.equal('DENY')


@mock_ec2_deprecated
def test_associate_new_network_acl_with_subnet():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
    network_acl = conn.create_network_acl(vpc.id)

    conn.associate_network_acl(network_acl.id, subnet.id)

    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(3)

    test_network_acl = next(na for na in all_network_acls
                            if na.id == network_acl.id)

    test_network_acl.associations.should.have.length_of(1)
    test_network_acl.associations[0].subnet_id.should.equal(subnet.id)


@mock_ec2_deprecated
def test_delete_network_acl():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
    network_acl = conn.create_network_acl(vpc.id)

    all_network_acls = conn.get_all_network_acls()
    all_network_acls.should.have.length_of(3)

    any(acl.id == network_acl.id for acl in all_network_acls).should.be.ok

    conn.delete_network_acl(network_acl.id)

    updated_network_acls = conn.get_all_network_acls()
    updated_network_acls.should.have.length_of(2)

    any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok


@mock_ec2_deprecated
def test_network_acl_tagging():
    conn = boto.connect_vpc('the_key', 'the secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    network_acl = conn.create_network_acl(vpc.id)

    network_acl.add_tag("a key", "some value")

    tag = conn.get_all_tags()[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")

    all_network_acls = conn.get_all_network_acls()
    test_network_acl = next(na for na in all_network_acls
                            if na.id == network_acl.id)
    test_network_acl.tags.should.have.length_of(1)
    test_network_acl.tags["a key"].should.equal("some value")


@mock_ec2
def test_new_subnet_in_new_vpc_associates_with_default_network_acl():
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    new_vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    new_vpc.reload()

    subnet = ec2.create_subnet(VpcId=new_vpc.id, CidrBlock='10.0.0.0/24')
    subnet.reload()

    new_vpcs_default_network_acl = next(iter(new_vpc.network_acls.all()), None)
    new_vpcs_default_network_acl.reload()
    new_vpcs_default_network_acl.vpc_id.should.equal(new_vpc.id)
    new_vpcs_default_network_acl.associations.should.have.length_of(1)
    new_vpcs_default_network_acl.associations[0]['SubnetId'].should.equal(subnet.id)


@mock_ec2
def test_default_network_acl_default_entries():
    ec2 = boto3.resource('ec2', region_name='us-west-1')
    default_network_acl = next(iter(ec2.network_acls.all()), None)
    default_network_acl.is_default.should.be.ok

    default_network_acl.entries.should.have.length_of(4)
    unique_entries = []
    for entry in default_network_acl.entries:
        entry['CidrBlock'].should.equal('0.0.0.0/0')
        entry['Protocol'].should.equal('-1')
        entry['RuleNumber'].should.be.within([100, 32767])
        entry['RuleAction'].should.be.within(['allow', 'deny'])
        assert type(entry['Egress']) is bool
        if entry['RuleAction'] == 'allow':
            entry['RuleNumber'].should.be.equal(100)
        else:
            entry['RuleNumber'].should.be.equal(32767)
        if entry not in unique_entries:
            unique_entries.append(entry)

    unique_entries.should.have.length_of(4)
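

# The four default entries verified above are the cross product of
# {rule 100 / allow, rule 32767 / deny} with {Egress True, Egress False}.
# Spelled out for reference (an illustration, not part of the original test):
EXPECTED_DEFAULT_ENTRIES = [
    {'CidrBlock': '0.0.0.0/0', 'Protocol': '-1', 'RuleNumber': rule_number,
     'RuleAction': rule_action, 'Egress': egress}
    for rule_number, rule_action in ((100, 'allow'), (32767, 'deny'))
    for egress in (True, False)
]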
@@ -1,345 +1,387 @@
from __future__ import unicode_literals

import boto3
import sure  # noqa

from moto import mock_ec2


def get_subnet_id(conn):
    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
    subnet = conn.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']
    return subnet_id


def spot_config(subnet_id, allocation_strategy="lowestPrice"):
    return {
        'ClientToken': 'string',
        'SpotPrice': '0.12',
        'TargetCapacity': 6,
        'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet',
        'LaunchSpecifications': [{
            'ImageId': 'ami-123',
            'KeyName': 'my-key',
            'SecurityGroups': [
                {
                    'GroupId': 'sg-123'
                },
            ],
            'UserData': 'some user data',
            'InstanceType': 't2.small',
            'BlockDeviceMappings': [
                {
                    'VirtualName': 'string',
                    'DeviceName': 'string',
                    'Ebs': {
                        'SnapshotId': 'string',
                        'VolumeSize': 123,
                        'DeleteOnTermination': True | False,
                        'VolumeType': 'standard',
                        'Iops': 123,
                        'Encrypted': True | False
                    },
                    'NoDevice': 'string'
                },
            ],
            'Monitoring': {
                'Enabled': True
            },
            'SubnetId': subnet_id,
            'IamInstanceProfile': {
                'Arn': 'arn:aws:iam::123456789012:role/fleet'
            },
            'EbsOptimized': False,
            'WeightedCapacity': 2.0,
            'SpotPrice': '0.13'
        }, {
            'ImageId': 'ami-123',
            'KeyName': 'my-key',
            'SecurityGroups': [
                {
                    'GroupId': 'sg-123'
                },
            ],
            'UserData': 'some user data',
            'InstanceType': 't2.large',
            'Monitoring': {
                'Enabled': True
            },
            'SubnetId': subnet_id,
            'IamInstanceProfile': {
                'Arn': 'arn:aws:iam::123456789012:role/fleet'
            },
            'EbsOptimized': False,
            'WeightedCapacity': 4.0,
            'SpotPrice': '10.00',
        }],
        'AllocationStrategy': allocation_strategy,
        'FulfilledCapacity': 6,
    }
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_create_spot_fleet_with_lowest_price():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id)
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
spot_fleet_requests = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
|
||||
len(spot_fleet_requests).should.equal(1)
|
||||
spot_fleet_request = spot_fleet_requests[0]
|
||||
spot_fleet_request['SpotFleetRequestState'].should.equal("active")
|
||||
spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']
|
||||
|
||||
spot_fleet_config['SpotPrice'].should.equal('0.12')
|
||||
spot_fleet_config['TargetCapacity'].should.equal(6)
|
||||
spot_fleet_config['IamFleetRole'].should.equal(
|
||||
'arn:aws:iam::123456789012:role/fleet')
|
||||
spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice')
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(6.0)
|
||||
|
||||
len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
|
||||
launch_spec = spot_fleet_config['LaunchSpecifications'][0]
|
||||
|
||||
launch_spec['EbsOptimized'].should.equal(False)
|
||||
launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}])
|
||||
launch_spec['IamInstanceProfile'].should.equal(
|
||||
{"Arn": "arn:aws:iam::123456789012:role/fleet"})
|
||||
launch_spec['ImageId'].should.equal("ami-123")
|
||||
launch_spec['InstanceType'].should.equal("t2.small")
|
||||
launch_spec['KeyName'].should.equal("my-key")
|
||||
launch_spec['Monitoring'].should.equal({"Enabled": True})
|
||||
launch_spec['SpotPrice'].should.equal("0.13")
|
||||
launch_spec['SubnetId'].should.equal(subnet_id)
|
||||
launch_spec['UserData'].should.equal("some user data")
|
||||
launch_spec['WeightedCapacity'].should.equal(2.0)
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(3)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_create_diversified_spot_fleet():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
diversified_config = spot_config(
|
||||
subnet_id, allocation_strategy='diversified')
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=diversified_config
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(2)
|
||||
instance_types = set([instance['InstanceType'] for instance in instances])
|
||||
instance_types.should.equal(set(["t2.small", "t2.large"]))
|
||||
instances[0]['InstanceId'].should.contain("i-")
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_cancel_spot_fleet_request():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
conn.cancel_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True)
|
||||
|
||||
spot_fleet_requests = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
|
||||
len(spot_fleet_requests).should.equal(0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_modify_spot_fleet_request_up():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=20)
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(10)
|
||||
|
||||
spot_fleet_config = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
|
||||
spot_fleet_config['TargetCapacity'].should.equal(20)
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(20.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_modify_spot_fleet_request_up_diversified():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(
|
||||
subnet_id, allocation_strategy='diversified'),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=19)
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(7)
|
||||
|
||||
spot_fleet_config = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
|
||||
spot_fleet_config['TargetCapacity'].should.equal(19)
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(20.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_modify_spot_fleet_request_down_no_terminate():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination")
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(3)
|
||||
|
||||
spot_fleet_config = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
|
||||
spot_fleet_config['TargetCapacity'].should.equal(1)
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(6.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_modify_spot_fleet_request_down_odd():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=7)
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=5)
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(3)
|
||||
|
||||
spot_fleet_config = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
|
||||
spot_fleet_config['TargetCapacity'].should.equal(5)
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(6.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_modify_spot_fleet_request_down():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=1)
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(1)
|
||||
|
||||
spot_fleet_config = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
|
||||
spot_fleet_config['TargetCapacity'].should.equal(1)
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(2.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
spot_fleet_res = conn.request_spot_fleet(
|
||||
SpotFleetRequestConfig=spot_config(subnet_id),
|
||||
)
|
||||
spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]])
|
||||
|
||||
conn.modify_spot_fleet_request(
|
||||
SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination")
|
||||
|
||||
instance_res = conn.describe_spot_fleet_instances(
|
||||
SpotFleetRequestId=spot_fleet_id)
|
||||
instances = instance_res['ActiveInstances']
|
||||
len(instances).should.equal(1)
|
||||
|
||||
spot_fleet_config = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
|
||||
spot_fleet_config['TargetCapacity'].should.equal(1)
|
||||
spot_fleet_config['FulfilledCapacity'].should.equal(2.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_create_spot_fleet_without_spot_price():
|
||||
conn = boto3.client("ec2", region_name='us-west-2')
|
||||
subnet_id = get_subnet_id(conn)
|
||||
|
||||
# remove prices to force a fallback to ondemand price
|
||||
spot_config_without_price = spot_config(subnet_id)
|
||||
del spot_config_without_price['SpotPrice']
|
||||
for spec in spot_config_without_price['LaunchSpecifications']:
|
||||
del spec['SpotPrice']
|
||||
|
||||
spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId']
|
||||
spot_fleet_requests = conn.describe_spot_fleet_requests(
|
||||
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
|
||||
len(spot_fleet_requests).should.equal(1)
|
||||
spot_fleet_request = spot_fleet_requests[0]
|
||||
spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']
|
||||
|
||||
len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
|
||||
launch_spec1 = spot_fleet_config['LaunchSpecifications'][0]
|
||||
launch_spec2 = spot_fleet_config['LaunchSpecifications'][1]
|
||||
|
||||
# AWS will figure out the price
|
||||
assert 'SpotPrice' not in launch_spec1
|
||||
assert 'SpotPrice' not in launch_spec2
|
||||
from __future__ import unicode_literals

import boto3
import sure  # noqa

from moto import mock_ec2


def get_subnet_id(conn):
    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
    subnet = conn.create_subnet(
        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
    subnet_id = subnet['SubnetId']
    return subnet_id


def spot_config(subnet_id, allocation_strategy="lowestPrice"):
    return {
        'ClientToken': 'string',
        'SpotPrice': '0.12',
        'TargetCapacity': 6,
        'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet',
        'LaunchSpecifications': [{
            'ImageId': 'ami-123',
            'KeyName': 'my-key',
            'SecurityGroups': [
                {
                    'GroupId': 'sg-123'
                },
            ],
            'UserData': 'some user data',
            'InstanceType': 't2.small',
            'BlockDeviceMappings': [
                {
                    'VirtualName': 'string',
                    'DeviceName': 'string',
                    'Ebs': {
                        'SnapshotId': 'string',
                        'VolumeSize': 123,
                        'DeleteOnTermination': True | False,
                        'VolumeType': 'standard',
                        'Iops': 123,
                        'Encrypted': True | False
                    },
                    'NoDevice': 'string'
                },
            ],
            'Monitoring': {
                'Enabled': True
            },
            'SubnetId': subnet_id,
            'IamInstanceProfile': {
                'Arn': 'arn:aws:iam::123456789012:role/fleet'
            },
            'EbsOptimized': False,
            'WeightedCapacity': 2.0,
            'SpotPrice': '0.13',
        }, {
            'ImageId': 'ami-123',
            'KeyName': 'my-key',
            'SecurityGroups': [
                {
                    'GroupId': 'sg-123'
                },
            ],
            'UserData': 'some user data',
            'InstanceType': 't2.large',
            'Monitoring': {
                'Enabled': True
            },
            'SubnetId': subnet_id,
            'IamInstanceProfile': {
                'Arn': 'arn:aws:iam::123456789012:role/fleet'
            },
            'EbsOptimized': False,
            'WeightedCapacity': 4.0,
            'SpotPrice': '10.00',
        }],
        'AllocationStrategy': allocation_strategy,
        'FulfilledCapacity': 6,
    }

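# Note on the fixture above: TargetCapacity is 6 and the two launch specs
# carry WeightedCapacity 2.0 and 4.0, so under lowestPrice the fleet is
# expected to fill with 3 t2.small instances (6 / 2.0), while diversified
# spreads across both instance types, as the assertions below check.
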
@mock_ec2
def test_create_spot_fleet_with_lowest_price():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id)
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
    len(spot_fleet_requests).should.equal(1)
    spot_fleet_request = spot_fleet_requests[0]
    spot_fleet_request['SpotFleetRequestState'].should.equal("active")
    spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']

    spot_fleet_config['SpotPrice'].should.equal('0.12')
    spot_fleet_config['TargetCapacity'].should.equal(6)
    spot_fleet_config['IamFleetRole'].should.equal(
        'arn:aws:iam::123456789012:role/fleet')
    spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice')
    spot_fleet_config['FulfilledCapacity'].should.equal(6.0)

    len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
    launch_spec = spot_fleet_config['LaunchSpecifications'][0]

    launch_spec['EbsOptimized'].should.equal(False)
    launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}])
    launch_spec['IamInstanceProfile'].should.equal(
        {"Arn": "arn:aws:iam::123456789012:role/fleet"})
    launch_spec['ImageId'].should.equal("ami-123")
    launch_spec['InstanceType'].should.equal("t2.small")
    launch_spec['KeyName'].should.equal("my-key")
    launch_spec['Monitoring'].should.equal({"Enabled": True})
    launch_spec['SpotPrice'].should.equal("0.13")
    launch_spec['SubnetId'].should.equal(subnet_id)
    launch_spec['UserData'].should.equal("some user data")
    launch_spec['WeightedCapacity'].should.equal(2.0)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(3)


@mock_ec2
def test_create_diversified_spot_fleet():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)
    diversified_config = spot_config(
        subnet_id, allocation_strategy='diversified')

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=diversified_config
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(2)
    instance_types = set([instance['InstanceType'] for instance in instances])
    instance_types.should.equal(set(["t2.small", "t2.large"]))
    instances[0]['InstanceId'].should.contain("i-")


@mock_ec2
def test_create_spot_fleet_request_with_tag_spec():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    tag_spec = [
        {
            'ResourceType': 'instance',
            'Tags': [
                {
                    'Key': 'tag-1',
                    'Value': 'foo',
                },
                {
                    'Key': 'tag-2',
                    'Value': 'bar',
                },
            ]
        },
    ]
    config = spot_config(subnet_id)
    config['LaunchSpecifications'][0]['TagSpecifications'] = tag_spec
    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=config
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
    spot_fleet_config = spot_fleet_requests[0]['SpotFleetRequestConfig']
    spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0][
        'ResourceType'].should.equal('instance')
    for tag in tag_spec[0]['Tags']:
        spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0]['Tags'].should.contain(tag)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = conn.describe_instances(InstanceIds=[i['InstanceId'] for i in instance_res['ActiveInstances']])
    for instance in instances['Reservations'][0]['Instances']:
        for tag in tag_spec[0]['Tags']:
            instance['Tags'].should.contain(tag)

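# As the test above asserts, TagSpecifications attached to a launch spec are
# echoed back by describe_spot_fleet_requests and also applied to the
# instances the fleet launches.
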
@mock_ec2
def test_cancel_spot_fleet_request():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    conn.cancel_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True)

    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
    len(spot_fleet_requests).should.equal(0)


@mock_ec2
def test_modify_spot_fleet_request_up():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=20)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(10)

    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
    spot_fleet_config['TargetCapacity'].should.equal(20)
    spot_fleet_config['FulfilledCapacity'].should.equal(20.0)


@mock_ec2
def test_modify_spot_fleet_request_up_diversified():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(
            subnet_id, allocation_strategy='diversified'),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=19)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(7)

    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
    spot_fleet_config['TargetCapacity'].should.equal(19)
    spot_fleet_config['FulfilledCapacity'].should.equal(20.0)


@mock_ec2
def test_modify_spot_fleet_request_down_no_terminate():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination")

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(3)

    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
    spot_fleet_config['TargetCapacity'].should.equal(1)
    spot_fleet_config['FulfilledCapacity'].should.equal(6.0)


@mock_ec2
def test_modify_spot_fleet_request_down_odd():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=7)
    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=5)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(3)

    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
    spot_fleet_config['TargetCapacity'].should.equal(5)
    spot_fleet_config['FulfilledCapacity'].should.equal(6.0)

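# In the down_odd case above, terminating another weight-2.0 instance would
# undershoot the target of 5, so the fleet keeps 3 instances and reports a
# FulfilledCapacity of 6.0 rather than dropping below the target.
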
@mock_ec2
def test_modify_spot_fleet_request_down():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=1)

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(1)

    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
    spot_fleet_config['TargetCapacity'].should.equal(1)
    spot_fleet_config['FulfilledCapacity'].should.equal(2.0)


@mock_ec2
def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    spot_fleet_res = conn.request_spot_fleet(
        SpotFleetRequestConfig=spot_config(subnet_id),
    )
    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]])

    conn.modify_spot_fleet_request(
        SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination")

    instance_res = conn.describe_spot_fleet_instances(
        SpotFleetRequestId=spot_fleet_id)
    instances = instance_res['ActiveInstances']
    len(instances).should.equal(1)

    spot_fleet_config = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig']
    spot_fleet_config['TargetCapacity'].should.equal(1)
    spot_fleet_config['FulfilledCapacity'].should.equal(2.0)


@mock_ec2
def test_create_spot_fleet_without_spot_price():
    conn = boto3.client("ec2", region_name='us-west-2')
    subnet_id = get_subnet_id(conn)

    # remove prices to force a fallback to ondemand price
    spot_config_without_price = spot_config(subnet_id)
    del spot_config_without_price['SpotPrice']
    for spec in spot_config_without_price['LaunchSpecifications']:
        del spec['SpotPrice']

    spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId']
    spot_fleet_requests = conn.describe_spot_fleet_requests(
        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
    len(spot_fleet_requests).should.equal(1)
    spot_fleet_request = spot_fleet_requests[0]
    spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']

    len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
    launch_spec1 = spot_fleet_config['LaunchSpecifications'][0]
    launch_spec2 = spot_fleet_config['LaunchSpecifications'][1]

    # AWS will figure out the price
    assert 'SpotPrice' not in launch_spec1
    assert 'SpotPrice' not in launch_spec2

@ -1,453 +1,482 @@
from __future__ import unicode_literals
from nose.tools import assert_raises

import itertools

import boto
import boto3
from botocore.exceptions import ClientError
from boto.exception import EC2ResponseError
from boto.ec2.instance import Reservation
import sure  # noqa

from moto import mock_ec2_deprecated, mock_ec2


@mock_ec2_deprecated
def test_add_tag():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]

    with assert_raises(EC2ResponseError) as ex:
        instance.add_tag("a key", "some value", dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')

    instance.add_tag("a key", "some value")
    chain = itertools.chain.from_iterable
    existing_instances = list(
        chain([res.instances for res in conn.get_all_instances()]))
    existing_instances.should.have.length_of(1)
    existing_instance = existing_instances[0]
    existing_instance.tags["a key"].should.equal("some value")


@mock_ec2_deprecated
def test_remove_tag():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]

    instance.add_tag("a key", "some value")

    tags = conn.get_all_tags()
    tag = tags[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")

    with assert_raises(EC2ResponseError) as ex:
        instance.remove_tag("a key", dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set')

    instance.remove_tag("a key")
    conn.get_all_tags().should.have.length_of(0)

    instance.add_tag("a key", "some value")
    conn.get_all_tags().should.have.length_of(1)
    instance.remove_tag("a key", "some value")


@mock_ec2_deprecated
def test_get_all_tags():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]

    instance.add_tag("a key", "some value")

    tags = conn.get_all_tags()
    tag = tags[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")


@mock_ec2_deprecated
def test_get_all_tags_with_special_characters():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]

    instance.add_tag("a key", "some<> value")

    tags = conn.get_all_tags()
    tag = tags[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some<> value")


@mock_ec2_deprecated
def test_create_tags():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    tag_dict = {'a key': 'some value',
                'another key': 'some other value',
                'blank key': ''}

    with assert_raises(EC2ResponseError) as ex:
        conn.create_tags(instance.id, tag_dict, dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')

    conn.create_tags(instance.id, tag_dict)
    tags = conn.get_all_tags()
    set([key for key in tag_dict]).should.equal(
        set([tag.name for tag in tags]))
    set([tag_dict[key] for key in tag_dict]).should.equal(
        set([tag.value for tag in tags]))


@mock_ec2_deprecated
def test_tag_limit_exceeded():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    tag_dict = {}
    for i in range(51):
        tag_dict['{0:02d}'.format(i + 1)] = ''

    with assert_raises(EC2ResponseError) as cm:
        conn.create_tags(instance.id, tag_dict)
    cm.exception.code.should.equal('TagLimitExceeded')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    instance.add_tag("a key", "a value")
    with assert_raises(EC2ResponseError) as cm:
        conn.create_tags(instance.id, tag_dict)
    cm.exception.code.should.equal('TagLimitExceeded')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    tags = conn.get_all_tags()
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.name.should.equal("a key")
    tag.value.should.equal("a value")

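# EC2 caps resources at 50 tags, so the 51-key dict above trips
# TagLimitExceeded both on a clean instance and when one tag already exists.
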
@mock_ec2_deprecated
def test_invalid_parameter_tag_null():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]

    with assert_raises(EC2ResponseError) as cm:
        instance.add_tag("a key", None)
    cm.exception.code.should.equal('InvalidParameterValue')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none


@mock_ec2_deprecated
def test_invalid_id():
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError) as cm:
        conn.create_tags('ami-blah', {'key': 'tag'})
    cm.exception.code.should.equal('InvalidID')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    with assert_raises(EC2ResponseError) as cm:
        conn.create_tags('blah-blah', {'key': 'tag'})
    cm.exception.code.should.equal('InvalidID')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none


@mock_ec2_deprecated
def test_get_all_tags_resource_id_filter():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    instance.add_tag("an instance key", "some value")
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)
    image.add_tag("an image key", "some value")

    tags = conn.get_all_tags(filters={'resource-id': instance.id})
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.res_id.should.equal(instance.id)
    tag.res_type.should.equal('instance')
    tag.name.should.equal("an instance key")
    tag.value.should.equal("some value")

    tags = conn.get_all_tags(filters={'resource-id': image_id})
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.res_id.should.equal(image_id)
    tag.res_type.should.equal('image')
    tag.name.should.equal("an image key")
    tag.value.should.equal("some value")


@mock_ec2_deprecated
def test_get_all_tags_resource_type_filter():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    instance.add_tag("an instance key", "some value")
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)
    image.add_tag("an image key", "some value")

    tags = conn.get_all_tags(filters={'resource-type': 'instance'})
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.res_id.should.equal(instance.id)
    tag.res_type.should.equal('instance')
    tag.name.should.equal("an instance key")
    tag.value.should.equal("some value")

    tags = conn.get_all_tags(filters={'resource-type': 'image'})
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.res_id.should.equal(image_id)
    tag.res_type.should.equal('image')
    tag.name.should.equal("an image key")
    tag.value.should.equal("some value")


@mock_ec2_deprecated
def test_get_all_tags_key_filter():
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    instance.add_tag("an instance key", "some value")
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)
    image.add_tag("an image key", "some value")

    tags = conn.get_all_tags(filters={'key': 'an instance key'})
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.res_id.should.equal(instance.id)
    tag.res_type.should.equal('instance')
    tag.name.should.equal("an instance key")
    tag.value.should.equal("some value")

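# The next test exercises filter wildcard semantics: '*' matches any
# character sequence and '?' a single character, while a backslash escape
# ('\*', '\?') matches the literal character in the tag value.
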
@mock_ec2_deprecated
|
||||
def test_get_all_tags_value_filter():
|
||||
conn = boto.connect_ec2('the_key', 'the_secret')
|
||||
reservation = conn.run_instances('ami-1234abcd')
|
||||
instance = reservation.instances[0]
|
||||
instance.add_tag("an instance key", "some value")
|
||||
reservation_b = conn.run_instances('ami-1234abcd')
|
||||
instance_b = reservation_b.instances[0]
|
||||
instance_b.add_tag("an instance key", "some other value")
|
||||
reservation_c = conn.run_instances('ami-1234abcd')
|
||||
instance_c = reservation_c.instances[0]
|
||||
instance_c.add_tag("an instance key", "other value*")
|
||||
reservation_d = conn.run_instances('ami-1234abcd')
|
||||
instance_d = reservation_d.instances[0]
|
||||
instance_d.add_tag("an instance key", "other value**")
|
||||
reservation_e = conn.run_instances('ami-1234abcd')
|
||||
instance_e = reservation_e.instances[0]
|
||||
instance_e.add_tag("an instance key", "other value*?")
|
||||
image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
|
||||
image = conn.get_image(image_id)
|
||||
image.add_tag("an image key", "some value")
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': 'some value'})
|
||||
tags.should.have.length_of(2)
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': 'some*value'})
|
||||
tags.should.have.length_of(3)
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': '*some*value'})
|
||||
tags.should.have.length_of(3)
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': '*some*value*'})
|
||||
tags.should.have.length_of(3)
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': '*value\*'})
|
||||
tags.should.have.length_of(1)
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': '*value\*\*'})
|
||||
tags.should.have.length_of(1)
|
||||
|
||||
tags = conn.get_all_tags(filters={'value': '*value\*\?'})
|
||||
tags.should.have.length_of(1)
|
||||
|
||||
|
||||
@mock_ec2_deprecated
def test_retrieved_instances_must_contain_their_tags():
    tag_key = 'Tag name'
    tag_value = 'Tag value'
    tags_to_be_set = {tag_key: tag_value}

    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    reservation.should.be.a(Reservation)
    reservation.instances.should.have.length_of(1)
    instance = reservation.instances[0]

    reservations = conn.get_all_instances()
    reservations.should.have.length_of(1)
    reservations[0].id.should.equal(reservation.id)
    instances = reservations[0].instances
    instances.should.have.length_of(1)
    instances[0].id.should.equal(instance.id)

    conn.create_tags([instance.id], tags_to_be_set)
    reservations = conn.get_all_instances()
    instance = reservations[0].instances[0]
    retrieved_tags = instance.tags

    # Cleanup of instance
    conn.terminate_instances([instances[0].id])

    # Check whether tag is present with correct value
    retrieved_tags[tag_key].should.equal(tag_value)


@mock_ec2_deprecated
def test_retrieved_volumes_must_contain_their_tags():
    tag_key = 'Tag name'
    tag_value = 'Tag value'
    tags_to_be_set = {tag_key: tag_value}
    conn = boto.connect_ec2('the_key', 'the_secret')
    volume = conn.create_volume(80, "us-east-1a")

    all_volumes = conn.get_all_volumes()
    volume = all_volumes[0]
    conn.create_tags([volume.id], tags_to_be_set)

    # Fetch the volume again
    all_volumes = conn.get_all_volumes()
    volume = all_volumes[0]
    retrieved_tags = volume.tags

    volume.delete()

    # Check whether tag is present with correct value
    retrieved_tags[tag_key].should.equal(tag_value)


@mock_ec2_deprecated
def test_retrieved_snapshots_must_contain_their_tags():
    tag_key = 'Tag name'
    tag_value = 'Tag value'
    tags_to_be_set = {tag_key: tag_value}
    conn = boto.connect_ec2(aws_access_key_id='the_key',
                            aws_secret_access_key='the_secret')
    volume = conn.create_volume(80, "eu-west-1a")
    snapshot = conn.create_snapshot(volume.id)
    conn.create_tags([snapshot.id], tags_to_be_set)

    # Fetch the snapshot again
    all_snapshots = conn.get_all_snapshots()
    snapshot = [item for item in all_snapshots if item.id == snapshot.id][0]
    retrieved_tags = snapshot.tags

    conn.delete_snapshot(snapshot.id)
    volume.delete()

    # Check whether tag is present with correct value
    retrieved_tags[tag_key].should.equal(tag_value)


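# Three filter spellings reach the same tags: 'tag:<key>' matches a specific
# key's value, 'tag-key' matches on the key alone, and 'tag-value' matches on
# the value alone.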
@mock_ec2_deprecated
def test_filter_instances_by_wildcard_tags():
    conn = boto.connect_ec2(aws_access_key_id='the_key',
                            aws_secret_access_key='the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance_a = reservation.instances[0]
    instance_a.add_tag("Key1", "Value1")
    reservation_b = conn.run_instances('ami-1234abcd')
    instance_b = reservation_b.instances[0]
    instance_b.add_tag("Key1", "Value2")

    reservations = conn.get_all_instances(filters={'tag:Key1': 'Value*'})
    reservations.should.have.length_of(2)

    reservations = conn.get_all_instances(filters={'tag-key': 'Key*'})
    reservations.should.have.length_of(2)

    reservations = conn.get_all_instances(filters={'tag-value': 'Value*'})
    reservations.should.have.length_of(2)


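# The boto3 tests below pass TagSpecifications so tags are applied in the same
# call that creates the resource, instead of a follow-up create_tags request.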
@mock_ec2
def test_create_volume_with_tags():
    client = boto3.client('ec2', 'us-west-2')
    response = client.create_volume(
        AvailabilityZone='us-west-2',
        Encrypted=False,
        Size=40,
        TagSpecifications=[
            {
                'ResourceType': 'volume',
                'Tags': [
                    {
                        'Key': 'TEST_TAG',
                        'Value': 'TEST_VALUE'
                    }
                ],
            }
        ]
    )

    assert response['Tags'][0]['Key'] == 'TEST_TAG'


@mock_ec2
def test_create_snapshot_with_tags():
    client = boto3.client('ec2', 'us-west-2')
    volume_id = client.create_volume(
        AvailabilityZone='us-west-2',
        Encrypted=False,
        Size=40,
        TagSpecifications=[
            {
                'ResourceType': 'volume',
                'Tags': [
                    {
                        'Key': 'TEST_TAG',
                        'Value': 'TEST_VALUE'
                    }
                ],
            }
        ]
    )['VolumeId']
    snapshot = client.create_snapshot(
        VolumeId=volume_id,
        TagSpecifications=[
            {
                'ResourceType': 'snapshot',
                'Tags': [
                    {
                        'Key': 'TEST_SNAPSHOT_TAG',
                        'Value': 'TEST_SNAPSHOT_VALUE'
                    }
                ],
            }
        ]
    )

    expected_tags = [{
        'Key': 'TEST_SNAPSHOT_TAG',
        'Value': 'TEST_SNAPSHOT_VALUE'
    }]

    assert snapshot['Tags'] == expected_tags


@mock_ec2
def test_create_tag_empty_resource():
    # create ec2 client in us-west-1
    client = boto3.client('ec2', region_name='us-west-1')
    # creating a tag with an empty resource list must fail
    with assert_raises(ClientError) as ex:
        client.create_tags(
            Resources=[],
            Tags=[{'Key': 'Value'}]
        )
    ex.exception.response['Error']['Code'].should.equal('MissingParameter')
    ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet')


@mock_ec2
def test_delete_tag_empty_resource():
    # create ec2 client in us-west-1
    client = boto3.client('ec2', region_name='us-west-1')
    # deleting a tag with an empty resource list must fail
    with assert_raises(ClientError) as ex:
        client.delete_tags(
            Resources=[],
            Tags=[{'Key': 'Value'}]
        )
    ex.exception.response['Error']['Code'].should.equal('MissingParameter')
    ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet')

@ -1,132 +1,133 @@
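# VPC peering connections move through a small state machine: the requester
# sees 'initiating-request', the accepter sees 'pending-acceptance', and the
# connection then becomes 'active', 'rejected', or 'deleted' depending on the
# action taken. The tests below walk through each transition.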
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
from moto.ec2.exceptions import EC2ClientError
from botocore.exceptions import ClientError

import boto3
import boto
from boto.exception import EC2ResponseError
import sure  # noqa

from moto import mock_ec2, mock_ec2_deprecated
from tests.helpers import requires_boto_gte


@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_vpc_peering_connections():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc = conn.create_vpc("10.0.0.0/16")
    peer_vpc = conn.create_vpc("11.0.0.0/16")

    vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id)
    vpc_pcx._status.code.should.equal('initiating-request')

    return vpc_pcx


@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_vpc_peering_connections_get_all():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc_pcx = test_vpc_peering_connections()
    vpc_pcx._status.code.should.equal('initiating-request')

    all_vpc_pcxs = conn.get_all_vpc_peering_connections()
    all_vpc_pcxs.should.have.length_of(1)
    all_vpc_pcxs[0]._status.code.should.equal('pending-acceptance')


@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_vpc_peering_connections_accept():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc_pcx = test_vpc_peering_connections()

    vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id)
    vpc_pcx._status.code.should.equal('active')

    with assert_raises(EC2ResponseError) as cm:
        conn.reject_vpc_peering_connection(vpc_pcx.id)
    cm.exception.code.should.equal('InvalidStateTransition')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    all_vpc_pcxs = conn.get_all_vpc_peering_connections()
    all_vpc_pcxs.should.have.length_of(1)
    all_vpc_pcxs[0]._status.code.should.equal('active')


@requires_boto_gte("2.32.0")
@mock_ec2_deprecated
def test_vpc_peering_connections_reject():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc_pcx = test_vpc_peering_connections()

    verdict = conn.reject_vpc_peering_connection(vpc_pcx.id)
    verdict.should.equal(True)

    with assert_raises(EC2ResponseError) as cm:
        conn.accept_vpc_peering_connection(vpc_pcx.id)
    cm.exception.code.should.equal('InvalidStateTransition')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    all_vpc_pcxs = conn.get_all_vpc_peering_connections()
    all_vpc_pcxs.should.have.length_of(1)
    all_vpc_pcxs[0]._status.code.should.equal('rejected')


@requires_boto_gte("2.32.1")
@mock_ec2_deprecated
def test_vpc_peering_connections_delete():
    conn = boto.connect_vpc('the_key', 'the_secret')
    vpc_pcx = test_vpc_peering_connections()

    verdict = vpc_pcx.delete()
    verdict.should.equal(True)

    # A deleted peering connection stays visible with status 'deleted'
    all_vpc_pcxs = conn.get_all_vpc_peering_connections()
    all_vpc_pcxs.should.have.length_of(1)
    all_vpc_pcxs[0]._status.code.should.equal('deleted')

    with assert_raises(EC2ResponseError) as cm:
        conn.delete_vpc_peering_connection("pcx-1234abcd")
    cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none


@mock_ec2
def test_vpc_peering_connections_cross_region():
    # create VPCs in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
    # create the cross-region peering connection
    vpc_pcx = ec2_usw1.create_vpc_peering_connection(
        VpcId=vpc_usw1.id,
        PeerVpcId=vpc_apn1.id,
        PeerRegion='ap-northeast-1',
    )
    vpc_pcx.status['Code'].should.equal('initiating-request')
    vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id)
    vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id)


@mock_ec2
def test_vpc_peering_connections_cross_region_fail():
    # create VPCs in us-west-1 and ap-northeast-1
    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
    # request peering against a region that has no such VPC; this must fail
    with assert_raises(ClientError) as cm:
        ec2_usw1.create_vpc_peering_connection(
            VpcId=vpc_usw1.id,
            PeerVpcId=vpc_apn1.id,
            PeerRegion='ap-northeast-2')
    cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound')

@ -47,6 +47,15 @@ def test_list_clusters():
        'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1')


@mock_ecs
def test_describe_clusters():
    client = boto3.client('ecs', region_name='us-east-1')
    response = client.describe_clusters(clusters=["some-cluster"])
    response['failures'].should.contain({
        'arn': 'arn:aws:ecs:us-east-1:012345678910:cluster/some-cluster',
        'reason': 'MISSING'
    })

@mock_ecs
def test_delete_cluster():
    client = boto3.client('ecs', region_name='us-east-1')
@ -925,6 +934,65 @@ def test_update_container_instances_state():
        status='test_status').should.throw(Exception)

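# The new test below registers several mocked EC2 instances as ECS container
# instances via their instance identity documents, then drains and
# re-activates them by ARN, checking that repeating a state change succeeds
# while an unknown status raises.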
@mock_ec2
@mock_ecs
def test_update_container_instances_state_by_arn():
    ecs_client = boto3.client('ecs', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    test_cluster_name = 'test_ecs_cluster'
    _ = ecs_client.create_cluster(
        clusterName=test_cluster_name
    )

    instance_to_create = 3
    test_instance_arns = []
    for i in range(0, instance_to_create):
        test_instance = ec2.create_instances(
            ImageId="ami-1234abcd",
            MinCount=1,
            MaxCount=1,
        )[0]

        instance_id_document = json.dumps(
            ec2_utils.generate_instance_identity_document(test_instance)
        )

        response = ecs_client.register_container_instance(
            cluster=test_cluster_name,
            instanceIdentityDocument=instance_id_document)

        test_instance_arns.append(response['containerInstance']['containerInstanceArn'])

    response = ecs_client.update_container_instances_state(cluster=test_cluster_name,
                                                           containerInstances=test_instance_arns,
                                                           status='DRAINING')
    len(response['failures']).should.equal(0)
    len(response['containerInstances']).should.equal(instance_to_create)
    response_statuses = [ci['status'] for ci in response['containerInstances']]
    for status in response_statuses:
        status.should.equal('DRAINING')
    # Draining an already-draining instance is a no-op, not an error
    response = ecs_client.update_container_instances_state(cluster=test_cluster_name,
                                                           containerInstances=test_instance_arns,
                                                           status='DRAINING')
    len(response['failures']).should.equal(0)
    len(response['containerInstances']).should.equal(instance_to_create)
    response_statuses = [ci['status'] for ci in response['containerInstances']]
    for status in response_statuses:
        status.should.equal('DRAINING')
    response = ecs_client.update_container_instances_state(cluster=test_cluster_name,
                                                           containerInstances=test_instance_arns,
                                                           status='ACTIVE')
    len(response['failures']).should.equal(0)
    len(response['containerInstances']).should.equal(instance_to_create)
    response_statuses = [ci['status'] for ci in response['containerInstances']]
    for status in response_statuses:
        status.should.equal('ACTIVE')
    ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name,
                                                                 containerInstances=test_instance_arns,
                                                                 status='test_status').should.throw(Exception)


@mock_ec2
@mock_ecs
def test_run_task():

@ -1,211 +1,216 @@
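# RULES and TARGETS form a shared fixture: generate_environment() creates
# every rule and attaches each target whose 'Rules' list names that rule, so
# the tests below can assert against a known rule/target topology.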
import random
import boto3
import json

from moto.events import mock_events
from botocore.exceptions import ClientError
from nose.tools import assert_raises

RULES = [
    {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'},
    {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'},
    {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'}
]

TARGETS = {
    'test-target-1': {
        'Id': 'test-target-1',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1',
        'Rules': ['test1', 'test2']
    },
    'test-target-2': {
        'Id': 'test-target-2',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2',
        'Rules': ['test1', 'test3']
    },
    'test-target-3': {
        'Id': 'test-target-3',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-3',
        'Rules': ['test1', 'test2']
    },
    'test-target-4': {
        'Id': 'test-target-4',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4',
        'Rules': ['test1', 'test3']
    },
    'test-target-5': {
        'Id': 'test-target-5',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5',
        'Rules': ['test1', 'test2']
    },
    'test-target-6': {
        'Id': 'test-target-6',
        'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6',
        'Rules': ['test1', 'test3']
    }
}


def get_random_rule():
    return RULES[random.randint(0, len(RULES) - 1)]


def generate_environment():
    client = boto3.client('events', 'us-west-2')

    for rule in RULES:
        client.put_rule(
            Name=rule['Name'],
            ScheduleExpression=rule.get('ScheduleExpression', ''),
            EventPattern=rule.get('EventPattern', '')
        )

        targets = []
        for target in TARGETS:
            if rule['Name'] in TARGETS[target].get('Rules'):
                targets.append({'Id': target, 'Arn': TARGETS[target]['Arn']})

        client.put_targets(Rule=rule['Name'], Targets=targets)

    return client


@mock_events
def test_list_rules():
    client = generate_environment()
    response = client.list_rules()

    assert(response is not None)
    assert(len(response['Rules']) > 0)


@mock_events
def test_describe_rule():
    rule_name = get_random_rule()['Name']
    client = generate_environment()
    response = client.describe_rule(Name=rule_name)

    assert(response is not None)
    assert(response.get('Name') == rule_name)
    assert(response.get('Arn') is not None)


@mock_events
def test_enable_disable_rule():
    rule_name = get_random_rule()['Name']
    client = generate_environment()

    # Rules should start out enabled in these tests.
    rule = client.describe_rule(Name=rule_name)
    assert(rule['State'] == 'ENABLED')

    client.disable_rule(Name=rule_name)
    rule = client.describe_rule(Name=rule_name)
    assert(rule['State'] == 'DISABLED')

    client.enable_rule(Name=rule_name)
    rule = client.describe_rule(Name=rule_name)
    assert(rule['State'] == 'ENABLED')

    # Test invalid name
    # (note: if no error is raised, the assert below never runs)
    try:
        client.enable_rule(Name='junk')

    except ClientError as ce:
        assert ce.response['Error']['Code'] == 'ResourceNotFoundException'


@mock_events
def test_list_rule_names_by_target():
    test_1_target = TARGETS['test-target-1']
    test_2_target = TARGETS['test-target-2']
    client = generate_environment()

    rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn'])
    assert(len(rules['RuleNames']) == len(test_1_target['Rules']))
    for rule in rules['RuleNames']:
        assert(rule in test_1_target['Rules'])

    rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn'])
    assert(len(rules['RuleNames']) == len(test_2_target['Rules']))
    for rule in rules['RuleNames']:
        assert(rule in test_2_target['Rules'])


# NOTE: this second test_list_rules definition shadows the one above, so only
# this stricter check is collected and run.
@mock_events
def test_list_rules():
    client = generate_environment()

    rules = client.list_rules()
    assert(len(rules['Rules']) == len(RULES))


@mock_events
def test_delete_rule():
    client = generate_environment()

    client.delete_rule(Name=RULES[0]['Name'])
    rules = client.list_rules()
    assert(len(rules['Rules']) == len(RULES) - 1)


@mock_events
def test_list_targets_by_rule():
    rule_name = get_random_rule()['Name']
    client = generate_environment()
    targets = client.list_targets_by_rule(Rule=rule_name)

    expected_targets = []
    for target in TARGETS:
        if rule_name in TARGETS[target].get('Rules'):
            expected_targets.append(target)

    assert(len(targets['Targets']) == len(expected_targets))


@mock_events
def test_remove_targets():
    rule_name = get_random_rule()['Name']
    client = generate_environment()

    targets = client.list_targets_by_rule(Rule=rule_name)['Targets']
    targets_before = len(targets)
    assert(targets_before > 0)

    client.remove_targets(Rule=rule_name, Ids=[targets[0]['Id']])

    targets = client.list_targets_by_rule(Rule=rule_name)['Targets']
    targets_after = len(targets)
    assert(targets_before - 1 == targets_after)


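# describe_event_bus returns the bus's resource policy as a JSON string; each
# put_permission call adds one policy statement whose Sid is the StatementId
# passed in, which is what the assertions below count.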
@mock_events
def test_permissions():
    client = boto3.client('events', 'eu-central-1')

    client.put_permission(Action='events:PutEvents', Principal='111111111111', StatementId='Account1')
    client.put_permission(Action='events:PutEvents', Principal='222222222222', StatementId='Account2')

    resp = client.describe_event_bus()
    resp_policy = json.loads(resp['Policy'])
    assert len(resp_policy['Statement']) == 2

    client.remove_permission(StatementId='Account2')

    resp = client.describe_event_bus()
    resp_policy = json.loads(resp['Policy'])
    assert len(resp_policy['Statement']) == 1
    assert resp_policy['Statement'][0]['Sid'] == 'Account1'


@mock_events
def test_put_events():
    client = boto3.client('events', 'eu-central-1')

    event = {
        "Source": "com.mycompany.myapp",
        "Detail": '{"key1": "value3", "key2": "value4"}',
        "Resources": ["resource1", "resource2"],
        "DetailType": "myDetailType"
    }

    client.put_events(Entries=[event])
    # Boto3 would error if it didn't return 200 OK

    # AWS caps PutEvents at 10 entries per call, so 20 entries must be rejected
    with assert_raises(ClientError):
        client.put_events(Entries=[event] * 20)

File diff suppressed because it is too large
File diff suppressed because it is too large
8 tests/test_packages/__init__.py Normal file
@ -0,0 +1,8 @@
from __future__ import unicode_literals

import logging
# Disable extra logging for tests
logging.getLogger('boto').setLevel(logging.CRITICAL)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('nose').setLevel(logging.CRITICAL)
37 tests/test_packages/test_httpretty.py Normal file
@ -0,0 +1,37 @@
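# moto vendors httpretty under moto.packages (note the import path below);
# these tests poke its request parsing and fake DNS helpers directly.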
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mock

from moto.packages.httpretty.core import HTTPrettyRequest, fake_gethostname, fake_gethostbyname


def test_parse_querystring():

    core = HTTPrettyRequest(headers='test test HTTP/1.1')

    qs = 'test test'
    response = core.parse_querystring(qs)

    assert response == {}

def test_parse_request_body():
    core = HTTPrettyRequest(headers='test test HTTP/1.1')

    qs = 'test'
    response = core.parse_request_body(qs)

    assert response == 'test'

def test_fake_gethostname():

    response = fake_gethostname()

    assert response == 'localhost'

def test_fake_gethostbyname():

    host = 'test'
    response = fake_gethostbyname(host=host)

    assert response == '127.0.0.1'
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,105 +1,106 @@
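# These server-mode tests drive moto's Flask app through its test client; the
# AuthenticatedClient subclass injects an Authorization header (and a zero
# Content-Length) so requests get past moto's request parsing.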
# coding=utf-8

from __future__ import unicode_literals
import sure  # noqa

from flask.testing import FlaskClient
import moto.server as server

'''
Test the different server responses
'''


class AuthenticatedClient(FlaskClient):
    def open(self, *args, **kwargs):
        kwargs['headers'] = kwargs.get('headers', {})
        kwargs['headers']['Authorization'] = "Any authorization header"
        kwargs['content_length'] = 0  # Fixes content-length complaints.
        return super(AuthenticatedClient, self).open(*args, **kwargs)


def authenticated_client():
    backend = server.create_backend_app("s3")
    backend.test_client_class = AuthenticatedClient
    return backend.test_client()


def test_s3_server_get():
    test_client = authenticated_client()
    res = test_client.get('/')

    res.data.should.contain(b'ListAllMyBucketsResult')


def test_s3_server_bucket_create():
    test_client = authenticated_client()

    res = test_client.put('/', 'http://foobaz.localhost:5000/')
    res.status_code.should.equal(200)

    res = test_client.get('/')
    res.data.should.contain(b'<Name>foobaz</Name>')

    res = test_client.get('/', 'http://foobaz.localhost:5000/')
    res.status_code.should.equal(200)
    res.data.should.contain(b"ListBucketResult")

    res = test_client.put(
        '/bar', 'http://foobaz.localhost:5000/', data='test value')
    res.status_code.should.equal(200)
    assert 'ETag' in dict(res.headers)

    res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
    res.status_code.should.equal(200)
    res.data.should.equal(b"test value")


def test_s3_server_bucket_versioning():
    test_client = authenticated_client()

    # Just enough XML to enable versioning
    body = '<Status>Enabled</Status>'
    res = test_client.put(
        '/?versioning', 'http://foobaz.localhost:5000', data=body)
    res.status_code.should.equal(200)


def test_s3_server_post_to_bucket():
    test_client = authenticated_client()

    res = test_client.put('/', 'http://tester.localhost:5000/')
    res.status_code.should.equal(200)

    test_client.post('/', "https://tester.localhost:5000/", data={
        'key': 'the-key',
        'file': 'nothing'
    })

    res = test_client.get('/the-key', 'http://tester.localhost:5000/')
    res.status_code.should.equal(200)
    res.data.should.equal(b"nothing")


def test_s3_server_post_without_content_length():
    test_client = authenticated_client()

    res = test_client.put('/', 'http://tester.localhost:5000/', environ_overrides={'CONTENT_LENGTH': ''})
    res.status_code.should.equal(411)

    res = test_client.post('/', "https://tester.localhost:5000/", environ_overrides={'CONTENT_LENGTH': ''})
    res.status_code.should.equal(411)


def test_s3_server_post_unicode_bucket_key():
    # Make sure that we can deal with non-ascii characters in request URLs (e.g., S3 object names)
    dispatcher = server.DomainDispatcherApplication(server.create_backend_app)
    backend_app = dispatcher.get_application({
        'HTTP_HOST': 's3.amazonaws.com',
        'PATH_INFO': '/test-bucket/test-object-てすと'
    })
    assert backend_app
    backend_app = dispatcher.get_application({
        'HTTP_HOST': 's3.amazonaws.com',
        'PATH_INFO': '/test-bucket/test-object-てすと'.encode('utf-8')
    })
    assert backend_app

@ -1,113 +1,114 @@
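# The same server tests, but against the "s3bucket_path" backend, which
# addresses buckets path-style (http://host/bucket/key) rather than through a
# bucket subdomain.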
from __future__ import unicode_literals
import sure  # noqa

from flask.testing import FlaskClient
import moto.server as server

'''
Test the different server responses
'''


class AuthenticatedClient(FlaskClient):
    def open(self, *args, **kwargs):
        kwargs['headers'] = kwargs.get('headers', {})
        kwargs['headers']['Authorization'] = "Any authorization header"
        kwargs['content_length'] = 0  # Fixes content-length complaints.
        return super(AuthenticatedClient, self).open(*args, **kwargs)


def authenticated_client():
    backend = server.create_backend_app("s3bucket_path")
    backend.test_client_class = AuthenticatedClient
    return backend.test_client()


def test_s3_server_get():
    test_client = authenticated_client()

    res = test_client.get('/')

    res.data.should.contain(b'ListAllMyBucketsResult')


def test_s3_server_bucket_create():
    test_client = authenticated_client()

    res = test_client.put('/foobar', 'http://localhost:5000')
    res.status_code.should.equal(200)

    res = test_client.get('/')
    res.data.should.contain(b'<Name>foobar</Name>')

    res = test_client.get('/foobar', 'http://localhost:5000')
    res.status_code.should.equal(200)
    res.data.should.contain(b"ListBucketResult")

    res = test_client.put('/foobar2/', 'http://localhost:5000')
    res.status_code.should.equal(200)

    res = test_client.get('/')
    res.data.should.contain(b'<Name>foobar2</Name>')

    res = test_client.get('/foobar2/', 'http://localhost:5000')
    res.status_code.should.equal(200)
    res.data.should.contain(b"ListBucketResult")

    res = test_client.get('/missing-bucket', 'http://localhost:5000')
    res.status_code.should.equal(404)

    res = test_client.put(
        '/foobar/bar', 'http://localhost:5000', data='test value')
    res.status_code.should.equal(200)

    res = test_client.get('/foobar/bar', 'http://localhost:5000')
    res.status_code.should.equal(200)
    res.data.should.equal(b"test value")


def test_s3_server_post_to_bucket():
    test_client = authenticated_client()

    res = test_client.put('/foobar2', 'http://localhost:5000/')
    res.status_code.should.equal(200)

    test_client.post('/foobar2', "https://localhost:5000/", data={
        'key': 'the-key',
        'file': 'nothing'
    })

    res = test_client.get('/foobar2/the-key', 'http://localhost:5000/')
    res.status_code.should.equal(200)
    res.data.should.equal(b"nothing")


def test_s3_server_put_ipv6():
    test_client = authenticated_client()

    res = test_client.put('/foobar2', 'http://[::]:5000/')
    res.status_code.should.equal(200)

    test_client.post('/foobar2', "https://[::]:5000/", data={
        'key': 'the-key',
        'file': 'nothing'
    })

    res = test_client.get('/foobar2/the-key', 'http://[::]:5000/')
    res.status_code.should.equal(200)
    res.data.should.equal(b"nothing")


def test_s3_server_put_ipv4():
    test_client = authenticated_client()

    res = test_client.put('/foobar2', 'http://127.0.0.1:5000/')
    res.status_code.should.equal(200)

    test_client.post('/foobar2', "https://127.0.0.1:5000/", data={
        'key': 'the-key',
        'file': 'nothing'
    })

    res = test_client.get('/foobar2/the-key', 'http://127.0.0.1:5000/')
    res.status_code.should.equal(200)
    res.data.should.equal(b"nothing")

Some files were not shown because too many files have changed in this diff