Merge remote-tracking branch 'upstream/master'

Author: mickeypash
Date:   2017-11-13 19:59:51 +00:00
Commit: 98fa083586

90 changed files with 9148 additions and 516 deletions

CHANGELOG.md

@@ -3,6 +3,23 @@ Moto Changelog
 Latest
 ------
+1.1.24
+-----
+
+* Implemented Batch
+* Fixed regression with moto_server dashboard
+* Fixed and closed many outstanding bugs
+* Fixed serious performance problem with EC2 reservation listing
+* Fixed Route53 list_resource_record_sets
+
+1.1.23
+-----
+
+* Implemented X-Ray
+* Implemented Autoscaling EC2 attachment
+* Implemented Autoscaling Load Balancer methods
+* Improved DynamoDB filter expressions
+
 1.1.22
 -----

IMPLEMENTATION_COVERAGE.md (new file, 3651 lines)

File diff suppressed because it is too large.

MANIFEST.in

@@ -1,5 +1,6 @@
 include README.md LICENSE AUTHORS.md
 include requirements.txt requirements-dev.txt tox.ini
 include moto/ec2/resources/instance_types.json
+include moto/ec2/resources/amis.json
 recursive-include moto/templates *
 recursive-include tests *

Makefile

@@ -1,5 +1,13 @@
 SHELL := /bin/bash
 
+ifeq ($(TEST_SERVER_MODE), true)
+	# exclude test_iot and test_iotdata for now
+	# because authentication of iot is very complicated
+	TEST_EXCLUDE := --exclude='test_iot.*'
+else
+	TEST_EXCLUDE :=
+endif
+
 init:
 	@python setup.py develop
 	@pip install -r requirements.txt
@@ -10,8 +18,7 @@ lint:
 test: lint
 	rm -f .coverage
 	rm -rf cover
-	@nosetests -sv --with-coverage --cover-html ./tests/
+	@nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE)
 
 test_server:
 	@TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/
@@ -29,7 +36,14 @@ tag_github_release:
 	git tag `python setup.py --version`
 	git push origin `python setup.py --version`
 
-publish: upload_pypi_artifact push_dockerhub_image tag_github_release
+publish: implementation_coverage \
+	upload_pypi_artifact \
+	tag_github_release \
+	push_dockerhub_image
+
+implementation_coverage:
+	./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md
+	git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage"
 
 scaffold:
 	@pip install -r requirements-dev.txt > /dev/null

README.md

@@ -68,10 +68,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | Cloudwatch | @mock_cloudwatch | basic endpoints done |
 |------------------------------------------------------------------------------|
+| CloudwatchEvents | @mock_events | all endpoints done |
+|------------------------------------------------------------------------------|
 | Data Pipeline | @mock_datapipeline| basic endpoints done |
 |------------------------------------------------------------------------------|
 | DynamoDB | @mock_dynamodb | core endpoints done |
-| DynamoDB2 | @mock_dynamodb2 | core endpoints + partial indexes |
+| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes |
 |------------------------------------------------------------------------------|
 | EC2 | @mock_ec2 | core endpoints done |
 | - AMI | | core endpoints done |
@@ -86,7 +88,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | ELB | @mock_elb | core endpoints done |
 |------------------------------------------------------------------------------|
-| ELBv2 | @mock_elbv2 | core endpoints done |
+| ELBv2 | @mock_elbv2 | all endpoints done |
 |------------------------------------------------------------------------------|
 | EMR | @mock_emr | core endpoints done |
 |------------------------------------------------------------------------------|
@@ -94,6 +96,9 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | IAM | @mock_iam | core endpoints done |
 |------------------------------------------------------------------------------|
+| IoT | @mock_iot | core endpoints done |
+| | @mock_iotdata | core endpoints done |
+|------------------------------------------------------------------------------|
 | Lambda | @mock_lambda | basic endpoints done, requires |
 | | | docker |
 |------------------------------------------------------------------------------|
@@ -115,7 +120,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | S3 | @mock_s3 | core endpoints done |
 |------------------------------------------------------------------------------|
-| SES | @mock_ses | core endpoints done |
+| SES | @mock_ses | all endpoints done |
 |------------------------------------------------------------------------------|
 | SNS | @mock_sns | all endpoints done |
 |------------------------------------------------------------------------------|
@@ -127,7 +132,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
 |------------------------------------------------------------------------------|
 | SWF | @mock_swf | basic endpoints done |
 |------------------------------------------------------------------------------|
-| X-Ray | @mock_xray | core endpoints done |
+| X-Ray | @mock_xray | all endpoints done |
 |------------------------------------------------------------------------------|
 ```
@@ -297,6 +302,7 @@ boto3.resource(
 ## Install
 ```console
 $ pip install moto
 ```

moto/__init__.py

@@ -41,6 +41,8 @@ from .swf import mock_swf, mock_swf_deprecated  # flake8: noqa
 from .xray import mock_xray, mock_xray_client, XRaySegment  # flake8: noqa
 from .logs import mock_logs, mock_logs_deprecated  # flake8: noqa
 from .batch import mock_batch  # flake8: noqa
+from .iot import mock_iot  # flake8: noqa
+from .iotdata import mock_iotdata  # flake8: noqa
 
 try:

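The newly exported `mock_iot` and `mock_iotdata` decorators are used like any other moto mock. A minimal sketch (not part of this commit), assuming the IoT backend covers `create_thing`/`list_things`:

```python
import boto3
from moto import mock_iot


@mock_iot
def test_create_thing():
    client = boto3.client('iot', region_name='us-east-1')
    client.create_thing(thingName='my-thing')

    # the fake backend should now report the thing we just registered
    things = client.list_things()['things']
    assert [t['thingName'] for t in things] == ['my-thing']
```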
moto/autoscaling/models.py

@@ -185,7 +185,7 @@ class FakeAutoScalingGroup(BaseModel):
         target_group_arns = properties.get("TargetGroupARNs", [])
 
         backend = autoscaling_backends[region_name]
-        group = backend.create_autoscaling_group(
+        group = backend.create_auto_scaling_group(
             name=resource_name,
             availability_zones=properties.get("AvailabilityZones", []),
             desired_capacity=properties.get("DesiredCapacity"),
@@ -215,13 +215,13 @@
     def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
         backend = autoscaling_backends[region_name]
         try:
-            backend.delete_autoscaling_group(resource_name)
+            backend.delete_auto_scaling_group(resource_name)
         except KeyError:
             pass
 
     def delete(self, region_name):
         backend = autoscaling_backends[region_name]
-        backend.delete_autoscaling_group(self.name)
+        backend.delete_auto_scaling_group(self.name)
 
     @property
     def physical_resource_id(self):
@@ -358,7 +358,7 @@ class AutoScalingBackend(BaseBackend):
     def delete_launch_configuration(self, launch_configuration_name):
         self.launch_configurations.pop(launch_configuration_name, None)
 
-    def create_autoscaling_group(self, name, availability_zones,
+    def create_auto_scaling_group(self, name, availability_zones,
                                  desired_capacity, max_size, min_size,
                                  launch_config_name, vpc_zone_identifier,
                                  default_cooldown, health_check_period,
@@ -402,7 +402,7 @@
         self.update_attached_target_groups(group.name)
         return group
 
-    def update_autoscaling_group(self, name, availability_zones,
+    def update_auto_scaling_group(self, name, availability_zones,
                                  desired_capacity, max_size, min_size,
                                  launch_config_name, vpc_zone_identifier,
                                  default_cooldown, health_check_period,
@@ -415,18 +415,18 @@
                                   placement_group, termination_policies)
         return group
 
-    def describe_autoscaling_groups(self, names):
+    def describe_auto_scaling_groups(self, names):
         groups = self.autoscaling_groups.values()
         if names:
             return [group for group in groups if group.name in names]
         else:
             return list(groups)
 
-    def delete_autoscaling_group(self, group_name):
+    def delete_auto_scaling_group(self, group_name):
         self.set_desired_capacity(group_name, 0)
         self.autoscaling_groups.pop(group_name, None)
 
-    def describe_autoscaling_instances(self):
+    def describe_auto_scaling_instances(self):
         instance_states = []
         for group in self.autoscaling_groups.values():
             instance_states.extend(group.instance_states)

moto/autoscaling/responses.py

@@ -67,7 +67,7 @@ class AutoScalingResponse(BaseResponse):
         return template.render()
 
     def create_auto_scaling_group(self):
-        self.autoscaling_backend.create_autoscaling_group(
+        self.autoscaling_backend.create_auto_scaling_group(
             name=self._get_param('AutoScalingGroupName'),
             availability_zones=self._get_multi_param(
                 'AvailabilityZones.member'),
@@ -160,7 +160,7 @@
     def describe_auto_scaling_groups(self):
         names = self._get_multi_param("AutoScalingGroupNames.member")
         token = self._get_param("NextToken")
-        all_groups = self.autoscaling_backend.describe_autoscaling_groups(names)
+        all_groups = self.autoscaling_backend.describe_auto_scaling_groups(names)
         all_names = [group.name for group in all_groups]
         if token:
             start = all_names.index(token) + 1
@@ -177,7 +177,7 @@
         return template.render(groups=groups, next_token=next_token)
 
     def update_auto_scaling_group(self):
-        self.autoscaling_backend.update_autoscaling_group(
+        self.autoscaling_backend.update_auto_scaling_group(
             name=self._get_param('AutoScalingGroupName'),
             availability_zones=self._get_multi_param(
                 'AvailabilityZones.member'),
@@ -198,7 +198,7 @@
     def delete_auto_scaling_group(self):
         group_name = self._get_param('AutoScalingGroupName')
-        self.autoscaling_backend.delete_autoscaling_group(group_name)
+        self.autoscaling_backend.delete_auto_scaling_group(group_name)
         template = self.response_template(DELETE_AUTOSCALING_GROUP_TEMPLATE)
         return template.render()
@@ -218,7 +218,7 @@
         return template.render()
 
     def describe_auto_scaling_instances(self):
-        instance_states = self.autoscaling_backend.describe_autoscaling_instances()
+        instance_states = self.autoscaling_backend.describe_auto_scaling_instances()
         template = self.response_template(
             DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE)
         return template.render(instance_states=instance_states)
@@ -314,7 +314,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """<DescribeLaunchConfigurationsRespon
         {% endif %}
         <InstanceType>{{ launch_configuration.instance_type }}</InstanceType>
         <LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:
-        9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc</LaunchConfigurationARN>
+        9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }}</LaunchConfigurationARN>
         {% if launch_configuration.block_device_mappings %}
           <BlockDeviceMappings>
           {% for mount_point, mapping in launch_configuration.block_device_mappings.items() %}
@@ -504,7 +504,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
         <HealthCheckGracePeriod>{{ group.health_check_period }}</HealthCheckGracePeriod>
         <DefaultCooldown>{{ group.default_cooldown }}</DefaultCooldown>
         <AutoScalingGroupARN>arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb
-        :autoScalingGroupName/my-test-asg-lbs</AutoScalingGroupARN>
+        :autoScalingGroupName/{{ group.name }}</AutoScalingGroupARN>
         {% if group.termination_policies %}
         <TerminationPolicies>
         {% for policy in group.termination_policies %}

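The renamed backend methods above are reached through the usual boto3-facing actions. A minimal round-trip sketch (not from this commit) under `mock_autoscaling`:

```python
import boto3
from moto import mock_autoscaling


@mock_autoscaling
def test_create_and_describe_group():
    client = boto3.client('autoscaling', region_name='us-east-1')
    client.create_launch_configuration(
        LaunchConfigurationName='my-lc',
        ImageId='ami-12345678',          # any id works against the fake backend
        InstanceType='t2.micro')
    client.create_auto_scaling_group(
        AutoScalingGroupName='my-asg',
        LaunchConfigurationName='my-lc',
        MinSize=1, MaxSize=2,
        AvailabilityZones=['us-east-1a'])

    groups = client.describe_auto_scaling_groups()['AutoScalingGroups']
    assert groups[0]['AutoScalingGroupName'] == 'my-asg'
```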
moto/awslambda/models.py

@@ -298,7 +298,12 @@ class LambdaFunction(BaseModel):
                     volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs)
             finally:
                 if container:
-                    exit_code = container.wait()
+                    try:
+                        exit_code = container.wait(timeout=300)
+                    except requests.exceptions.ReadTimeout:
+                        exit_code = -1
+                        container.stop()
+                        container.kill()
                     output = container.logs(stdout=False, stderr=True)
                     output += container.logs(stdout=True, stderr=False)
                     container.remove()

moto/backends.py

@@ -35,8 +35,11 @@ from moto.sqs import sqs_backends
 from moto.ssm import ssm_backends
 from moto.sts import sts_backends
 from moto.xray import xray_backends
+from moto.iot import iot_backends
+from moto.iotdata import iotdata_backends
 from moto.batch import batch_backends
 
 BACKENDS = {
     'acm': acm_backends,
     'apigateway': apigateway_backends,
@@ -74,7 +77,9 @@ BACKENDS = {
     'sts': sts_backends,
     'route53': route53_backends,
     'lambda': lambda_backends,
-    'xray': xray_backends
+    'xray': xray_backends,
+    'iot': iot_backends,
+    'iot-data': iotdata_backends,
 }

moto/cloudformation/parsing.py

@@ -15,6 +15,7 @@ from moto.dynamodb import models as dynamodb_models
 from moto.ec2 import models as ec2_models
 from moto.ecs import models as ecs_models
 from moto.elb import models as elb_models
+from moto.elbv2 import models as elbv2_models
 from moto.iam import models as iam_models
 from moto.kinesis import models as kinesis_models
 from moto.kms import models as kms_models
@@ -61,6 +62,9 @@ MODEL_MAP = {
     "AWS::ECS::TaskDefinition": ecs_models.TaskDefinition,
     "AWS::ECS::Service": ecs_models.Service,
     "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer,
+    "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer,
+    "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup,
+    "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener,
     "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline,
     "AWS::IAM::InstanceProfile": iam_models.InstanceProfile,
     "AWS::IAM::Role": iam_models.Role,
@@ -326,7 +330,7 @@ def parse_output(output_logical_id, output_json, resources_map):
     output_json = clean_json(output_json, resources_map)
     output = Output()
     output.key = output_logical_id
-    output.value = output_json['Value']
+    output.value = clean_json(output_json['Value'], resources_map)
     output.description = output_json.get('Description')
     return output

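With `clean_json` now applied to `Output` values, intrinsic functions such as `Ref`/`Fn::GetAtt` inside a template's Outputs section are resolved instead of being echoed back as raw JSON. A rough sketch of the behaviour this enables (not from this commit; assumes the referenced SQS model exposes the `QueueName` attribute):

```python
import json

import boto3
from moto import mock_cloudformation, mock_sqs

TEMPLATE = {
    "Resources": {
        "Queue": {"Type": "AWS::SQS::Queue", "Properties": {"QueueName": "my-queue"}},
    },
    "Outputs": {
        "QueueName": {"Value": {"Fn::GetAtt": ["Queue", "QueueName"]}},
    },
}


@mock_cloudformation
@mock_sqs
def test_output_resolves_getatt():
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_stack(StackName='test', TemplateBody=json.dumps(TEMPLATE))

    outputs = cf.describe_stacks(StackName='test')['Stacks'][0]['Outputs']
    # previously this would have surfaced the raw {"Fn::GetAtt": ...} structure
    assert outputs[0]['OutputValue'] == 'my-queue'
```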
moto/cloudformation/responses.py

@@ -19,10 +19,19 @@ class CloudFormationResponse(BaseResponse):
         template_url_parts = urlparse(template_url)
         if "localhost" in template_url:
             bucket_name, key_name = template_url_parts.path.lstrip(
-                "/").split("/")
+                "/").split("/", 1)
         else:
-            bucket_name = template_url_parts.netloc.split(".")[0]
-            key_name = template_url_parts.path.lstrip("/")
+            if template_url_parts.netloc.endswith('amazonaws.com') \
+                    and template_url_parts.netloc.startswith('s3'):
+                # Handle when S3 url uses amazon url with bucket in path
+                # Also handles getting region as technically s3 is region'd
+
+                # region = template_url.netloc.split('.')[1]
+                bucket_name, key_name = template_url_parts.path.lstrip(
+                    "/").split("/", 1)
+            else:
+                bucket_name = template_url_parts.netloc.split(".")[0]
+                key_name = template_url_parts.path.lstrip("/")
         key = s3_backend.get_key(bucket_name, key_name)
         return key.value.decode("utf-8")
@@ -227,13 +236,13 @@ CREATE_STACK_RESPONSE_TEMPLATE = """<CreateStackResponse>
 </CreateStackResponse>
 """
 
-UPDATE_STACK_RESPONSE_TEMPLATE = """<UpdateStackResponse>
+UPDATE_STACK_RESPONSE_TEMPLATE = """<UpdateStackResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
   <UpdateStackResult>
     <StackId>{{ stack.stack_id }}</StackId>
   </UpdateStackResult>
   <ResponseMetadata>
-    <RequestId>b9b5b068-3a41-11e5-94eb-example</RequestId>
+    <RequestId>b9b4b068-3a41-11e5-94eb-example</RequestId>
   </ResponseMetadata>
 </UpdateStackResponse>
 """
@@ -399,16 +408,6 @@ GET_TEMPLATE_RESPONSE_TEMPLATE = """<GetTemplateResponse>
 </GetTemplateResponse>"""
 
-UPDATE_STACK_RESPONSE_TEMPLATE = """<UpdateStackResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
-  <UpdateStackResult>
-    <StackId>{{ stack.stack_id }}</StackId>
-  </UpdateStackResult>
-  <ResponseMetadata>
-    <RequestId>b9b4b068-3a41-11e5-94eb-example</RequestId>
-  </ResponseMetadata>
-</UpdateStackResponse>
-"""
-
 DELETE_STACK_RESPONSE_TEMPLATE = """<DeleteStackResponse>
   <ResponseMetadata>
     <RequestId>5ccc7dcd-744c-11e5-be70-example</RequestId>
@@ -416,6 +415,7 @@ DELETE_STACK_RESPONSE_TEMPLATE = """<DeleteStackResponse>
 </DeleteStackResponse>
 """
 
 LIST_EXPORTS_RESPONSE = """<ListExportsResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
   <ListExportsResult>
     <Exports>

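The new branching above distinguishes path-style from virtual-hosted-style S3 template URLs. A standalone illustration of the same split (illustrative only, not moto code):

```python
from urllib.parse import urlparse


def bucket_and_key(template_url):
    parts = urlparse(template_url)
    if parts.netloc.startswith('s3') and parts.netloc.endswith('amazonaws.com'):
        # path-style: https://s3.amazonaws.com/<bucket>/<key...>
        return tuple(parts.path.lstrip('/').split('/', 1))
    # virtual-hosted-style: https://<bucket>.s3.amazonaws.com/<key...>
    return parts.netloc.split('.')[0], parts.path.lstrip('/')


assert bucket_and_key('https://s3.amazonaws.com/bucket/nested/stack.json') == ('bucket', 'nested/stack.json')
assert bucket_and_key('https://bucket.s3.amazonaws.com/nested/stack.json') == ('bucket', 'nested/stack.json')
```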
moto/cloudwatch/models.py

@@ -1,4 +1,7 @@
+import json
+
 from moto.core import BaseBackend, BaseModel
+from moto.core.exceptions import RESTError
 import boto.ec2.cloudwatch
 import datetime
@@ -35,9 +38,26 @@ class FakeAlarm(BaseModel):
         self.ok_actions = ok_actions
         self.insufficient_data_actions = insufficient_data_actions
         self.unit = unit
-        self.state_updated_timestamp = datetime.datetime.utcnow()
         self.configuration_updated_timestamp = datetime.datetime.utcnow()
 
+        self.history = []
+
+        self.state_reason = ''
+        self.state_reason_data = '{}'
+        self.state = 'OK'
+
+        self.state_updated_timestamp = datetime.datetime.utcnow()
+
+    def update_state(self, reason, reason_data, state_value):
+        # History type, that then decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action
+        self.history.append(
+            ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp)
+        )
+
+        self.state_reason = reason
+        self.state_reason_data = reason_data
+        self.state = state_value
+        self.state_updated_timestamp = datetime.datetime.utcnow()
+
 
 class MetricDatum(BaseModel):
@@ -122,10 +142,8 @@ class CloudWatchBackend(BaseBackend):
             if alarm.name in alarm_names
         ]
 
-    def get_alarms_by_state_value(self, state):
-        raise NotImplementedError(
-            "DescribeAlarm by state is not implemented in moto."
-        )
+    def get_alarms_by_state_value(self, target_state):
+        return filter(lambda alarm: alarm.state == target_state, self.alarms.values())
 
     def delete_alarms(self, alarm_names):
         for alarm_name in alarm_names:
@@ -164,6 +182,21 @@
     def get_dashboard(self, dashboard):
         return self.dashboards.get(dashboard)
 
+    def set_alarm_state(self, alarm_name, reason, reason_data, state_value):
+        try:
+            if reason_data is not None:
+                json.loads(reason_data)
+        except ValueError:
+            raise RESTError('InvalidFormat', 'StateReasonData is invalid JSON')
+
+        if alarm_name not in self.alarms:
+            raise RESTError('ResourceNotFound', 'Alarm {0} not found'.format(alarm_name), status=404)
+
+        if state_value not in ('OK', 'ALARM', 'INSUFFICIENT_DATA'):
+            raise RESTError('InvalidParameterValue', 'StateValue is not one of OK | ALARM | INSUFFICIENT_DATA')
+
+        self.alarms[alarm_name].update_state(reason, reason_data, state_value)
+
 
 class LogGroup(BaseModel):

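A rough usage sketch of the new state handling (not from this commit): flip an alarm with SetAlarmState and read it back with DescribeAlarms, which the backend can now filter by state.

```python
import boto3
from moto import mock_cloudwatch


@mock_cloudwatch
def test_set_alarm_state():
    client = boto3.client('cloudwatch', region_name='us-east-1')
    client.put_metric_alarm(
        AlarmName='cpu-high', Namespace='AWS/EC2', MetricName='CPUUtilization',
        Statistic='Average', Period=60, EvaluationPeriods=1,
        Threshold=90.0, ComparisonOperator='GreaterThanThreshold')

    client.set_alarm_state(AlarmName='cpu-high', StateValue='ALARM',
                           StateReason='flipped by the test')

    alarms = client.describe_alarms(StateValue='ALARM')['MetricAlarms']
    assert [a['AlarmName'] for a in alarms] == ['cpu-high']
```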
moto/cloudwatch/responses.py

@@ -1,4 +1,5 @@
 import json
+from moto.core.utils import amzn_request_id
 from moto.core.responses import BaseResponse
 from .models import cloudwatch_backends
@@ -13,6 +14,7 @@ class CloudWatchResponse(BaseResponse):
         template = self.response_template(ERROR_RESPONSE_TEMPLATE)
         return template.render(code=code, message=message), dict(status=status)
 
+    @amzn_request_id
     def put_metric_alarm(self):
         name = self._get_param('AlarmName')
         namespace = self._get_param('Namespace')
@@ -40,6 +42,7 @@
         template = self.response_template(PUT_METRIC_ALARM_TEMPLATE)
         return template.render(alarm=alarm)
 
+    @amzn_request_id
     def describe_alarms(self):
         action_prefix = self._get_param('ActionPrefix')
         alarm_name_prefix = self._get_param('AlarmNamePrefix')
@@ -62,12 +65,14 @@
         template = self.response_template(DESCRIBE_ALARMS_TEMPLATE)
         return template.render(alarms=alarms)
 
+    @amzn_request_id
     def delete_alarms(self):
         alarm_names = self._get_multi_param('AlarmNames.member')
         self.cloudwatch_backend.delete_alarms(alarm_names)
         template = self.response_template(DELETE_METRIC_ALARMS_TEMPLATE)
         return template.render()
 
+    @amzn_request_id
     def put_metric_data(self):
         namespace = self._get_param('Namespace')
         metric_data = []
@@ -99,11 +104,13 @@
         template = self.response_template(PUT_METRIC_DATA_TEMPLATE)
         return template.render()
 
+    @amzn_request_id
     def list_metrics(self):
         metrics = self.cloudwatch_backend.get_all_metrics()
         template = self.response_template(LIST_METRICS_TEMPLATE)
         return template.render(metrics=metrics)
 
+    @amzn_request_id
     def delete_dashboards(self):
         dashboards = self._get_multi_param('DashboardNames.member')
         if dashboards is None:
@@ -116,18 +123,23 @@
         template = self.response_template(DELETE_DASHBOARD_TEMPLATE)
         return template.render()
 
+    @amzn_request_id
     def describe_alarm_history(self):
         raise NotImplementedError()
 
+    @amzn_request_id
     def describe_alarms_for_metric(self):
         raise NotImplementedError()
 
+    @amzn_request_id
     def disable_alarm_actions(self):
         raise NotImplementedError()
 
+    @amzn_request_id
     def enable_alarm_actions(self):
         raise NotImplementedError()
 
+    @amzn_request_id
     def get_dashboard(self):
         dashboard_name = self._get_param('DashboardName')
@@ -138,9 +150,11 @@
         template = self.response_template(GET_DASHBOARD_TEMPLATE)
         return template.render(dashboard=dashboard)
 
+    @amzn_request_id
     def get_metric_statistics(self):
         raise NotImplementedError()
 
+    @amzn_request_id
     def list_dashboards(self):
         prefix = self._get_param('DashboardNamePrefix', '')
@@ -149,6 +163,7 @@
         template = self.response_template(LIST_DASHBOARD_RESPONSE)
         return template.render(dashboards=dashboards)
 
+    @amzn_request_id
     def put_dashboard(self):
         name = self._get_param('DashboardName')
         body = self._get_param('DashboardBody')
@@ -163,14 +178,23 @@
         template = self.response_template(PUT_DASHBOARD_RESPONSE)
         return template.render()
 
+    @amzn_request_id
     def set_alarm_state(self):
-        raise NotImplementedError()
+        alarm_name = self._get_param('AlarmName')
+        reason = self._get_param('StateReason')
+        reason_data = self._get_param('StateReasonData')
+        state_value = self._get_param('StateValue')
+
+        self.cloudwatch_backend.set_alarm_state(alarm_name, reason, reason_data, state_value)
+        template = self.response_template(SET_ALARM_STATE_TEMPLATE)
+        return template.render()
 
 
 PUT_METRIC_ALARM_TEMPLATE = """<PutMetricAlarmResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
    <ResponseMetadata>
       <RequestId>
-         2690d7eb-ed86-11dd-9877-6fad448a8419
+         {{ request_id }}
       </RequestId>
    </ResponseMetadata>
 </PutMetricAlarmResponse>"""
@@ -229,7 +253,7 @@ DESCRIBE_ALARMS_TEMPLATE = """<DescribeAlarmsResponse xmlns="http://monitoring.a
 DELETE_METRIC_ALARMS_TEMPLATE = """<DeleteMetricAlarmResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
    <ResponseMetadata>
       <RequestId>
-         2690d7eb-ed86-11dd-9877-6fad448a8419
+         {{ request_id }}
       </RequestId>
    </ResponseMetadata>
 </DeleteMetricAlarmResponse>"""
@@ -237,7 +261,7 @@ DELETE_METRIC_ALARMS_TEMPLATE = """<DeleteMetricAlarmResponse xmlns="http://moni
 PUT_METRIC_DATA_TEMPLATE = """<PutMetricDataResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
    <ResponseMetadata>
       <RequestId>
-         2690d7eb-ed86-11dd-9877-6fad448a8419
+         {{ request_id }}
       </RequestId>
    </ResponseMetadata>
 </PutMetricDataResponse>"""
@@ -271,7 +295,7 @@ PUT_DASHBOARD_RESPONSE = """<PutDashboardResponse xmlns="http://monitoring.amazo
     <DashboardValidationMessages/>
   </PutDashboardResult>
   <ResponseMetadata>
-    <RequestId>44b1d4d8-9fa3-11e7-8ad3-41b86ac5e49e</RequestId>
+    <RequestId>{{ request_id }}</RequestId>
   </ResponseMetadata>
 </PutDashboardResponse>"""
@@ -289,14 +313,14 @@ LIST_DASHBOARD_RESPONSE = """<ListDashboardsResponse xmlns="http://monitoring.am
     </DashboardEntries>
   </ListDashboardsResult>
   <ResponseMetadata>
-    <RequestId>c3773873-9fa5-11e7-b315-31fcc9275d62</RequestId>
+    <RequestId>{{ request_id }}</RequestId>
   </ResponseMetadata>
 </ListDashboardsResponse>"""
 
 DELETE_DASHBOARD_TEMPLATE = """<DeleteDashboardsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
   <DeleteDashboardsResult/>
   <ResponseMetadata>
-    <RequestId>68d1dc8c-9faa-11e7-a694-df2715690df2</RequestId>
+    <RequestId>{{ request_id }}</RequestId>
   </ResponseMetadata>
 </DeleteDashboardsResponse>"""
@@ -307,16 +331,22 @@ GET_DASHBOARD_TEMPLATE = """<GetDashboardResponse xmlns="http://monitoring.amazo
     <DashboardName>{{ dashboard.name }}</DashboardName>
   </GetDashboardResult>
   <ResponseMetadata>
-    <RequestId>e3c16bb0-9faa-11e7-b315-31fcc9275d62</RequestId>
+    <RequestId>{{ request_id }}</RequestId>
   </ResponseMetadata>
 </GetDashboardResponse>
 """
 
+SET_ALARM_STATE_TEMPLATE = """<SetAlarmStateResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
+  <ResponseMetadata>
+    <RequestId>{{ request_id }}</RequestId>
+  </ResponseMetadata>
+</SetAlarmStateResponse>"""
+
 ERROR_RESPONSE_TEMPLATE = """<ErrorResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
     <Error>
         <Type>Sender</Type>
         <Code>{{ code }}</Code>
         <Message>{{ message }}</Message>
     </Error>
-    <RequestId>5e45fd1e-9fa3-11e7-b720-89e8821d38c4</RequestId>
+    <RequestId>{{ request_id }}</RequestId>
 </ErrorResponse>"""

moto/core/exceptions.py

@@ -34,6 +34,8 @@ ERROR_JSON_RESPONSE = u"""{
 
 class RESTError(HTTPException):
+    code = 400
+
     templates = {
         'single_error': SINGLE_ERROR_RESPONSE,
         'error': ERROR_RESPONSE,
@@ -54,7 +56,6 @@ class DryRunClientError(RESTError):
 
 class JsonRESTError(RESTError):
     def __init__(self, error_type, message, template='error_json', **kwargs):
         super(JsonRESTError, self).__init__(
             error_type, message, template, **kwargs)

moto/core/models.py

@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 from __future__ import absolute_import
@@ -176,16 +177,49 @@ class ServerModeMockAWS(BaseMockAWS):
             if 'endpoint_url' not in kwargs:
                 kwargs['endpoint_url'] = "http://localhost:5000"
             return real_boto3_resource(*args, **kwargs)
 
+        def fake_httplib_send_output(self, message_body=None, *args, **kwargs):
+            def _convert_to_bytes(mixed_buffer):
+                bytes_buffer = []
+                for chunk in mixed_buffer:
+                    if isinstance(chunk, six.text_type):
+                        bytes_buffer.append(chunk.encode('utf-8'))
+                    else:
+                        bytes_buffer.append(chunk)
+                msg = b"\r\n".join(bytes_buffer)
+                return msg
+
+            self._buffer.extend((b"", b""))
+            msg = _convert_to_bytes(self._buffer)
+            del self._buffer[:]
+            if isinstance(message_body, bytes):
+                msg += message_body
+                message_body = None
+            self.send(msg)
+            # if self._expect_header_set:
+            #     read, write, exc = select.select([self.sock], [], [self.sock], 1)
+            #     if read:
+            #         self._handle_expect_response(message_body)
+            #         return
+            if message_body is not None:
+                self.send(message_body)
+
         self._client_patcher = mock.patch('boto3.client', fake_boto3_client)
-        self._resource_patcher = mock.patch(
-            'boto3.resource', fake_boto3_resource)
+        self._resource_patcher = mock.patch('boto3.resource', fake_boto3_resource)
+        if six.PY2:
+            self._httplib_patcher = mock.patch('httplib.HTTPConnection._send_output', fake_httplib_send_output)
+
         self._client_patcher.start()
         self._resource_patcher.start()
+        if six.PY2:
+            self._httplib_patcher.start()
 
     def disable_patching(self):
         if self._client_patcher:
             self._client_patcher.stop()
             self._resource_patcher.stop()
+            if six.PY2:
+                self._httplib_patcher.stop()
 
 
 class Model(type):

moto/core/responses.py

@@ -17,6 +17,8 @@ from six.moves.urllib.parse import parse_qs, urlparse
 import xmltodict
 from pkg_resources import resource_filename
 from werkzeug.exceptions import HTTPException
+import boto3
 
 from moto.compat import OrderedDict
 from moto.core.utils import camelcase_to_underscores, method_names_from_class
@@ -103,7 +105,8 @@ class _TemplateEnvironmentMixin(object):
 
 class BaseResponse(_TemplateEnvironmentMixin):
 
     default_region = 'us-east-1'
-    region_regex = r'\.(.+?)\.amazonaws\.com'
+    # to extract region, use [^.]
+    region_regex = r'\.([^.]+?)\.amazonaws\.com'
     aws_service_spec = None
 
     @classmethod
@@ -151,12 +154,12 @@
         querystring.update(headers)
 
         querystring = _decode_dict(querystring)
 
         self.uri = full_url
         self.path = urlparse(full_url).path
         self.querystring = querystring
         self.method = request.method
         self.region = self.get_region_from_url(request, full_url)
+        self.uri_match = None
 
         self.headers = request.headers
         if 'host' not in self.headers:
@@ -178,6 +181,58 @@
         self.setup_class(request, full_url, headers)
         return self.call_action()
 
+    def uri_to_regexp(self, uri):
+        """converts uri w/ placeholder to regexp
+          '/cars/{carName}/drivers/{DriverName}'
+        -> '^/cars/.*/drivers/[^/]*$'
+
+          '/cars/{carName}/drivers/{DriverName}/drive'
+        -> '^/cars/.*/drivers/.*/drive$'
+        """
+        def _convert(elem, is_last):
+            if not re.match('^{.*}$', elem):
+                return elem
+            name = elem.replace('{', '').replace('}', '')
+            if is_last:
+                return '(?P<%s>[^/]*)' % name
+            return '(?P<%s>.*)' % name
+
+        elems = uri.split('/')
+        num_elems = len(elems)
+        regexp = '^{}$'.format('/'.join([_convert(elem, (i == num_elems - 1)) for i, elem in enumerate(elems)]))
+        return regexp
+
+    def _get_action_from_method_and_request_uri(self, method, request_uri):
+        """basically used for `rest-json` APIs
+        You can refer to example from link below
+        https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json
+        """
+
+        # service response class should have 'SERVICE_NAME' class member,
+        # if you want to get action from method and url
+        if not hasattr(self, 'SERVICE_NAME'):
+            return None
+        service = self.SERVICE_NAME
+        conn = boto3.client(service, region_name=self.region)
+
+        # make cache if it does not exist yet
+        if not hasattr(self, 'method_urls'):
+            self.method_urls = defaultdict(lambda: defaultdict(str))
+            op_names = conn._service_model.operation_names
+            for op_name in op_names:
+                op_model = conn._service_model.operation_model(op_name)
+                _method = op_model.http['method']
+                uri_regexp = self.uri_to_regexp(op_model.http['requestUri'])
+                self.method_urls[_method][uri_regexp] = op_model.name
+        regexp_and_names = self.method_urls[method]
+        for regexp, name in regexp_and_names.items():
+            match = re.match(regexp, request_uri)
+            self.uri_match = match
+            if match:
+                return name
+        return None
+
     def _get_action(self):
         action = self.querystring.get('Action', [""])[0]
         if not action:  # Some services use a header for the action
@@ -186,7 +241,9 @@
                 'x-amz-target') or self.headers.get('X-Amz-Target')
             if match:
                 action = match.split(".")[-1]
+        # get action from method and uri
+        if not action:
+            return self._get_action_from_method_and_request_uri(self.method, self.path)
         return action
 
     def call_action(self):
@@ -221,6 +278,22 @@
         val = self.querystring.get(param_name)
         if val is not None:
             return val[0]
+
+        # try to get json body parameter
+        if self.body is not None:
+            try:
+                return json.loads(self.body)[param_name]
+            except ValueError:
+                pass
+            except KeyError:
+                pass
+        # try to get path parameter
+        if self.uri_match:
+            try:
+                return self.uri_match.group(param_name)
+            except IndexError:
+                # do nothing if param is not found
+                pass
         return if_none
 
     def _get_int_param(self, param_name, if_none=None):

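The placeholder-to-regexp conversion is what lets rest-json services (such as the new IoT endpoints) be routed from botocore's operation metadata instead of hand-written URL maps. A standalone illustration of the same idea (it mirrors `uri_to_regexp` rather than importing it):

```python
import re


def uri_to_regexp(uri):
    """Convert a botocore requestUri template into a named-group regexp."""
    def convert(elem, is_last):
        if not re.match(r'^{.*}$', elem):
            return elem
        name = elem.strip('{}')
        # the last path element must not swallow further slashes
        return '(?P<%s>[^/]*)' % name if is_last else '(?P<%s>.*)' % name

    elems = uri.split('/')
    return '^{}$'.format('/'.join(
        convert(e, i == len(elems) - 1) for i, e in enumerate(elems)))


pattern = uri_to_regexp('/things/{thingName}/principals')
match = re.match(pattern, '/things/my-thing/principals')
assert match.group('thingName') == 'my-thing'
```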
moto/core/utils.py

@@ -272,9 +272,6 @@ def amzn_request_id(f):
         else:
             status, new_headers, body = response
             headers.update(new_headers)
-            # Cast status to string
-            if "status" in headers:
-                headers['status'] = str(headers['status'])
 
         request_id = gen_amzn_requestid_long(headers)

moto/dynamodb2/__init__.py

@@ -1,6 +1,7 @@
 from __future__ import unicode_literals
-from .models import dynamodb_backend2
+from .models import dynamodb_backends as dynamodb_backends2
+from ..core.models import base_decorator, deprecated_base_decorator
 
-dynamodb_backends2 = {"global": dynamodb_backend2}
-mock_dynamodb2 = dynamodb_backend2.decorator
-mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator
+dynamodb_backend2 = dynamodb_backends2['us-east-1']
+mock_dynamodb2 = base_decorator(dynamodb_backends2)
+mock_dynamodb2_deprecated = deprecated_base_decorator(dynamodb_backends2)

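Because the DynamoDB2 backend is now keyed by region instead of a single 'global' instance, tables created in one region are no longer visible from another. A minimal sketch of the behaviour this enables (not from this commit):

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_tables_are_region_scoped():
    for region in ('us-east-1', 'eu-west-1'):
        boto3.client('dynamodb', region_name=region).create_table(
            TableName='users-' + region,
            KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})

    us_tables = boto3.client('dynamodb', region_name='us-east-1').list_tables()['TableNames']
    assert us_tables == ['users-us-east-1']
```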
moto/dynamodb2/models.py

@@ -1,13 +1,16 @@
 from __future__ import unicode_literals
 from collections import defaultdict
+import copy
 import datetime
 import decimal
 import json
 import re
 
+import boto3
 from moto.compat import OrderedDict
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import unix_time
+from moto.core.exceptions import JsonRESTError
 from .comparisons import get_comparison_func, get_filter_expression, Op
@@ -271,6 +274,10 @@ class Table(BaseModel):
         self.items = defaultdict(dict)
         self.table_arn = self._generate_arn(table_name)
         self.tags = []
+        self.ttl = {
+            'TimeToLiveStatus': 'DISABLED'  # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',
+            # 'AttributeName': 'string'  # Can contain this
+        }
 
     def _generate_arn(self, name):
         return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
@@ -413,7 +420,7 @@
     def query(self, hash_key, range_comparison, range_objs, limit,
               exclusive_start_key, scan_index_forward, projection_expression,
-              index_name=None, **filter_kwargs):
+              index_name=None, filter_expression=None, **filter_kwargs):
         results = []
         if index_name:
             all_indexes = (self.global_indexes or []) + (self.indexes or [])
@@ -486,7 +493,8 @@
         if projection_expression:
             expressions = [x.strip() for x in projection_expression.split(',')]
-            for result in possible_results:
+            results = copy.deepcopy(results)
+            for result in results:
                 for attr in list(result.attrs):
                     if attr not in expressions:
                         result.attrs.pop(attr)
@@ -496,6 +504,9 @@
 
         scanned_count = len(list(self.all_items()))
 
+        if filter_expression is not None:
+            results = [item for item in results if filter_expression.expr(item)]
+
         results, last_evaluated_key = self._trim_results(results, limit,
                                                          exclusive_start_key)
         return results, scanned_count, last_evaluated_key
@@ -577,9 +588,16 @@
 
 class DynamoDBBackend(BaseBackend):
 
-    def __init__(self):
+    def __init__(self, region_name=None):
+        self.region_name = region_name
         self.tables = OrderedDict()
 
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
     def create_table(self, name, **params):
         if name in self.tables:
             return None
@@ -595,6 +613,11 @@
             if self.tables[table].table_arn == table_arn:
                 self.tables[table].tags.extend(tags)
 
+    def untag_resource(self, table_arn, tag_keys):
+        for table in self.tables:
+            if self.tables[table].table_arn == table_arn:
+                self.tables[table].tags = [tag for tag in self.tables[table].tags if tag['Key'] not in tag_keys]
+
     def list_tags_of_resource(self, table_arn):
         required_table = None
         for table in self.tables:
@@ -689,7 +712,9 @@
         return table.get_item(hash_key, range_key)
 
     def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts,
-              limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, **filter_kwargs):
+              limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None,
+              expr_names=None, expr_values=None, filter_expression=None,
+              **filter_kwargs):
         table = self.tables.get(table_name)
         if not table:
             return None, None
@@ -698,8 +723,13 @@
         range_values = [DynamoType(range_value)
                         for range_value in range_value_dicts]
 
+        if filter_expression is not None:
+            filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
+        else:
+            filter_expression = Op(None, None)  # Will always eval to true
+
         return table.query(hash_key, range_comparison, range_values, limit,
-                           exclusive_start_key, scan_index_forward, projection_expression, index_name, **filter_kwargs)
+                           exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs)
 
     def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values):
         table = self.tables.get(table_name)
@@ -796,5 +826,28 @@
         hash_key, range_key = self.get_keys_value(table, keys)
         return table.delete_item(hash_key, range_key)
 
+    def update_ttl(self, table_name, ttl_spec):
+        table = self.tables.get(table_name)
+        if table is None:
+            raise JsonRESTError('ResourceNotFound', 'Table not found')
+
+        if 'Enabled' not in ttl_spec or 'AttributeName' not in ttl_spec:
+            raise JsonRESTError('InvalidParameterValue',
+                                'TimeToLiveSpecification does not contain Enabled and AttributeName')
+
+        if ttl_spec['Enabled']:
+            table.ttl['TimeToLiveStatus'] = 'ENABLED'
+        else:
+            table.ttl['TimeToLiveStatus'] = 'DISABLED'
+        table.ttl['AttributeName'] = ttl_spec['AttributeName']
+
+    def describe_ttl(self, table_name):
+        table = self.tables.get(table_name)
+        if table is None:
+            raise JsonRESTError('ResourceNotFound', 'Table not found')
+
+        return table.ttl
+
 
-dynamodb_backend2 = DynamoDBBackend()
+available_regions = boto3.session.Session().get_available_regions("dynamodb")
+dynamodb_backends = {region: DynamoDBBackend(region_name=region) for region in available_regions}

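A rough sketch (not from this commit) of the TTL round trip the new `update_ttl`/`describe_ttl` backend methods are meant to support, assuming the response handler wires them up to the UpdateTimeToLive/DescribeTimeToLive actions:

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_ttl_round_trip():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='sessions',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})

    client.update_time_to_live(
        TableName='sessions',
        TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'expires_at'})

    ttl = client.describe_time_to_live(TableName='sessions')['TimeToLiveDescription']
    assert ttl['TimeToLiveStatus'] == 'ENABLED'
```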
moto/dynamodb2/responses.py

@@ -5,7 +5,7 @@ import re
 
 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores, amzn_request_id
-from .models import dynamodb_backend2, dynamo_json_dump
+from .models import dynamodb_backends, dynamo_json_dump
 
 
 class DynamoHandler(BaseResponse):
@@ -24,6 +24,14 @@
     def error(self, type_, message, status=400):
         return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message})
 
+    @property
+    def dynamodb_backend(self):
+        """
+        :return: DynamoDB2 Backend
+        :rtype: moto.dynamodb2.models.DynamoDBBackend
+        """
+        return dynamodb_backends[self.region]
+
     @amzn_request_id
     def call_action(self):
         self.body = json.loads(self.body or '{}')
@@ -46,10 +54,10 @@
         limit = body.get('Limit', 100)
         if body.get("ExclusiveStartTableName"):
             last = body.get("ExclusiveStartTableName")
-            start = list(dynamodb_backend2.tables.keys()).index(last) + 1
+            start = list(self.dynamodb_backend.tables.keys()).index(last) + 1
         else:
             start = 0
-        all_tables = list(dynamodb_backend2.tables.keys())
+        all_tables = list(self.dynamodb_backend.tables.keys())
         if limit:
             tables = all_tables[start:start + limit]
         else:
@@ -74,12 +82,12 @@
         global_indexes = body.get("GlobalSecondaryIndexes", [])
         local_secondary_indexes = body.get("LocalSecondaryIndexes", [])
 
-        table = dynamodb_backend2.create_table(table_name,
-                                               schema=key_schema,
-                                               throughput=throughput,
-                                               attr=attr,
-                                               global_indexes=global_indexes,
-                                               indexes=local_secondary_indexes)
+        table = self.dynamodb_backend.create_table(table_name,
+                                                   schema=key_schema,
+                                                   throughput=throughput,
+                                                   attr=attr,
+                                                   global_indexes=global_indexes,
+                                                   indexes=local_secondary_indexes)
         if table is not None:
             return dynamo_json_dump(table.describe())
         else:
@@ -88,7 +96,7 @@
 
     def delete_table(self):
         name = self.body['TableName']
-        table = dynamodb_backend2.delete_table(name)
+        table = self.dynamodb_backend.delete_table(name)
         if table is not None:
             return dynamo_json_dump(table.describe())
         else:
@@ -96,15 +104,21 @@
             return self.error(er, 'Requested resource not found')
 
     def tag_resource(self):
-        tags = self.body['Tags']
         table_arn = self.body['ResourceArn']
-        dynamodb_backend2.tag_resource(table_arn, tags)
-        return json.dumps({})
+        tags = self.body['Tags']
+        self.dynamodb_backend.tag_resource(table_arn, tags)
+        return ''
+
+    def untag_resource(self):
+        table_arn = self.body['ResourceArn']
+        tags = self.body['TagKeys']
+        self.dynamodb_backend.untag_resource(table_arn, tags)
+        return ''
 
     def list_tags_of_resource(self):
         try:
             table_arn = self.body['ResourceArn']
-            all_tags = dynamodb_backend2.list_tags_of_resource(table_arn)
+            all_tags = self.dynamodb_backend.list_tags_of_resource(table_arn)
             all_tag_keys = [tag['Key'] for tag in all_tags]
             marker = self.body.get('NextToken')
             if marker:
@@ -127,17 +141,17 @@
     def update_table(self):
         name = self.body['TableName']
         if 'GlobalSecondaryIndexUpdates' in self.body:
-            table = dynamodb_backend2.update_table_global_indexes(
+            table = self.dynamodb_backend.update_table_global_indexes(
                 name, self.body['GlobalSecondaryIndexUpdates'])
         if 'ProvisionedThroughput' in self.body:
             throughput = self.body["ProvisionedThroughput"]
-            table = dynamodb_backend2.update_table_throughput(name, throughput)
+            table = self.dynamodb_backend.update_table_throughput(name, throughput)
         return dynamo_json_dump(table.describe())
 
     def describe_table(self):
         name = self.body['TableName']
         try:
-            table = dynamodb_backend2.tables[name]
+            table = self.dynamodb_backend.tables[name]
         except KeyError:
             er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
             return self.error(er, 'Requested resource not found')
@@ -188,8 +202,7 @@
                     expected[not_exists_m.group(1)] = {'Exists': False}
 
         try:
-            result = dynamodb_backend2.put_item(
-                name, item, expected, overwrite)
+            result = self.dynamodb_backend.put_item(name, item, expected, overwrite)
         except ValueError:
             er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
             return self.error(er, 'A condition specified in the operation could not be evaluated.')
@@ -214,10 +227,10 @@
                 request = list(table_request.values())[0]
                 if request_type == 'PutRequest':
                     item = request['Item']
-                    dynamodb_backend2.put_item(table_name, item)
+                    self.dynamodb_backend.put_item(table_name, item)
                 elif request_type == 'DeleteRequest':
                     keys = request['Key']
-                    item = dynamodb_backend2.delete_item(table_name, keys)
+                    item = self.dynamodb_backend.delete_item(table_name, keys)
 
         response = {
             "ConsumedCapacity": [
@@ -237,7 +250,7 @@
         name = self.body['TableName']
         key = self.body['Key']
         try:
-            item = dynamodb_backend2.get_item(name, key)
+            item = self.dynamodb_backend.get_item(name, key)
         except ValueError:
             er = 'com.amazon.coral.validate#ValidationException'
             return self.error(er, 'Validation Exception')
@@ -268,7 +281,7 @@
             attributes_to_get = table_request.get('AttributesToGet')
             results["Responses"][table_name] = []
             for key in keys:
-                item = dynamodb_backend2.get_item(table_name, key)
+                item = self.dynamodb_backend.get_item(table_name, key)
                 if item:
                     item_describe = item.describe_attrs(attributes_to_get)
                     results["Responses"][table_name].append(
@@ -285,7 +298,9 @@
         # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}}
         key_condition_expression = self.body.get('KeyConditionExpression')
         projection_expression = self.body.get('ProjectionExpression')
expression_attribute_names = self.body.get('ExpressionAttributeNames') expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
filter_expression = self.body.get('FilterExpression')
expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
if projection_expression and expression_attribute_names: if projection_expression and expression_attribute_names:
expressions = [x.strip() for x in projection_expression.split(',')] expressions = [x.strip() for x in projection_expression.split(',')]
@ -294,10 +309,11 @@ class DynamoHandler(BaseResponse):
projection_expression = projection_expression.replace(expression, expression_attribute_names[expression]) projection_expression = projection_expression.replace(expression, expression_attribute_names[expression])
filter_kwargs = {} filter_kwargs = {}
if key_condition_expression:
value_alias_map = self.body['ExpressionAttributeValues']
table = dynamodb_backend2.get_table(name) if key_condition_expression:
value_alias_map = self.body.get('ExpressionAttributeValues', {})
table = self.dynamodb_backend.get_table(name)
# If table does not exist # If table does not exist
if table is None: if table is None:
@ -320,7 +336,7 @@ class DynamoHandler(BaseResponse):
index = table.schema index = table.schema
reverse_attribute_lookup = dict((v, k) for k, v in reverse_attribute_lookup = dict((v, k) for k, v in
six.iteritems(self.body['ExpressionAttributeNames'])) six.iteritems(self.body.get('ExpressionAttributeNames', {})))
if " AND " in key_condition_expression: if " AND " in key_condition_expression:
expressions = key_condition_expression.split(" AND ", 1) expressions = key_condition_expression.split(" AND ", 1)
@ -359,13 +375,14 @@ class DynamoHandler(BaseResponse):
range_values = [] range_values = []
hash_key_value_alias = hash_key_expression.split("=")[1].strip() hash_key_value_alias = hash_key_expression.split("=")[1].strip()
hash_key = value_alias_map[hash_key_value_alias] # Temporary fix until we get proper KeyConditionExpression function
hash_key = value_alias_map.get(hash_key_value_alias, {'S': hash_key_value_alias})
else: else:
# 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}} # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}}
key_conditions = self.body.get('KeyConditions') key_conditions = self.body.get('KeyConditions')
query_filters = self.body.get("QueryFilter") query_filters = self.body.get("QueryFilter")
if key_conditions: if key_conditions:
hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name( hash_key_name, range_key_name = self.dynamodb_backend.get_table_keys_name(
name, key_conditions.keys()) name, key_conditions.keys())
for key, value in key_conditions.items(): for key, value in key_conditions.items():
if key not in (hash_key_name, range_key_name): if key not in (hash_key_name, range_key_name):
@ -398,9 +415,12 @@ class DynamoHandler(BaseResponse):
exclusive_start_key = self.body.get('ExclusiveStartKey') exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit") limit = self.body.get("Limit")
scan_index_forward = self.body.get("ScanIndexForward") scan_index_forward = self.body.get("ScanIndexForward")
items, scanned_count, last_evaluated_key = dynamodb_backend2.query( items, scanned_count, last_evaluated_key = self.dynamodb_backend.query(
name, hash_key, range_comparison, range_values, limit, name, hash_key, range_comparison, range_values, limit,
exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs) exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name,
expr_names=expression_attribute_names, expr_values=expression_attribute_values,
filter_expression=filter_expression, **filter_kwargs
)
if items is None: if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, 'Requested resource not found') return self.error(er, 'Requested resource not found')
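The query call above now forwards FilterExpression together with the expression attribute name/value maps to the backend. A hedged sketch of a Query relying on that plumbing; the table layout and attribute names are made up for illustration:

import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def query_with_filter():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='threads',
        KeySchema=[{'AttributeName': 'forum_name', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'forum_name', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    client.put_item(TableName='threads',
                    Item={'forum_name': {'S': 'moto'}, 'subject': {'S': 'tags'}})
    # FilterExpression is applied by the backend after the key condition is evaluated
    return client.query(
        TableName='threads',
        KeyConditionExpression='#n = :forum',
        FilterExpression='subject = :subj',
        ExpressionAttributeNames={'#n': 'forum_name'},
        ExpressionAttributeValues={':forum': {'S': 'moto'}, ':subj': {'S': 'tags'}},
    )['Items']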
@ -442,12 +462,12 @@ class DynamoHandler(BaseResponse):
limit = self.body.get("Limit") limit = self.body.get("Limit")
try: try:
items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters, items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
limit, limit,
exclusive_start_key, exclusive_start_key,
filter_expression, filter_expression,
expression_attribute_names, expression_attribute_names,
expression_attribute_values) expression_attribute_values)
except ValueError as err: except ValueError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationError' er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
return self.error(er, 'Bad Filter Expression: {0}'.format(err)) return self.error(er, 'Bad Filter Expression: {0}'.format(err))
@ -478,12 +498,12 @@ class DynamoHandler(BaseResponse):
name = self.body['TableName'] name = self.body['TableName']
keys = self.body['Key'] keys = self.body['Key']
return_values = self.body.get('ReturnValues', '') return_values = self.body.get('ReturnValues', '')
table = dynamodb_backend2.get_table(name) table = self.dynamodb_backend.get_table(name)
if not table: if not table:
er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException' er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.') return self.error(er, 'A condition specified in the operation could not be evaluated.')
item = dynamodb_backend2.delete_item(name, keys) item = self.dynamodb_backend.delete_item(name, keys)
if item and return_values == 'ALL_OLD': if item and return_values == 'ALL_OLD':
item_dict = item.to_json() item_dict = item.to_json()
else: else:
@ -500,7 +520,7 @@ class DynamoHandler(BaseResponse):
'ExpressionAttributeNames', {}) 'ExpressionAttributeNames', {})
expression_attribute_values = self.body.get( expression_attribute_values = self.body.get(
'ExpressionAttributeValues', {}) 'ExpressionAttributeValues', {})
existing_item = dynamodb_backend2.get_item(name, key) existing_item = self.dynamodb_backend.get_item(name, key)
if 'Expected' in self.body: if 'Expected' in self.body:
expected = self.body['Expected'] expected = self.body['Expected']
@ -536,9 +556,10 @@ class DynamoHandler(BaseResponse):
'\s*([=\+-])\s*', '\\1', update_expression) '\s*([=\+-])\s*', '\\1', update_expression)
try: try:
item = dynamodb_backend2.update_item( item = self.dynamodb_backend.update_item(
name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, name, key, update_expression, attribute_updates, expression_attribute_names,
expected) expression_attribute_values, expected
)
except ValueError: except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.') return self.error(er, 'A condition specified in the operation could not be evaluated.')
@ -555,3 +576,26 @@ class DynamoHandler(BaseResponse):
item_dict['Attributes'] = {} item_dict['Attributes'] = {}
return dynamo_json_dump(item_dict) return dynamo_json_dump(item_dict)
def describe_limits(self):
return json.dumps({
'AccountMaxReadCapacityUnits': 20000,
'TableMaxWriteCapacityUnits': 10000,
'AccountMaxWriteCapacityUnits': 20000,
'TableMaxReadCapacityUnits': 10000
})
def update_time_to_live(self):
name = self.body['TableName']
ttl_spec = self.body['TimeToLiveSpecification']
self.dynamodb_backend.update_ttl(name, ttl_spec)
return json.dumps({'TimeToLiveSpecification': ttl_spec})
def describe_time_to_live(self):
name = self.body['TableName']
ttl_spec = self.dynamodb_backend.describe_ttl(name)
return json.dumps({'TimeToLiveDescription': ttl_spec})
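A hedged sketch of the two new TTL handlers from the boto3 side; the table and attribute names are illustrative:

import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def ttl_round_trip():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='sessions',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    client.update_time_to_live(
        TableName='sessions',
        TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'expiry'},
    )
    # update_ttl stores the spec; describe_ttl reports it back
    return client.describe_time_to_live(TableName='sessions')['TimeToLiveDescription']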

View File

@ -2,10 +2,12 @@ from __future__ import unicode_literals
import copy import copy
import itertools import itertools
import ipaddress
import json import json
import os
import re import re
import six import six
import warnings
from pkg_resources import resource_filename
import boto.ec2 import boto.ec2
@ -44,7 +46,6 @@ from .exceptions import (
InvalidRouteTableIdError, InvalidRouteTableIdError,
InvalidRouteError, InvalidRouteError,
InvalidInstanceIdError, InvalidInstanceIdError,
MalformedAMIIdError,
InvalidAMIIdError, InvalidAMIIdError,
InvalidAMIAttributeItemValueError, InvalidAMIAttributeItemValueError,
InvalidSnapshotIdError, InvalidSnapshotIdError,
@ -113,8 +114,12 @@ from .utils import (
tag_filter_matches, tag_filter_matches,
) )
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') INSTANCE_TYPES = json.load(
INSTANCE_TYPES = json.load(open(os.path.join(RESOURCES_DIR, 'instance_types.json'), 'r')) open(resource_filename(__name__, 'resources/instance_types.json'), 'r')
)
AMIS = json.load(
open(resource_filename(__name__, 'resources/amis.json'), 'r')
)
def utc_date_and_time(): def utc_date_and_time():
@ -384,6 +389,11 @@ class Instance(TaggedEC2Resource, BotoInstance):
amis = self.ec2_backend.describe_images(filters={'image-id': image_id}) amis = self.ec2_backend.describe_images(filters={'image-id': image_id})
ami = amis[0] if amis else None ami = amis[0] if amis else None
if ami is None:
warnings.warn('Could not find AMI with image-id: {0}; '
'in the near future this will '
'cause an error'.format(image_id),
PendingDeprecationWarning)
self.platform = ami.platform if ami else None self.platform = ami.platform if ami else None
self.virtualization_type = ami.virtualization_type if ami else 'paravirtual' self.virtualization_type = ami.virtualization_type if ami else 'paravirtual'
@ -403,6 +413,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
subnet = ec2_backend.get_subnet(self.subnet_id) subnet = ec2_backend.get_subnet(self.subnet_id)
self.vpc_id = subnet.vpc_id self.vpc_id = subnet.vpc_id
self._placement.zone = subnet.availability_zone self._placement.zone = subnet.availability_zone
if associate_public_ip is None:
# Mapping a public IP hasn't been explicitly enabled or disabled
associate_public_ip = subnet.map_public_ip_on_launch == 'true'
elif placement: elif placement:
self._placement.zone = placement self._placement.zone = placement
else: else:
@ -410,10 +424,22 @@ class Instance(TaggedEC2Resource, BotoInstance):
self.block_device_mapping = BlockDeviceMapping() self.block_device_mapping = BlockDeviceMapping()
self.prep_nics(kwargs.get("nics", {}), self._private_ips = set()
subnet_id=self.subnet_id, self.prep_nics(
private_ip=kwargs.get("private_ip"), kwargs.get("nics", {}),
associate_public_ip=associate_public_ip) private_ip=kwargs.get("private_ip"),
associate_public_ip=associate_public_ip
)
def __del__(self):
try:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
for ip in self._private_ips:
subnet.del_subnet_ip(ip)
except Exception:
# It's not "super" critical we clean this up, as reset will do this;
# worst case we'll get IP address exhaustion... rarely
pass
def setup_defaults(self): def setup_defaults(self):
# Default have an instance with root volume should you not wish to # Default have an instance with root volume should you not wish to
@ -548,14 +574,23 @@ class Instance(TaggedEC2Resource, BotoInstance):
else: else:
return self.security_groups return self.security_groups
def prep_nics(self, nic_spec, subnet_id=None, private_ip=None, associate_public_ip=None): def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None):
self.nics = {} self.nics = {}
if not private_ip: if self.subnet_id:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
if not private_ip:
private_ip = subnet.get_available_subnet_ip(instance=self)
else:
subnet.request_ip(private_ip, instance=self)
self._private_ips.add(private_ip)
elif private_ip is None:
# Preserve old behaviour if in EC2-Classic mode
private_ip = random_private_ip() private_ip = random_private_ip()
# Primary NIC defaults # Primary NIC defaults
primary_nic = {'SubnetId': subnet_id, primary_nic = {'SubnetId': self.subnet_id,
'PrivateIpAddress': private_ip, 'PrivateIpAddress': private_ip,
'AssociatePublicIpAddress': associate_public_ip} 'AssociatePublicIpAddress': associate_public_ip}
primary_nic = dict((k, v) for k, v in primary_nic.items() if v) primary_nic = dict((k, v) for k, v in primary_nic.items() if v)
@ -766,14 +801,12 @@ class InstanceBackend(object):
associated with the given instance_ids. associated with the given instance_ids.
""" """
reservations = [] reservations = []
for reservation in self.all_reservations(make_copy=True): for reservation in self.all_reservations():
reservation_instance_ids = [ reservation_instance_ids = [
instance.id for instance in reservation.instances] instance.id for instance in reservation.instances]
matching_reservation = any( matching_reservation = any(
instance_id in reservation_instance_ids for instance_id in instance_ids) instance_id in reservation_instance_ids for instance_id in instance_ids)
if matching_reservation: if matching_reservation:
# We need to make a copy of the reservation because we have to modify the
# instances to limit to those requested
reservation.instances = [ reservation.instances = [
instance for instance in reservation.instances if instance.id in instance_ids] instance for instance in reservation.instances if instance.id in instance_ids]
reservations.append(reservation) reservations.append(reservation)
@ -787,15 +820,8 @@ class InstanceBackend(object):
reservations = filter_reservations(reservations, filters) reservations = filter_reservations(reservations, filters)
return reservations return reservations
def all_reservations(self, make_copy=False, filters=None): def all_reservations(self, filters=None):
if make_copy: reservations = [copy.copy(reservation) for reservation in self.reservations.values()]
# Return copies so that other functions can modify them with changing
# the originals
reservations = [copy.deepcopy(reservation)
for reservation in self.reservations.values()]
else:
reservations = [
reservation for reservation in self.reservations.values()]
if filters is not None: if filters is not None:
reservations = filter_reservations(reservations, filters) reservations = filter_reservations(reservations, filters)
return reservations return reservations
@ -985,17 +1011,31 @@ class TagBackend(object):
class Ami(TaggedEC2Resource): class Ami(TaggedEC2Resource):
def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None,
name=None, description=None): name=None, description=None, owner_id=None,
public=False, virtualization_type=None, architecture=None,
state='available', creation_date=None, platform=None,
image_type='machine', image_location=None, hypervisor=None,
root_device_type=None, root_device_name=None, sriov='simple',
region_name='us-east-1a'
):
self.ec2_backend = ec2_backend self.ec2_backend = ec2_backend
self.id = ami_id self.id = ami_id
self.state = "available" self.state = state
self.name = name self.name = name
self.image_type = image_type
self.image_location = image_location
self.owner_id = owner_id
self.description = description self.description = description
self.virtualization_type = None self.virtualization_type = virtualization_type
self.architecture = None self.architecture = architecture
self.kernel_id = None self.kernel_id = None
self.platform = None self.platform = platform
self.creation_date = utc_date_and_time() self.hypervisor = hypervisor
self.root_device_name = root_device_name
self.root_device_type = root_device_type
self.sriov = sriov
self.creation_date = utc_date_and_time() if creation_date is None else creation_date
if instance: if instance:
self.instance = instance self.instance = instance
@ -1023,8 +1063,11 @@ class Ami(TaggedEC2Resource):
self.launch_permission_groups = set() self.launch_permission_groups = set()
self.launch_permission_users = set() self.launch_permission_users = set()
if public:
self.launch_permission_groups.add('all')
# AWS auto-creates these, we should reflect the same. # AWS auto-creates these, we should reflect the same.
volume = self.ec2_backend.create_volume(15, "us-east-1a") volume = self.ec2_backend.create_volume(15, region_name)
self.ebs_snapshot = self.ec2_backend.create_snapshot( self.ebs_snapshot = self.ec2_backend.create_snapshot(
volume.id, "Auto-created snapshot for AMI %s" % self.id) volume.id, "Auto-created snapshot for AMI %s" % self.id)
@ -1051,6 +1094,8 @@ class Ami(TaggedEC2Resource):
return self.state return self.state
elif filter_name == 'name': elif filter_name == 'name':
return self.name return self.name
elif filter_name == 'owner-id':
return self.owner_id
else: else:
return super(Ami, self).get_filter_value( return super(Ami, self).get_filter_value(
filter_name, 'DescribeImages') filter_name, 'DescribeImages')
@ -1059,14 +1104,22 @@ class Ami(TaggedEC2Resource):
class AmiBackend(object): class AmiBackend(object):
def __init__(self): def __init__(self):
self.amis = {} self.amis = {}
self._load_amis()
super(AmiBackend, self).__init__() super(AmiBackend, self).__init__()
def create_image(self, instance_id, name=None, description=None): def _load_amis(self):
for ami in AMIS:
ami_id = ami['ami_id']
self.amis[ami_id] = Ami(self, **ami)
def create_image(self, instance_id, name=None, description=None, owner_id=None):
# TODO: check that instance exists and pull info from it. # TODO: check that instance exists and pull info from it.
ami_id = random_ami_id() ami_id = random_ami_id()
instance = self.get_instance(instance_id) instance = self.get_instance(instance_id)
ami = Ami(self, ami_id, instance=instance, source_ami=None, ami = Ami(self, ami_id, instance=instance, source_ami=None,
name=name, description=description) name=name, description=description, owner_id=owner_id)
self.amis[ami_id] = ami self.amis[ami_id] = ami
return ami return ami
@ -1079,30 +1132,29 @@ class AmiBackend(object):
self.amis[ami_id] = ami self.amis[ami_id] = ami
return ami return ami
def describe_images(self, ami_ids=(), filters=None, exec_users=None): def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None):
images = [] images = self.amis.values()
# Limit images by launch permissions
if exec_users: if exec_users:
for ami_id in self.amis: tmp_images = []
found = False for ami in images:
for user_id in exec_users: for user_id in exec_users:
if user_id in self.amis[ami_id].launch_permission_users: if user_id in ami.launch_permission_users:
found = True tmp_images.append(ami)
if found: images = tmp_images
images.append(self.amis[ami_id])
if images == []: # Limit by owner ids
return images if owners:
images = [ami for ami in images if ami.owner_id in owners]
if ami_ids:
images = [ami for ami in images if ami.id in ami_ids]
# Generic filters
if filters: if filters:
images = images or self.amis.values()
return generic_filter(filters, images) return generic_filter(filters, images)
else: return images
for ami_id in ami_ids:
if ami_id in self.amis:
images.append(self.amis[ami_id])
elif not ami_id.startswith("ami-"):
raise MalformedAMIIdError(ami_id)
else:
raise InvalidAMIIdError(ami_id)
return images or self.amis.values()
def deregister_image(self, ami_id): def deregister_image(self, ami_id):
if ami_id in self.amis: if ami_id in self.amis:
@ -2127,10 +2179,17 @@ class Subnet(TaggedEC2Resource):
self.id = subnet_id self.id = subnet_id
self.vpc_id = vpc_id self.vpc_id = vpc_id
self.cidr_block = cidr_block self.cidr_block = cidr_block
self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
self._availability_zone = availability_zone self._availability_zone = availability_zone
self.default_for_az = default_for_az self.default_for_az = default_for_az
self.map_public_ip_on_launch = map_public_ip_on_launch self.map_public_ip_on_launch = map_public_ip_on_launch
# Theory is we assign IPs as we go (a /8 has 16,777,214 usable IPs)
self._subnet_ip_generator = self.cidr.hosts()
self.reserved_ips = [six.next(self._subnet_ip_generator) for _ in range(0, 3)] # Reserved by AWS
self._unused_ips = set() # if an instance is destroyed, hold its IP here for reuse
self._subnet_ips = {} # maps IP -> instance
@classmethod @classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties'] properties = cloudformation_json['Properties']
@ -2197,6 +2256,46 @@ class Subnet(TaggedEC2Resource):
'"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"') '"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"')
raise UnformattedGetAttTemplateException() raise UnformattedGetAttTemplateException()
def get_available_subnet_ip(self, instance):
try:
new_ip = self._unused_ips.pop()
except KeyError:
new_ip = six.next(self._subnet_ip_generator)
# Skip any IPs that have been manually specified
while str(new_ip) in self._subnet_ips:
new_ip = six.next(self._subnet_ip_generator)
if new_ip == self.cidr.broadcast_address:
raise StopIteration() # Broadcast address can't be used, obviously
# TODO: StopIteration will be raised if no IPs are available; not sure how AWS handles this.
new_ip = str(new_ip)
self._subnet_ips[new_ip] = instance
return new_ip
def request_ip(self, ip, instance):
if ipaddress.ip_address(ip) not in self.cidr:
raise Exception('IP does not fall in the subnet CIDR of {0}'.format(self.cidr))
if ip in self._subnet_ips:
raise Exception('IP already in use')
try:
self._unused_ips.remove(ip)
except KeyError:
pass
self._subnet_ips[ip] = instance
return ip
def del_subnet_ip(self, ip):
try:
del self._subnet_ips[ip]
self._unused_ips.add(ip)
except KeyError:
pass # Unknown IP
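The helpers above give the Subnet a first-free IP allocator over its CIDR, with the leading addresses reserved and freed addresses reused. A standalone sketch of the same idea (not moto code), to make that behaviour concrete:

import ipaddress
import six

class TinyIpPool(object):
    """Toy version of the Subnet IP bookkeeping above."""
    def __init__(self, cidr_block):
        self.cidr = ipaddress.ip_network(six.text_type(cidr_block))
        self._gen = self.cidr.hosts()
        # AWS keeps the first few host addresses for itself, as the Subnet model does
        self.reserved = [six.next(self._gen) for _ in range(3)]
        self.in_use = {}       # ip -> owner
        self.returned = set()  # freed addresses, handed out again first

    def allocate(self, owner):
        ip = self.returned.pop() if self.returned else str(six.next(self._gen))
        self.in_use[ip] = owner
        return ip

    def release(self, ip):
        if self.in_use.pop(ip, None) is not None:
            self.returned.add(ip)

pool = TinyIpPool('10.0.0.0/28')
first = pool.allocate('i-1234abcd')  # 10.0.0.4 (the .1-.3 addresses were reserved)
pool.release(first)
print(pool.allocate('i-5678efgh'))   # reuses 10.0.0.4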
class SubnetBackend(object): class SubnetBackend(object):
def __init__(self): def __init__(self):
@ -3619,8 +3718,8 @@ class NatGatewayBackend(object):
return self.nat_gateways.pop(nat_gateway_id) return self.nat_gateways.pop(nat_gateway_id)
class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, class EC2Backend(BaseBackend, InstanceBackend, TagBackend, EBSBackend,
RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend, RegionsAndZonesBackend, SecurityGroupBackend, AmiBackend,
VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend, VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend,
NetworkInterfaceBackend, VPNConnectionBackend, NetworkInterfaceBackend, VPNConnectionBackend,
VPCPeeringConnectionBackend, VPCPeeringConnectionBackend,

View File

@ -0,0 +1,546 @@
[
{
"ami_id": "ami-03cf127a",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Nano Locale English AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Nano-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-12c6146b",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2008 R2 SP1 Datacenter 64-bit Locale English Base AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2008-R2_SP1-English-64Bit-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-1812c061",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Standard 2016 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Standard-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-1e749f67",
"state": "available",
"public": true,
"owner_id": "099720109477",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Canonical, Ubuntu, 14.04 LTS, amd64 trusty image build on 2017-07-27",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-1ecc1e67",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-1f12c066",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Express 2016 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Express-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-24f3215d",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Web 2014 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Web-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-35e92e4c",
"state": "available",
"public": true,
"owner_id": "013907871322",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "SUSE Linux Enterprise Server 12 SP3 (HVM, 64-bit, SSD-Backed)",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "suse-sles-12-sp3-v20170907-hvm-ssd-x86_64",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-3bf32142",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Express 2016 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Express-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-3df32144",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Enterprise 2016 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Enterprise-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-56ec3e2f",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Express 2017 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2017_Express-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-61db0918",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2003 R2 SP2 Datacenter 64-bit Locale English Base AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2003-R2_SP2-English-64Bit-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-6ef02217",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Web 2016 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Web-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-760aaa0f",
"state": "available",
"public": true,
"owner_id": "137112412989",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/xvda",
"description": "Amazon Linux AMI 2017.09.1.20171103 x86_64 HVM GP2",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "amzn-ami-hvm-2017.09.1.20171103-x86_64-gp2",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-77ed3f0e",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Full Locale English with SQL Enterprise 2016 SP1 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Enterprise-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-785db401",
"state": "available",
"public": true,
"owner_id": "099720109477",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Canonical, Ubuntu, 16.04 LTS, amd64 xenial image build on 2017-07-21",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20170721",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-8104a4f8",
"state": "available",
"public": true,
"owner_id": "137112412989",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Amazon Linux AMI 2017.09.1.20171103 x86_64 PV EBS",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "amzn-ami-pv-2017.09.1.20171103-x86_64-ebs",
"virtualization_type": "paravirtual",
"hypervisor": "xen"
},
{
"ami_id": "ami-84ee3cfd",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Web 2017 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2017_Web-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-86ee3cff",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Standard 2017 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2017_Standard-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-999844e0",
"state": "available",
"public": true,
"owner_id": "898082745236",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/xvda",
"description": "Deep Learning on Amazon Linux with MXNet, Tensorflow, Caffe, Theano, Torch, CNTK and Keras",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "Deep Learning AMI Amazon Linux - 3.3_Oct2017",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-9b32e8e2",
"state": "available",
"public": true,
"owner_id": "898082745236",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "CUDA9 Classic Ubuntu DLAMI 1508914531",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "Ubuntu CUDA9 DLAMI with MXNet/TF/Caffe2",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-a9cc1ed0",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Standard 2014 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Standard-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-afee3cd6",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Web 2016 SP1 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Web-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-b7e93bce",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 with Desktop Experience Locale English AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-bb9a6bc2",
"state": "available",
"public": true,
"owner_id": "309956199498",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Provided by Red Hat, Inc.",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "RHEL-7.4_HVM_GA-20170808-x86_64-2-Hourly2-GP2",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-bceb39c5",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 with Containers Locale English AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-Containers-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-c2ff2dbb",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 RTM 64-bit Locale English Base AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-RTM-English-64Bit-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-c6f321bf",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Express 2014 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Express-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-d1cb19a8",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2008 SP2 Datacenter 64-bit Locale English Base AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2008-SP2-English-64Bit-Base-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-dca37ea5",
"state": "available",
"public": true,
"owner_id": "898082745236",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Deep Learning on Ubuntu Linux with MXNet, Tensorflow, Caffe, Theano, Torch, CNTK and Keras",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "Deep Learning AMI Ubuntu Linux - 2.4_Oct2017",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-f0e83a89",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2016 Locale English with SQL Enterprise 2017 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2016-English-Full-SQL_2017_Enterprise-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-f4cf1d8d",
"state": "available",
"public": true,
"owner_id": "801119661308",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda1",
"description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Standard 2016 AMI provided by Amazon",
"image_type": "machine",
"platform": "windows",
"architecture": "x86_64",
"name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Standard-2017.10.13",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-f8e54081",
"state": "available",
"public": true,
"owner_id": "898082745236",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/xvda",
"description": "CUDA9 Classic Amazon Linux DLAMI 1508914924",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "CUDA9ClassicAmazonLinuxDLAMIwithMXNetTensorflowandCaffe2 ",
"virtualization_type": "hvm",
"hypervisor": "xen"
},
{
"ami_id": "ami-fa7cdd89",
"state": "available",
"public": true,
"owner_id": "013907871322",
"sriov": "simple",
"root_device_type": "ebs",
"root_device_name": "/dev/sda",
"description": "SUSE Linux Enterprise Server 11 Service Pack 4 ((PV, 64-bit, SSD-Backed)",
"image_type": "machine",
"platform": null,
"architecture": "x86_64",
"name": "suse-sles-11-sp4-v20151207-pv-ssd-x86_64",
"virtualization_type": "paravirtual",
"hypervisor": "xen"
}
]

View File

@ -36,9 +36,10 @@ class AmisResponse(BaseResponse):
def describe_images(self): def describe_images(self):
ami_ids = self._get_multi_param('ImageId') ami_ids = self._get_multi_param('ImageId')
filters = filters_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring)
owners = self._get_multi_param('Owner')
exec_users = self._get_multi_param('ExecutableBy') exec_users = self._get_multi_param('ExecutableBy')
images = self.ec2_backend.describe_images( images = self.ec2_backend.describe_images(
ami_ids=ami_ids, filters=filters, exec_users=exec_users) ami_ids=ami_ids, filters=filters, exec_users=exec_users, owners=owners)
template = self.response_template(DESCRIBE_IMAGES_RESPONSE) template = self.response_template(DESCRIBE_IMAGES_RESPONSE)
return template.render(images=images) return template.render(images=images)
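With Owner now parsed and passed through to describe_images, the pre-seeded AMIs can be narrowed by owner id from boto3. A hedged sketch, using an owner id that appears in the bundled amis.json:

import boto3
from moto import mock_ec2

@mock_ec2
def amazon_windows_amis():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    # 801119661308 is the owner id on the canned Windows images in amis.json
    images = ec2.describe_images(Owners=['801119661308'])['Images']
    return sorted(img['ImageId'] for img in images)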
@ -92,12 +93,12 @@ DESCRIBE_IMAGES_RESPONSE = """<DescribeImagesResponse xmlns="http://ec2.amazonaw
{% for image in images %} {% for image in images %}
<item> <item>
<imageId>{{ image.id }}</imageId> <imageId>{{ image.id }}</imageId>
<imageLocation>amazon/getting-started</imageLocation> <imageLocation>{{ image.image_location }}</imageLocation>
<imageState>{{ image.state }}</imageState> <imageState>{{ image.state }}</imageState>
<imageOwnerId>123456789012</imageOwnerId> <imageOwnerId>{{ image.owner_id }}</imageOwnerId>
<isPublic>{{ image.is_public_string }}</isPublic> <isPublic>{{ image.is_public_string }}</isPublic>
<architecture>{{ image.architecture }}</architecture> <architecture>{{ image.architecture }}</architecture>
<imageType>machine</imageType> <imageType>{{ image.image_type }}</imageType>
<kernelId>{{ image.kernel_id }}</kernelId> <kernelId>{{ image.kernel_id }}</kernelId>
<ramdiskId>ari-1a2b3c4d</ramdiskId> <ramdiskId>ari-1a2b3c4d</ramdiskId>
<imageOwnerAlias>amazon</imageOwnerAlias> <imageOwnerAlias>amazon</imageOwnerAlias>
@ -107,8 +108,8 @@ DESCRIBE_IMAGES_RESPONSE = """<DescribeImagesResponse xmlns="http://ec2.amazonaw
<platform>{{ image.platform }}</platform> <platform>{{ image.platform }}</platform>
{% endif %} {% endif %}
<description>{{ image.description }}</description> <description>{{ image.description }}</description>
<rootDeviceType>ebs</rootDeviceType> <rootDeviceType>{{ image.root_device_type }}</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName> <rootDeviceName>{{ image.root_device_name }}</rootDeviceName>
<blockDeviceMapping> <blockDeviceMapping>
<item> <item>
<deviceName>/dev/sda1</deviceName> <deviceName>/dev/sda1</deviceName>

View File

@ -16,8 +16,7 @@ class InstanceResponse(BaseResponse):
reservations = self.ec2_backend.get_reservations_by_instance_ids( reservations = self.ec2_backend.get_reservations_by_instance_ids(
instance_ids, filters=filter_dict) instance_ids, filters=filter_dict)
else: else:
reservations = self.ec2_backend.all_reservations( reservations = self.ec2_backend.all_reservations(filters=filter_dict)
make_copy=True, filters=filter_dict)
reservation_ids = [reservation.id for reservation in reservations] reservation_ids = [reservation.id for reservation in reservations]
if token: if token:
@ -35,6 +34,7 @@ class InstanceResponse(BaseResponse):
def run_instances(self): def run_instances(self):
min_count = int(self._get_param('MinCount', if_none='1')) min_count = int(self._get_param('MinCount', if_none='1'))
image_id = self._get_param('ImageId') image_id = self._get_param('ImageId')
owner_id = self._get_param('OwnerId')
user_data = self._get_param('UserData') user_data = self._get_param('UserData')
security_group_names = self._get_multi_param('SecurityGroup') security_group_names = self._get_multi_param('SecurityGroup')
security_group_ids = self._get_multi_param('SecurityGroupId') security_group_ids = self._get_multi_param('SecurityGroupId')
@ -52,7 +52,7 @@ class InstanceResponse(BaseResponse):
new_reservation = self.ec2_backend.add_instances( new_reservation = self.ec2_backend.add_instances(
image_id, min_count, user_data, security_group_names, image_id, min_count, user_data, security_group_names,
instance_type=instance_type, placement=placement, region_name=region_name, subnet_id=subnet_id, instance_type=instance_type, placement=placement, region_name=region_name, subnet_id=subnet_id,
key_name=key_name, security_group_ids=security_group_ids, owner_id=owner_id, key_name=key_name, security_group_ids=security_group_ids,
nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip, nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip,
tags=tags) tags=tags)

View File

@ -4,6 +4,7 @@ from datetime import datetime
from random import random, randint from random import random, randint
import pytz import pytz
from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends from moto.ec2 import ec2_backends
from copy import copy from copy import copy
@ -148,7 +149,7 @@ class Task(BaseObject):
resource_requirements, overrides={}, started_by=''): resource_requirements, overrides={}, started_by=''):
self.cluster_arn = cluster.arn self.cluster_arn = cluster.arn
self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format( self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(
str(uuid.uuid1())) str(uuid.uuid4()))
self.container_instance_arn = container_instance_arn self.container_instance_arn = container_instance_arn
self.last_status = 'RUNNING' self.last_status = 'RUNNING'
self.desired_status = 'RUNNING' self.desired_status = 'RUNNING'
@ -288,7 +289,7 @@ class ContainerInstance(BaseObject):
'stringSetValue': [], 'stringSetValue': [],
'type': 'STRINGSET'}] 'type': 'STRINGSET'}]
self.container_instance_arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format( self.container_instance_arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
str(uuid.uuid1())) str(uuid.uuid4()))
self.pending_tasks_count = 0 self.pending_tasks_count = 0
self.remaining_resources = [ self.remaining_resources = [
{'doubleValue': 0.0, {'doubleValue': 0.0,
@ -321,6 +322,8 @@ class ContainerInstance(BaseObject):
'dockerVersion': 'DockerVersion: 1.5.0' 'dockerVersion': 'DockerVersion: 1.5.0'
} }
self.attributes = {}
@property @property
def response_object(self): def response_object(self):
response_object = self.gen_response_object() response_object = self.gen_response_object()
@ -766,6 +769,102 @@ class EC2ContainerServiceBackend(BaseBackend):
raise Exception("{0} is not a cluster".format(cluster_name)) raise Exception("{0} is not a cluster".format(cluster_name))
pass pass
def put_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400)
if attributes is None:
raise JsonRESTError('InvalidParameterException', 'attributes value is required')
for attr in attributes:
self._put_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType'))
def _put_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
instance.attributes[name] = value
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit('/', 1)[-1]
self.container_instances[cluster_name][arn].attributes[name] = value
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != 'container-instance':
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
self.container_instances[cluster_name][target_id].attributes[name] = value
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
def list_attributes(self, target_type, cluster_name=None, attr_name=None, attr_value=None, max_results=None, next_token=None):
if target_type != 'container-instance':
raise JsonRESTError('InvalidParameterException', 'targetType must be container-instance')
filters = [lambda x: True]
# item will be {0 cluster_name, 1 arn, 2 name, 3 value}
if cluster_name is not None:
filters.append(lambda item: item[0] == cluster_name)
if attr_name:
filters.append(lambda item: item[2] == attr_name)
if attr_value:
filters.append(lambda item: item[3] == attr_value)
all_attrs = []
for cluster_name, cobj in self.container_instances.items():
for container_instance in cobj.values():
for key, value in container_instance.attributes.items():
all_attrs.append((cluster_name, container_instance.container_instance_arn, key, value))
return filter(lambda x: all(f(x) for f in filters), all_attrs)
def delete_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400)
if attributes is None:
raise JsonRESTError('InvalidParameterException', 'attributes value is required')
for attr in attributes:
self._delete_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType'))
def _delete_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit('/', 1)[-1]
instance = self.container_instances[cluster_name][arn]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != 'container-instance':
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
instance = self.container_instances[cluster_name][target_id]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
def list_task_definition_families(self, family_prefix=None, status=None, max_results=None, next_token=None):
for task_fam in self.task_definitions:
if family_prefix is not None and not task_fam.startswith(family_prefix):
continue
yield task_fam
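A hedged sketch of the new family listing from the boto3 side; the task definition contents are illustrative:

import boto3
from moto import mock_ecs

@mock_ecs
def families_with_prefix():
    ecs = boto3.client('ecs', region_name='us-east-1')
    ecs.register_task_definition(
        family='web-nginx',
        containerDefinitions=[{'name': 'nginx', 'image': 'nginx:latest',
                               'memory': 128, 'essential': True}],
    )
    # Only families starting with the prefix are yielded by the generator above
    return ecs.list_task_definition_families(familyPrefix='web')['families']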
ecs_backends = {} ecs_backends = {}
for region, ec2_backend in ec2_backends.items(): for region, ec2_backend in ec2_backends.items():

View File

@ -9,6 +9,12 @@ class EC2ContainerServiceResponse(BaseResponse):
@property @property
def ecs_backend(self): def ecs_backend(self):
"""
ECS Backend
:return: ECS Backend object
:rtype: moto.ecs.models.EC2ContainerServiceBackend
"""
return ecs_backends[self.region] return ecs_backends[self.region]
@property @property
@ -34,7 +40,7 @@ class EC2ContainerServiceResponse(BaseResponse):
cluster_arns = self.ecs_backend.list_clusters() cluster_arns = self.ecs_backend.list_clusters()
return json.dumps({ return json.dumps({
'clusterArns': cluster_arns 'clusterArns': cluster_arns
# 'nextToken': str(uuid.uuid1()) # 'nextToken': str(uuid.uuid4())
}) })
def describe_clusters(self): def describe_clusters(self):
@ -66,7 +72,7 @@ class EC2ContainerServiceResponse(BaseResponse):
task_definition_arns = self.ecs_backend.list_task_definitions() task_definition_arns = self.ecs_backend.list_task_definitions()
return json.dumps({ return json.dumps({
'taskDefinitionArns': task_definition_arns 'taskDefinitionArns': task_definition_arns
# 'nextToken': str(uuid.uuid1()) # 'nextToken': str(uuid.uuid4())
}) })
def describe_task_definition(self): def describe_task_definition(self):
@ -159,7 +165,7 @@ class EC2ContainerServiceResponse(BaseResponse):
return json.dumps({ return json.dumps({
'serviceArns': service_arns 'serviceArns': service_arns
# , # ,
# 'nextToken': str(uuid.uuid1()) # 'nextToken': str(uuid.uuid4())
}) })
def describe_services(self): def describe_services(self):
@ -245,3 +251,62 @@ class EC2ContainerServiceResponse(BaseResponse):
'failures': [ci.response_object for ci in failures], 'failures': [ci.response_object for ci in failures],
'containerInstances': [ci.response_object for ci in container_instances] 'containerInstances': [ci.response_object for ci in container_instances]
}) })
def put_attributes(self):
cluster_name = self._get_param('cluster')
attributes = self._get_param('attributes')
self.ecs_backend.put_attributes(cluster_name, attributes)
return json.dumps({'attributes': attributes})
def list_attributes(self):
cluster_name = self._get_param('cluster')
attr_name = self._get_param('attributeName')
attr_value = self._get_param('attributeValue')
target_type = self._get_param('targetType')
max_results = self._get_param('maxResults')
next_token = self._get_param('nextToken')
results = self.ecs_backend.list_attributes(target_type, cluster_name, attr_name, attr_value, max_results, next_token)
# Result will be [item will be {0 cluster_name, 1 arn, 2 name, 3 value}]
formatted_results = []
for _, arn, name, value in results:
tmp_result = {
'name': name,
'targetId': arn
}
if value is not None:
tmp_result['value'] = value
formatted_results.append(tmp_result)
return json.dumps({'attributes': formatted_results})
def delete_attributes(self):
cluster_name = self._get_param('cluster')
attributes = self._get_param('attributes')
self.ecs_backend.delete_attributes(cluster_name, attributes)
return json.dumps({'attributes': attributes})
def discover_poll_endpoint(self):
# This API is used by the ECS agent, so there is no decent public documentation for it.
# Hence we respond with valid but useless data. The arguments it sends are:
# cluster_name = self._get_param('cluster')
# instance = self._get_param('containerInstance')
return json.dumps({
'endpoint': 'http://localhost',
'telemetryEndpoint': 'http://localhost'
})
def list_task_definition_families(self):
family_prefix = self._get_param('familyPrefix')
status = self._get_param('status')
max_results = self._get_param('maxResults')
next_token = self._get_param('nextToken')
results = self.ecs_backend.list_task_definition_families(family_prefix, status, max_results, next_token)
return json.dumps({'families': list(results)})

View File

@ -3,8 +3,12 @@ from __future__ import unicode_literals
import datetime import datetime
import re import re
from moto.compat import OrderedDict from moto.compat import OrderedDict
from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.ec2.models import ec2_backends from moto.ec2.models import ec2_backends
from moto.acm.models import acm_backends
from .utils import make_arn_for_target_group
from .utils import make_arn_for_load_balancer
from .exceptions import ( from .exceptions import (
DuplicateLoadBalancerName, DuplicateLoadBalancerName,
DuplicateListenerError, DuplicateListenerError,
@ -40,6 +44,8 @@ class FakeHealthStatus(BaseModel):
class FakeTargetGroup(BaseModel): class FakeTargetGroup(BaseModel):
HTTP_CODE_REGEX = re.compile(r'(?:(?:\d+-\d+|\d+),?)+')
def __init__(self, def __init__(self,
name, name,
arn, arn,
@ -52,7 +58,9 @@ class FakeTargetGroup(BaseModel):
healthcheck_interval_seconds, healthcheck_interval_seconds,
healthcheck_timeout_seconds, healthcheck_timeout_seconds,
healthy_threshold_count, healthy_threshold_count,
unhealthy_threshold_count): unhealthy_threshold_count,
matcher=None,
target_type=None):
self.name = name self.name = name
self.arn = arn self.arn = arn
self.vpc_id = vpc_id self.vpc_id = vpc_id
@ -67,6 +75,8 @@ class FakeTargetGroup(BaseModel):
self.unhealthy_threshold_count = unhealthy_threshold_count self.unhealthy_threshold_count = unhealthy_threshold_count
self.load_balancer_arns = [] self.load_balancer_arns = []
self.tags = {} self.tags = {}
self.matcher = matcher
self.target_type = target_type
self.attributes = { self.attributes = {
'deregistration_delay.timeout_seconds': 300, 'deregistration_delay.timeout_seconds': 300,
@ -75,6 +85,10 @@ class FakeTargetGroup(BaseModel):
self.targets = OrderedDict() self.targets = OrderedDict()
@property
def physical_resource_id(self):
return self.arn
def register(self, targets): def register(self, targets):
for target in targets: for target in targets:
self.targets[target['id']] = { self.targets[target['id']] = {
@ -99,6 +113,46 @@ class FakeTargetGroup(BaseModel):
raise InvalidTargetError() raise InvalidTargetError()
return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy') return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy')
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
elbv2_backend = elbv2_backends[region_name]
# per cloudformation docs:
# The target group name should be shorter than 22 characters because
# AWS CloudFormation uses the target group name to create the name of the load balancer.
name = properties.get('Name', resource_name[:22])
vpc_id = properties.get("VpcId")
protocol = properties.get('Protocol')
port = properties.get("Port")
healthcheck_protocol = properties.get("HealthCheckProtocol")
healthcheck_port = properties.get("HealthCheckPort")
healthcheck_path = properties.get("HealthCheckPath")
healthcheck_interval_seconds = properties.get("HealthCheckIntervalSeconds")
healthcheck_timeout_seconds = properties.get("HealthCheckTimeoutSeconds")
healthy_threshold_count = properties.get("HealthyThresholdCount")
unhealthy_threshold_count = properties.get("UnhealthyThresholdCount")
matcher = properties.get("Matcher")
target_type = properties.get("TargetType")
target_group = elbv2_backend.create_target_group(
name=name,
vpc_id=vpc_id,
protocol=protocol,
port=port,
healthcheck_protocol=healthcheck_protocol,
healthcheck_port=healthcheck_port,
healthcheck_path=healthcheck_path,
healthcheck_interval_seconds=healthcheck_interval_seconds,
healthcheck_timeout_seconds=healthcheck_timeout_seconds,
healthy_threshold_count=healthy_threshold_count,
unhealthy_threshold_count=unhealthy_threshold_count,
matcher=matcher,
target_type=target_type,
)
return target_group
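
For orientation, the kind of CloudFormation resource this handler consumes might look like the fragment below; the resource and VPC names are purely illustrative:

# Illustrative template fragment only; 'MyVPC' is a hypothetical sibling resource.
target_group_resource = {
    "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
    "Properties": {
        "Name": "mytargetgroup1",   # kept short, per the CloudFormation note above
        "VpcId": {"Ref": "MyVPC"},
        "Protocol": "HTTP",
        "Port": 80,
        "HealthCheckProtocol": "HTTP",
        "HealthCheckPath": "/status",
        "Matcher": {"HttpCode": "200"},
    },
}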
class FakeListener(BaseModel): class FakeListener(BaseModel):
@ -109,6 +163,7 @@ class FakeListener(BaseModel):
self.port = port self.port = port
self.ssl_policy = ssl_policy self.ssl_policy = ssl_policy
self.certificate = certificate self.certificate = certificate
self.certificates = [certificate] if certificate is not None else []
self.default_actions = default_actions self.default_actions = default_actions
self._non_default_rules = [] self._non_default_rules = []
self._default_rule = FakeRule( self._default_rule = FakeRule(
@ -119,6 +174,10 @@ class FakeListener(BaseModel):
is_default=True is_default=True
) )
@property
def physical_resource_id(self):
return self.arn
@property @property
def rules(self): def rules(self):
return self._non_default_rules + [self._default_rule] return self._non_default_rules + [self._default_rule]
@ -130,6 +189,28 @@ class FakeListener(BaseModel):
self._non_default_rules.append(rule) self._non_default_rules.append(rule)
self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority) self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
elbv2_backend = elbv2_backends[region_name]
load_balancer_arn = properties.get("LoadBalancerArn")
protocol = properties.get("Protocol")
port = properties.get("Port")
ssl_policy = properties.get("SslPolicy")
certificates = properties.get("Certificates")
# transform default actions to conform with the rest of the code and the XML templates
if "DefaultActions" in properties:
default_actions = []
for action in properties['DefaultActions']:
default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']})
else:
default_actions = None
listener = elbv2_backend.create_listener(
load_balancer_arn, protocol, port, ssl_policy, certificates, default_actions)
return listener
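
A matching listener resource, again purely illustrative, shows the DefaultActions shape the transformation above expects; 'MyLoadBalancer' and 'MyTargetGroup' are hypothetical sibling resources:

# Illustrative template fragment only.
listener_resource = {
    "Type": "AWS::ElasticLoadBalancingV2::Listener",
    "Properties": {
        "LoadBalancerArn": {"Ref": "MyLoadBalancer"},
        "Protocol": "HTTP",
        "Port": 80,
        "DefaultActions": [
            {"Type": "forward", "TargetGroupArn": {"Ref": "MyTargetGroup"}}
        ],
    },
}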
class FakeRule(BaseModel): class FakeRule(BaseModel):
@ -153,6 +234,8 @@ class FakeBackend(BaseModel):
class FakeLoadBalancer(BaseModel): class FakeLoadBalancer(BaseModel):
VALID_ATTRS = {'access_logs.s3.enabled', 'access_logs.s3.bucket', 'access_logs.s3.prefix',
'deletion_protection.enabled', 'idle_timeout.timeout_seconds'}
def __init__(self, name, security_groups, subnets, vpc_id, arn, dns_name, scheme='internet-facing'): def __init__(self, name, security_groups, subnets, vpc_id, arn, dns_name, scheme='internet-facing'):
self.name = name self.name = name
@ -166,9 +249,18 @@ class FakeLoadBalancer(BaseModel):
self.arn = arn self.arn = arn
self.dns_name = dns_name self.dns_name = dns_name
self.stack = 'ipv4'
self.attrs = {
'access_logs.s3.enabled': 'false',
'access_logs.s3.bucket': None,
'access_logs.s3.prefix': None,
'deletion_protection.enabled': 'false',
'idle_timeout.timeout_seconds': '60'
}
@property @property
def physical_resource_id(self): def physical_resource_id(self):
return self.name return self.arn
def add_tag(self, key, value): def add_tag(self, key, value):
if len(self.tags) >= 10 and key not in self.tags: if len(self.tags) >= 10 and key not in self.tags:
@ -186,6 +278,27 @@ class FakeLoadBalancer(BaseModel):
''' Not exposed as part of the ELB API - used for CloudFormation. ''' ''' Not exposed as part of the ELB API - used for CloudFormation. '''
elbv2_backends[region].delete_load_balancer(self.arn) elbv2_backends[region].delete_load_balancer(self.arn)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
elbv2_backend = elbv2_backends[region_name]
name = properties.get('Name', resource_name)
security_groups = properties.get("SecurityGroups")
subnet_ids = properties.get('Subnets')
scheme = properties.get('Scheme', 'internet-facing')
load_balancer = elbv2_backend.create_load_balancer(name, security_groups, subnet_ids, scheme=scheme)
return load_balancer
def get_cfn_attribute(self, attribute_name):
attributes = {
'DNSName': self.dns_name,
'LoadBalancerName': self.name,
}
return attributes[attribute_name]
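
Putting the CloudFormation hooks together, an end-to-end sketch might look like the following; it assumes the CloudFormation mock maps the ELBv2 resource types (handled elsewhere in this change), and all names, CIDRs and availability zones are invented:

import json

import boto3
from moto import mock_cloudformation, mock_ec2, mock_elbv2


@mock_ec2
@mock_elbv2
@mock_cloudformation
def example_elbv2_stack():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.1.0/24',
                                AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.2.0/24',
                                AvailabilityZone='us-east-1b')

    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "alb": {
                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
                "Properties": {
                    "Name": "cfn-alb",
                    "Subnets": [subnet1.id, subnet2.id],
                    "Scheme": "internet-facing",
                },
            },
        },
        # Exercises get_cfn_attribute('DNSName') defined above
        "Outputs": {
            "albdns": {"Value": {"Fn::GetAtt": ["alb", "DNSName"]}},
        },
    }

    cfn = boto3.client('cloudformation', region_name='us-east-1')
    cfn.create_stack(StackName='elbv2-stack', TemplateBody=json.dumps(template))

    elbv2 = boto3.client('elbv2', region_name='us-east-1')
    assert elbv2.describe_load_balancers()['LoadBalancers'][0]['LoadBalancerName'] == 'cfn-alb'

    stack = cfn.describe_stacks(StackName='elbv2-stack')['Stacks'][0]
    print(stack['Outputs'][0]['OutputValue'])  # the mocked DNS name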
class ELBv2Backend(BaseBackend): class ELBv2Backend(BaseBackend):
@ -194,6 +307,26 @@ class ELBv2Backend(BaseBackend):
self.target_groups = OrderedDict() self.target_groups = OrderedDict()
self.load_balancers = OrderedDict() self.load_balancers = OrderedDict()
@property
def ec2_backend(self):
"""
EC2 backend
:return: EC2 Backend
:rtype: moto.ec2.models.EC2Backend
"""
return ec2_backends[self.region_name]
@property
def acm_backend(self):
"""
ACM backend
:return: ACM Backend
:rtype: moto.acm.models.AWSCertificateManagerBackend
"""
return acm_backends[self.region_name]
def reset(self): def reset(self):
region_name = self.region_name region_name = self.region_name
self.__dict__ = {} self.__dict__ = {}
@ -201,18 +334,17 @@ class ELBv2Backend(BaseBackend):
def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'): def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'):
vpc_id = None vpc_id = None
ec2_backend = ec2_backends[self.region_name]
subnets = [] subnets = []
if not subnet_ids: if not subnet_ids:
raise SubnetNotFoundError() raise SubnetNotFoundError()
for subnet_id in subnet_ids: for subnet_id in subnet_ids:
subnet = ec2_backend.get_subnet(subnet_id) subnet = self.ec2_backend.get_subnet(subnet_id)
if subnet is None: if subnet is None:
raise SubnetNotFoundError() raise SubnetNotFoundError()
subnets.append(subnet) subnets.append(subnet)
vpc_id = subnets[0].vpc_id vpc_id = subnets[0].vpc_id
arn = "arn:aws:elasticloadbalancing:%s:1:loadbalancer/%s/50dc6c495c0c9188" % (self.region_name, name) arn = make_arn_for_load_balancer(account_id=1, name=name, region_name=self.region_name)
dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name) dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name)
if arn in self.load_balancers: if arn in self.load_balancers:
@ -279,7 +411,7 @@ class ELBv2Backend(BaseBackend):
def create_target_group(self, name, **kwargs): def create_target_group(self, name, **kwargs):
if len(name) > 32: if len(name) > 32:
raise InvalidTargetGroupNameError( raise InvalidTargetGroupNameError(
"Target group name '%s' cannot be longer than '32' characters" % name "Target group name '%s' cannot be longer than '22' characters" % name
) )
if not re.match('^[a-zA-Z0-9\-]+$', name): if not re.match('^[a-zA-Z0-9\-]+$', name):
raise InvalidTargetGroupNameError( raise InvalidTargetGroupNameError(
@ -300,7 +432,30 @@ class ELBv2Backend(BaseBackend):
if target_group.name == name: if target_group.name == name:
raise DuplicateTargetGroupName() raise DuplicateTargetGroupName()
arn = "arn:aws:elasticloadbalancing:%s:1:targetgroup/%s/50dc6c495c0c9188" % (self.region_name, name) valid_protocols = ['HTTPS', 'HTTP', 'TCP']
if kwargs['healthcheck_protocol'] not in valid_protocols:
raise InvalidConditionValueError(
"Value {} at 'healthCheckProtocol' failed to satisfy constraint: "
"Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols))
if kwargs['protocol'] not in valid_protocols:
raise InvalidConditionValueError(
"Value {} at 'protocol' failed to satisfy constraint: "
"Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols))
if FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None:
raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')
arn = make_arn_for_target_group(account_id=1, name=name, region_name=self.region_name)
target_group = FakeTargetGroup(name, arn, **kwargs) target_group = FakeTargetGroup(name, arn, **kwargs)
self.target_groups[target_group.arn] = target_group self.target_groups[target_group.arn] = target_group
return target_group return target_group
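
A client-side sketch of the new Matcher support and protocol validation (the VPC, names and codes are illustrative):

import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2, mock_elbv2


@mock_ec2
@mock_elbv2
def example_create_target_group_with_matcher():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    client = boto3.client('elbv2', region_name='us-east-1')

    # HttpCode must look like '200', '200-399' or '200,201'
    group = client.create_target_group(
        Name='my-targets',
        Protocol='HTTP',
        Port=80,
        VpcId=vpc.id,
        Matcher={'HttpCode': '200,201'},
    )['TargetGroups'][0]
    assert group['Matcher'] == {'HttpCode': '200,201'}

    # Protocols outside HTTP / HTTPS / TCP are rejected
    try:
        client.create_target_group(Name='bad-proto', Protocol='FTP', Port=21, VpcId=vpc.id)
    except ClientError as exc:
        print(exc.response['Error']['Message'])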
@ -547,6 +702,166 @@ class ELBv2Backend(BaseBackend):
modified_rules.append(given_rule) modified_rules.append(given_rule)
return modified_rules return modified_rules
def set_ip_address_type(self, arn, ip_type):
if ip_type not in ('internal', 'dualstack'):
raise RESTError('InvalidParameterValue', 'IpAddressType must be either internal | dualstack')
balancer = self.load_balancers.get(arn)
if balancer is None:
raise LoadBalancerNotFoundError()
if ip_type == 'dualstack' and balancer.scheme == 'internal':
raise RESTError('InvalidConfigurationRequest', 'Internal load balancers cannot be dualstack')
balancer.stack = ip_type
def set_security_groups(self, arn, sec_groups):
balancer = self.load_balancers.get(arn)
if balancer is None:
raise LoadBalancerNotFoundError()
# Check all security groups exist
for sec_group_id in sec_groups:
if self.ec2_backend.get_security_group_from_id(sec_group_id) is None:
raise RESTError('InvalidSecurityGroup', 'Security group {0} does not exist'.format(sec_group_id))
balancer.security_groups = sec_groups
def set_subnets(self, arn, subnets):
balancer = self.load_balancers.get(arn)
if balancer is None:
raise LoadBalancerNotFoundError()
subnet_objects = []
sub_zone_list = {}
for subnet in subnets:
try:
subnet = self.ec2_backend.get_subnet(subnet)
if subnet.availability_zone in sub_zone_list:
raise RESTError('InvalidConfigurationRequest', 'More than 1 subnet cannot be specified for 1 availability zone')
sub_zone_list[subnet.availability_zone] = subnet.id
subnet_objects.append(subnet)
except Exception:
raise SubnetNotFoundError()
if len(sub_zone_list) < 2:
raise RESTError('InvalidConfigurationRequest', 'More than 1 availability zone must be specified')
balancer.subnets = subnet_objects
return sub_zone_list.items()
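
The three mutators above map onto boto3 calls along these lines; exercise_elbv2_mutators is a hypothetical helper, and the client, ARN and IDs are assumed to come from earlier create_* calls:

def exercise_elbv2_mutators(client, lb_arn, subnet_ids, security_group_ids):
    """Sketch only; arguments are assumed to already exist in the mock."""
    # Subnets must span at least two availability zones
    client.set_subnets(LoadBalancerArn=lb_arn, Subnets=subnet_ids)
    # Every security group ID must exist in the EC2 mock
    client.set_security_groups(LoadBalancerArn=lb_arn, SecurityGroups=security_group_ids)
    # Note: this mock accepts 'internal' or 'dualstack' here, and rejects
    # 'dualstack' for internal-scheme load balancers
    client.set_ip_address_type(LoadBalancerArn=lb_arn, IpAddressType='dualstack')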
def modify_load_balancer_attributes(self, arn, attrs):
balancer = self.load_balancers.get(arn)
if balancer is None:
raise LoadBalancerNotFoundError()
for key in attrs:
if key not in FakeLoadBalancer.VALID_ATTRS:
raise RESTError('InvalidConfigurationRequest', 'Key {0} not valid'.format(key))
balancer.attrs.update(attrs)
return balancer.attrs
def describe_load_balancer_attributes(self, arn):
balancer = self.load_balancers.get(arn)
if balancer is None:
raise LoadBalancerNotFoundError()
return balancer.attrs
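
The load balancer attribute plumbing can be driven like this; tweak_idle_timeout is a hypothetical helper and the ARN is assumed to come from create_load_balancer (keys must appear in FakeLoadBalancer.VALID_ATTRS):

def tweak_idle_timeout(client, lb_arn):
    """Sketch: modify one attribute, then read the full set back."""
    client.modify_load_balancer_attributes(
        LoadBalancerArn=lb_arn,
        Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '120'}],
    )
    attrs = client.describe_load_balancer_attributes(LoadBalancerArn=lb_arn)['Attributes']
    return {attr['Key']: attr['Value'] for attr in attrs}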
def modify_target_group(self, arn, health_check_proto=None, health_check_port=None, health_check_path=None, health_check_interval=None,
health_check_timeout=None, healthy_threshold_count=None, unhealthy_threshold_count=None, http_codes=None):
target_group = self.target_groups.get(arn)
if target_group is None:
raise TargetGroupNotFoundError()
if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(http_codes) is None:
raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')
if http_codes is not None:
target_group.matcher['HttpCode'] = http_codes
if health_check_interval is not None:
target_group.healthcheck_interval_seconds = health_check_interval
if health_check_path is not None:
target_group.healthcheck_path = health_check_path
if health_check_port is not None:
target_group.healthcheck_port = health_check_port
if health_check_proto is not None:
target_group.healthcheck_protocol = health_check_proto
if health_check_timeout is not None:
target_group.healthcheck_timeout_seconds = health_check_timeout
if healthy_threshold_count is not None:
target_group.healthy_threshold_count = healthy_threshold_count
if unhealthy_threshold_count is not None:
target_group.unhealthy_threshold_count = unhealthy_threshold_count
return target_group
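
A matching sketch for ModifyTargetGroup; relax_health_check is a hypothetical helper and the ARN is assumed to come from create_target_group:

def relax_health_check(client, target_group_arn):
    """Sketch: loosen the health check and widen the accepted HTTP codes."""
    resp = client.modify_target_group(
        TargetGroupArn=target_group_arn,
        HealthCheckPath='/status',
        HealthCheckIntervalSeconds=60,
        Matcher={'HttpCode': '200-299'},
    )
    return resp['TargetGroups'][0]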
def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None, certificates=None, default_actions=None):
for load_balancer in self.load_balancers.values():
if arn in load_balancer.listeners:
break
else:
raise ListenerNotFoundError()
listener = load_balancer.listeners[arn]
if port is not None:
for listener_arn, current_listener in load_balancer.listeners.items():
if listener_arn == arn:
continue
if listener.port == port:
raise DuplicateListenerError()
listener.port = port
if protocol is not None:
if protocol not in ('HTTP', 'HTTPS', 'TCP'):
raise RESTError('UnsupportedProtocol', 'Protocol {0} is not supported'.format(protocol))
# HTTPS checks
if protocol == 'HTTPS':
# HTTPS
# The listener may already be HTTPS, in which case new certificates are optional
if certificates is None and listener.protocol != 'HTTPS':
raise RESTError('InvalidConfigurationRequest', 'Certificates must be provided for HTTPS')
# Check certificates exist
if certificates is not None:
default_cert = None
all_certs = set() # for SNI
for cert in certificates:
if cert['is_default'] == 'true':
default_cert = cert['certificate_arn']
try:
self.acm_backend.get_certificate(cert['certificate_arn'])
except Exception:
raise RESTError('CertificateNotFound', 'Certificate {0} not found'.format(cert['certificate_arn']))
all_certs.add(cert['certificate_arn'])
if default_cert is None:
raise RESTError('InvalidConfigurationRequest', 'No default certificate')
listener.certificate = default_cert
listener.certificates = list(all_certs)
listener.protocol = protocol
if ssl_policy is not None:
# It's already validated in responses.py
listener.ssl_policy = ssl_policy
if default_actions is not None:
# Is currently not validated
listener.default_actions = default_actions
return listener
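
ModifyListener follows the same pattern; switching to HTTPS additionally needs a default certificate known to the ACM mock, so the simplest sketch (with a hypothetical helper and an ARN from create_listener) only moves the port:

def move_listener_port(client, listener_arn):
    """Sketch: change only the port; protocol and certificates stay untouched."""
    resp = client.modify_listener(ListenerArn=listener_arn, Port=8080)
    return resp['Listeners'][0]['Port']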
def _any_listener_using(self, target_group_arn): def _any_listener_using(self, target_group_arn):
for load_balancer in self.load_balancers.values(): for load_balancer in self.load_balancers.values():
for listener in load_balancer.listeners.values(): for listener in load_balancer.listeners.values():

View File

@ -1,4 +1,6 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from moto.core.exceptions import RESTError
from moto.core.utils import amzn_request_id
from moto.core.responses import BaseResponse from moto.core.responses import BaseResponse
from .models import elbv2_backends from .models import elbv2_backends
from .exceptions import DuplicateTagKeysError from .exceptions import DuplicateTagKeysError
@ -6,12 +8,131 @@ from .exceptions import LoadBalancerNotFoundError
from .exceptions import TargetGroupNotFoundError from .exceptions import TargetGroupNotFoundError
class ELBV2Response(BaseResponse): SSL_POLICIES = [
{
'name': 'ELBSecurityPolicy-2016-08',
'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
'ciphers': [
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
{'name': 'AES128-GCM-SHA256', 'priority': 13},
{'name': 'AES128-SHA256', 'priority': 14},
{'name': 'AES128-SHA', 'priority': 15},
{'name': 'AES256-GCM-SHA384', 'priority': 16},
{'name': 'AES256-SHA256', 'priority': 17},
{'name': 'AES256-SHA', 'priority': 18}
],
},
{
'name': 'ELBSecurityPolicy-TLS-1-2-2017-01',
'ssl_protocols': ['TLSv1.2'],
'ciphers': [
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 5},
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 6},
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 7},
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 8},
{'name': 'AES128-GCM-SHA256', 'priority': 9},
{'name': 'AES128-SHA256', 'priority': 10},
{'name': 'AES256-GCM-SHA384', 'priority': 11},
{'name': 'AES256-SHA256', 'priority': 12}
]
},
{
'name': 'ELBSecurityPolicy-TLS-1-1-2017-01',
'ssl_protocols': ['TLSv1.1', 'TLSv1.2'],
'ciphers': [
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
{'name': 'AES128-GCM-SHA256', 'priority': 13},
{'name': 'AES128-SHA256', 'priority': 14},
{'name': 'AES128-SHA', 'priority': 15},
{'name': 'AES256-GCM-SHA384', 'priority': 16},
{'name': 'AES256-SHA256', 'priority': 17},
{'name': 'AES256-SHA', 'priority': 18}
]
},
{
'name': 'ELBSecurityPolicy-2015-05',
'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
'ciphers': [
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
{'name': 'AES128-GCM-SHA256', 'priority': 13},
{'name': 'AES128-SHA256', 'priority': 14},
{'name': 'AES128-SHA', 'priority': 15},
{'name': 'AES256-GCM-SHA384', 'priority': 16},
{'name': 'AES256-SHA256', 'priority': 17},
{'name': 'AES256-SHA', 'priority': 18}
]
},
{
'name': 'ELBSecurityPolicy-TLS-1-0-2015-04',
'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
'ciphers': [
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
{'name': 'AES128-GCM-SHA256', 'priority': 13},
{'name': 'AES128-SHA256', 'priority': 14},
{'name': 'AES128-SHA', 'priority': 15},
{'name': 'AES256-GCM-SHA384', 'priority': 16},
{'name': 'AES256-SHA256', 'priority': 17},
{'name': 'AES256-SHA', 'priority': 18},
{'name': 'DES-CBC3-SHA', 'priority': 19}
]
}
]
class ELBV2Response(BaseResponse):
@property @property
def elbv2_backend(self): def elbv2_backend(self):
return elbv2_backends[self.region] return elbv2_backends[self.region]
@amzn_request_id
def create_load_balancer(self): def create_load_balancer(self):
load_balancer_name = self._get_param('Name') load_balancer_name = self._get_param('Name')
subnet_ids = self._get_multi_param("Subnets.member") subnet_ids = self._get_multi_param("Subnets.member")
@ -28,6 +149,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE)
return template.render(load_balancer=load_balancer) return template.render(load_balancer=load_balancer)
@amzn_request_id
def create_rule(self): def create_rule(self):
lister_arn = self._get_param('ListenerArn') lister_arn = self._get_param('ListenerArn')
_conditions = self._get_list_prefix('Conditions.member') _conditions = self._get_list_prefix('Conditions.member')
@ -52,6 +174,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(CREATE_RULE_TEMPLATE) template = self.response_template(CREATE_RULE_TEMPLATE)
return template.render(rules=rules) return template.render(rules=rules)
@amzn_request_id
def create_target_group(self): def create_target_group(self):
name = self._get_param('Name') name = self._get_param('Name')
vpc_id = self._get_param('VpcId') vpc_id = self._get_param('VpcId')
@ -64,6 +187,7 @@ class ELBV2Response(BaseResponse):
healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5') healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5')
healthy_threshold_count = self._get_param('HealthyThresholdCount', '5') healthy_threshold_count = self._get_param('HealthyThresholdCount', '5')
unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2') unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2')
http_codes = self._get_param('Matcher.HttpCode', '200')
target_group = self.elbv2_backend.create_target_group( target_group = self.elbv2_backend.create_target_group(
name, name,
@ -77,11 +201,13 @@ class ELBV2Response(BaseResponse):
healthcheck_timeout_seconds=healthcheck_timeout_seconds, healthcheck_timeout_seconds=healthcheck_timeout_seconds,
healthy_threshold_count=healthy_threshold_count, healthy_threshold_count=healthy_threshold_count,
unhealthy_threshold_count=unhealthy_threshold_count, unhealthy_threshold_count=unhealthy_threshold_count,
matcher={'HttpCode': http_codes}
) )
template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE) template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE)
return template.render(target_group=target_group) return template.render(target_group=target_group)
@amzn_request_id
def create_listener(self): def create_listener(self):
load_balancer_arn = self._get_param('LoadBalancerArn') load_balancer_arn = self._get_param('LoadBalancerArn')
protocol = self._get_param('Protocol') protocol = self._get_param('Protocol')
@ -105,6 +231,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(CREATE_LISTENER_TEMPLATE) template = self.response_template(CREATE_LISTENER_TEMPLATE)
return template.render(listener=listener) return template.render(listener=listener)
@amzn_request_id
def describe_load_balancers(self): def describe_load_balancers(self):
arns = self._get_multi_param("LoadBalancerArns.member") arns = self._get_multi_param("LoadBalancerArns.member")
names = self._get_multi_param("Names.member") names = self._get_multi_param("Names.member")
@ -124,6 +251,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE)
return template.render(load_balancers=load_balancers_resp, marker=next_marker) return template.render(load_balancers=load_balancers_resp, marker=next_marker)
@amzn_request_id
def describe_rules(self): def describe_rules(self):
listener_arn = self._get_param('ListenerArn') listener_arn = self._get_param('ListenerArn')
rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None
@ -144,6 +272,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_RULES_TEMPLATE) template = self.response_template(DESCRIBE_RULES_TEMPLATE)
return template.render(rules=rules_resp, marker=next_marker) return template.render(rules=rules_resp, marker=next_marker)
@amzn_request_id
def describe_target_groups(self): def describe_target_groups(self):
load_balancer_arn = self._get_param('LoadBalancerArn') load_balancer_arn = self._get_param('LoadBalancerArn')
target_group_arns = self._get_multi_param('TargetGroupArns.member') target_group_arns = self._get_multi_param('TargetGroupArns.member')
@ -153,6 +282,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE)
return template.render(target_groups=target_groups) return template.render(target_groups=target_groups)
@amzn_request_id
def describe_target_group_attributes(self): def describe_target_group_attributes(self):
target_group_arn = self._get_param('TargetGroupArn') target_group_arn = self._get_param('TargetGroupArn')
target_group = self.elbv2_backend.target_groups.get(target_group_arn) target_group = self.elbv2_backend.target_groups.get(target_group_arn)
@ -161,6 +291,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE) template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
return template.render(attributes=target_group.attributes) return template.render(attributes=target_group.attributes)
@amzn_request_id
def describe_listeners(self): def describe_listeners(self):
load_balancer_arn = self._get_param('LoadBalancerArn') load_balancer_arn = self._get_param('LoadBalancerArn')
listener_arns = self._get_multi_param('ListenerArns.member') listener_arns = self._get_multi_param('ListenerArns.member')
@ -171,30 +302,35 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE) template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE)
return template.render(listeners=listeners) return template.render(listeners=listeners)
@amzn_request_id
def delete_load_balancer(self): def delete_load_balancer(self):
arn = self._get_param('LoadBalancerArn') arn = self._get_param('LoadBalancerArn')
self.elbv2_backend.delete_load_balancer(arn) self.elbv2_backend.delete_load_balancer(arn)
template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def delete_rule(self): def delete_rule(self):
arn = self._get_param('RuleArn') arn = self._get_param('RuleArn')
self.elbv2_backend.delete_rule(arn) self.elbv2_backend.delete_rule(arn)
template = self.response_template(DELETE_RULE_TEMPLATE) template = self.response_template(DELETE_RULE_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def delete_target_group(self): def delete_target_group(self):
arn = self._get_param('TargetGroupArn') arn = self._get_param('TargetGroupArn')
self.elbv2_backend.delete_target_group(arn) self.elbv2_backend.delete_target_group(arn)
template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE) template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def delete_listener(self): def delete_listener(self):
arn = self._get_param('ListenerArn') arn = self._get_param('ListenerArn')
self.elbv2_backend.delete_listener(arn) self.elbv2_backend.delete_listener(arn)
template = self.response_template(DELETE_LISTENER_TEMPLATE) template = self.response_template(DELETE_LISTENER_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def modify_rule(self): def modify_rule(self):
rule_arn = self._get_param('RuleArn') rule_arn = self._get_param('RuleArn')
_conditions = self._get_list_prefix('Conditions.member') _conditions = self._get_list_prefix('Conditions.member')
@ -217,6 +353,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(MODIFY_RULE_TEMPLATE) template = self.response_template(MODIFY_RULE_TEMPLATE)
return template.render(rules=rules) return template.render(rules=rules)
@amzn_request_id
def modify_target_group_attributes(self): def modify_target_group_attributes(self):
target_group_arn = self._get_param('TargetGroupArn') target_group_arn = self._get_param('TargetGroupArn')
target_group = self.elbv2_backend.target_groups.get(target_group_arn) target_group = self.elbv2_backend.target_groups.get(target_group_arn)
@ -230,6 +367,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE) template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
return template.render(attributes=attributes) return template.render(attributes=attributes)
@amzn_request_id
def register_targets(self): def register_targets(self):
target_group_arn = self._get_param('TargetGroupArn') target_group_arn = self._get_param('TargetGroupArn')
targets = self._get_list_prefix('Targets.member') targets = self._get_list_prefix('Targets.member')
@ -238,6 +376,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(REGISTER_TARGETS_TEMPLATE) template = self.response_template(REGISTER_TARGETS_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def deregister_targets(self): def deregister_targets(self):
target_group_arn = self._get_param('TargetGroupArn') target_group_arn = self._get_param('TargetGroupArn')
targets = self._get_list_prefix('Targets.member') targets = self._get_list_prefix('Targets.member')
@ -246,6 +385,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DEREGISTER_TARGETS_TEMPLATE) template = self.response_template(DEREGISTER_TARGETS_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def describe_target_health(self): def describe_target_health(self):
target_group_arn = self._get_param('TargetGroupArn') target_group_arn = self._get_param('TargetGroupArn')
targets = self._get_list_prefix('Targets.member') targets = self._get_list_prefix('Targets.member')
@ -254,6 +394,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE)
return template.render(target_health_descriptions=target_health_descriptions) return template.render(target_health_descriptions=target_health_descriptions)
@amzn_request_id
def set_rule_priorities(self): def set_rule_priorities(self):
rule_priorities = self._get_list_prefix('RulePriorities.member') rule_priorities = self._get_list_prefix('RulePriorities.member')
for rule_priority in rule_priorities: for rule_priority in rule_priorities:
@ -262,6 +403,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE) template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE)
return template.render(rules=rules) return template.render(rules=rules)
@amzn_request_id
def add_tags(self): def add_tags(self):
resource_arns = self._get_multi_param('ResourceArns.member') resource_arns = self._get_multi_param('ResourceArns.member')
@ -281,6 +423,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(ADD_TAGS_TEMPLATE) template = self.response_template(ADD_TAGS_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def remove_tags(self): def remove_tags(self):
resource_arns = self._get_multi_param('ResourceArns.member') resource_arns = self._get_multi_param('ResourceArns.member')
tag_keys = self._get_multi_param('TagKeys.member') tag_keys = self._get_multi_param('TagKeys.member')
@ -301,6 +444,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(REMOVE_TAGS_TEMPLATE) template = self.response_template(REMOVE_TAGS_TEMPLATE)
return template.render() return template.render()
@amzn_request_id
def describe_tags(self): def describe_tags(self):
resource_arns = self._get_multi_param('ResourceArns.member') resource_arns = self._get_multi_param('ResourceArns.member')
resources = [] resources = []
@ -320,6 +464,125 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TAGS_TEMPLATE) template = self.response_template(DESCRIBE_TAGS_TEMPLATE)
return template.render(resources=resources) return template.render(resources=resources)
@amzn_request_id
def describe_account_limits(self):
# Supports paging but not worth implementing yet
# marker = self._get_param('Marker')
# page_size = self._get_param('PageSize')
limits = {
'application-load-balancers': 20,
'target-groups': 3000,
'targets-per-application-load-balancer': 30,
'listeners-per-application-load-balancer': 50,
'rules-per-application-load-balancer': 100,
'network-load-balancers': 20,
'targets-per-network-load-balancer': 200,
'listeners-per-network-load-balancer': 50
}
template = self.response_template(DESCRIBE_LIMITS_TEMPLATE)
return template.render(limits=limits)
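
From the client side, the canned limits above might be read back like this (load_balancer_limit is a hypothetical helper):

def load_balancer_limit(client):
    """Sketch: pull the mocked limit for application load balancers."""
    limits = client.describe_account_limits()['Limits']
    return next(l['Max'] for l in limits if l['Name'] == 'application-load-balancers')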
@amzn_request_id
def describe_ssl_policies(self):
names = self._get_multi_param('Names.member.')
# Supports paging but not worth implementing yet
# marker = self._get_param('Marker')
# page_size = self._get_param('PageSize')
policies = SSL_POLICIES
if names:
policies = filter(lambda policy: policy['name'] in names, policies)
template = self.response_template(DESCRIBE_SSL_POLICIES_TEMPLATE)
return template.render(policies=policies)
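
And the SSL policy table defined at the top of this file can be filtered by name through the usual call (tls12_ciphers is a hypothetical helper; the policy name is one of the entries above):

def tls12_ciphers(client):
    """Sketch: fetch the cipher names of one canned SSL policy."""
    resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01'])
    return [cipher['Name'] for cipher in resp['SslPolicies'][0]['Ciphers']]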
@amzn_request_id
def set_ip_address_type(self):
arn = self._get_param('LoadBalancerArn')
ip_type = self._get_param('IpAddressType')
self.elbv2_backend.set_ip_address_type(arn, ip_type)
template = self.response_template(SET_IP_ADDRESS_TYPE_TEMPLATE)
return template.render(ip_type=ip_type)
@amzn_request_id
def set_security_groups(self):
arn = self._get_param('LoadBalancerArn')
sec_groups = self._get_multi_param('SecurityGroups.member.')
self.elbv2_backend.set_security_groups(arn, sec_groups)
template = self.response_template(SET_SECURITY_GROUPS_TEMPLATE)
return template.render(sec_groups=sec_groups)
@amzn_request_id
def set_subnets(self):
arn = self._get_param('LoadBalancerArn')
subnets = self._get_multi_param('Subnets.member.')
subnet_zone_list = self.elbv2_backend.set_subnets(arn, subnets)
template = self.response_template(SET_SUBNETS_TEMPLATE)
return template.render(subnets=subnet_zone_list)
@amzn_request_id
def modify_load_balancer_attributes(self):
arn = self._get_param('LoadBalancerArn')
attrs = self._get_map_prefix('Attributes.member', key_end='Key', value_end='Value')
all_attrs = self.elbv2_backend.modify_load_balancer_attributes(arn, attrs)
template = self.response_template(MODIFY_LOADBALANCER_ATTRS_TEMPLATE)
return template.render(attrs=all_attrs)
@amzn_request_id
def describe_load_balancer_attributes(self):
arn = self._get_param('LoadBalancerArn')
attrs = self.elbv2_backend.describe_load_balancer_attributes(arn)
template = self.response_template(DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE)
return template.render(attrs=attrs)
@amzn_request_id
def modify_target_group(self):
arn = self._get_param('TargetGroupArn')
health_check_proto = self._get_param('HealthCheckProtocol') # 'HTTP' | 'HTTPS' | 'TCP',
health_check_port = self._get_param('HealthCheckPort')
health_check_path = self._get_param('HealthCheckPath')
health_check_interval = self._get_param('HealthCheckIntervalSeconds')
health_check_timeout = self._get_param('HealthCheckTimeoutSeconds')
healthy_threshold_count = self._get_param('HealthyThresholdCount')
unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount')
http_codes = self._get_param('Matcher.HttpCode')
target_group = self.elbv2_backend.modify_target_group(arn, health_check_proto, health_check_port, health_check_path, health_check_interval,
health_check_timeout, healthy_threshold_count, unhealthy_threshold_count, http_codes)
template = self.response_template(MODIFY_TARGET_GROUP_TEMPLATE)
return template.render(target_group=target_group)
@amzn_request_id
def modify_listener(self):
arn = self._get_param('ListenerArn')
port = self._get_param('Port')
protocol = self._get_param('Protocol')
ssl_policy = self._get_param('SslPolicy')
certificates = self._get_list_prefix('Certificates.member')
default_actions = self._get_list_prefix('DefaultActions.member')
# Should really move SSL Policies to models
if ssl_policy is not None and ssl_policy not in [item['name'] for item in SSL_POLICIES]:
raise RESTError('SSLPolicyNotFound', 'Policy {0} not found'.format(ssl_policy))
listener = self.elbv2_backend.modify_listener(arn, port, protocol, ssl_policy, certificates, default_actions)
template = self.response_template(MODIFY_LISTENER_TEMPLATE)
return template.render(listener=listener)
def _add_tags(self, resource): def _add_tags(self, resource):
tag_values = [] tag_values = []
tag_keys = [] tag_keys = []
@ -348,14 +611,14 @@ class ELBV2Response(BaseResponse):
ADD_TAGS_TEMPLATE = """<AddTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> ADD_TAGS_TEMPLATE = """<AddTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<AddTagsResult/> <AddTagsResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</AddTagsResponse>""" </AddTagsResponse>"""
REMOVE_TAGS_TEMPLATE = """<RemoveTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> REMOVE_TAGS_TEMPLATE = """<RemoveTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<RemoveTagsResult/> <RemoveTagsResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</RemoveTagsResponse>""" </RemoveTagsResponse>"""
@ -378,11 +641,10 @@ DESCRIBE_TAGS_TEMPLATE = """<DescribeTagsResponse xmlns="http://elasticloadbalan
</TagDescriptions> </TagDescriptions>
</DescribeTagsResult> </DescribeTagsResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeTagsResponse>""" </DescribeTagsResponse>"""
CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<CreateLoadBalancerResult> <CreateLoadBalancerResult>
<LoadBalancers> <LoadBalancers>
@ -415,7 +677,7 @@ CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResponse xmlns="http://ela
</LoadBalancers> </LoadBalancers>
</CreateLoadBalancerResult> </CreateLoadBalancerResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>32d531b2-f2d0-11e5-9192-3fff33344cfa</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</CreateLoadBalancerResponse>""" </CreateLoadBalancerResponse>"""
@ -452,7 +714,7 @@ CREATE_RULE_TEMPLATE = """<CreateRuleResponse xmlns="http://elasticloadbalancing
</Rules> </Rules>
</CreateRuleResult> </CreateRuleResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>c5478c83-f397-11e5-bb98-57195a6eb84a</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</CreateRuleResponse>""" </CreateRuleResponse>"""
@ -472,14 +734,19 @@ CREATE_TARGET_GROUP_TEMPLATE = """<CreateTargetGroupResponse xmlns="http://elast
<HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds> <HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds>
<HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount> <HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount>
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount> <UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount>
{% if target_group.matcher %}
<Matcher> <Matcher>
<HttpCode>200</HttpCode> <HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode>
</Matcher> </Matcher>
{% endif %}
{% if target_group.target_type %}
<TargetType>{{ target_group.target_type }}</TargetType>
{% endif %}
</member> </member>
</TargetGroups> </TargetGroups>
</CreateTargetGroupResult> </CreateTargetGroupResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>b83fe90e-f2d5-11e5-b95d-3b2c1831fc26</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</CreateTargetGroupResponse>""" </CreateTargetGroupResponse>"""
@ -489,11 +756,13 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
<member> <member>
<LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn> <LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn>
<Protocol>{{ listener.protocol }}</Protocol> <Protocol>{{ listener.protocol }}</Protocol>
{% if listener.certificate %} {% if listener.certificates %}
<Certificates> <Certificates>
{% for cert in listener.certificates %}
<member> <member>
<CertificateArn>{{ listener.certificate }}</CertificateArn> <CertificateArn>{{ cert }}</CertificateArn>
</member> </member>
{% endfor %}
</Certificates> </Certificates>
{% endif %} {% endif %}
<Port>{{ listener.port }}</Port> <Port>{{ listener.port }}</Port>
@ -511,35 +780,35 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
</Listeners> </Listeners>
</CreateListenerResult> </CreateListenerResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>97f1bb38-f390-11e5-b95d-3b2c1831fc26</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</CreateListenerResponse>""" </CreateListenerResponse>"""
DELETE_LOAD_BALANCER_TEMPLATE = """<DeleteLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DELETE_LOAD_BALANCER_TEMPLATE = """<DeleteLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DeleteLoadBalancerResult/> <DeleteLoadBalancerResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteLoadBalancerResponse>""" </DeleteLoadBalancerResponse>"""
DELETE_RULE_TEMPLATE = """<DeleteRuleResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DELETE_RULE_TEMPLATE = """<DeleteRuleResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DeleteRuleResult/> <DeleteRuleResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteRuleResponse>""" </DeleteRuleResponse>"""
DELETE_TARGET_GROUP_TEMPLATE = """<DeleteTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DELETE_TARGET_GROUP_TEMPLATE = """<DeleteTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DeleteTargetGroupResult/> <DeleteTargetGroupResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteTargetGroupResponse>""" </DeleteTargetGroupResponse>"""
DELETE_LISTENER_TEMPLATE = """<DeleteListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DELETE_LISTENER_TEMPLATE = """<DeleteListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DeleteListenerResult/> <DeleteListenerResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteListenerResponse>""" </DeleteListenerResponse>"""
@ -572,6 +841,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
<Code>provisioning</Code> <Code>provisioning</Code>
</State> </State>
<Type>application</Type> <Type>application</Type>
<IpAddressType>ipv4</IpAddressType>
</member> </member>
{% endfor %} {% endfor %}
</LoadBalancers> </LoadBalancers>
@ -580,7 +850,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
{% endif %} {% endif %}
</DescribeLoadBalancersResult> </DescribeLoadBalancersResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeLoadBalancersResponse>""" </DescribeLoadBalancersResponse>"""
@ -620,7 +890,7 @@ DESCRIBE_RULES_TEMPLATE = """<DescribeRulesResponse xmlns="http://elasticloadbal
{% endif %} {% endif %}
</DescribeRulesResult> </DescribeRulesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>74926cf3-f3a3-11e5-b543-9f2c3fbb9bee</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeRulesResponse>""" </DescribeRulesResponse>"""
@ -634,16 +904,21 @@ DESCRIBE_TARGET_GROUPS_TEMPLATE = """<DescribeTargetGroupsResponse xmlns="http:/
<Protocol>{{ target_group.protocol }}</Protocol> <Protocol>{{ target_group.protocol }}</Protocol>
<Port>{{ target_group.port }}</Port> <Port>{{ target_group.port }}</Port>
<VpcId>{{ target_group.vpc_id }}</VpcId> <VpcId>{{ target_group.vpc_id }}</VpcId>
<HealthCheckProtocol>{{ target_group.health_check_protocol }}</HealthCheckProtocol> <HealthCheckProtocol>{{ target_group.healthcheck_protocol }}</HealthCheckProtocol>
<HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort> <HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort>
<HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath> <HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath>
<HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds> <HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds>
<HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds> <HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds>
<HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount> <HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount>
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount> <UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount>
{% if target_group.matcher %}
<Matcher> <Matcher>
<HttpCode>200</HttpCode> <HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode>
</Matcher> </Matcher>
{% endif %}
{% if target_group.target_type %}
<TargetType>{{ target_group.target_type }}</TargetType>
{% endif %}
<LoadBalancerArns> <LoadBalancerArns>
{% for load_balancer_arn in target_group.load_balancer_arns %} {% for load_balancer_arn in target_group.load_balancer_arns %}
<member>{{ load_balancer_arn }}</member> <member>{{ load_balancer_arn }}</member>
@ -654,11 +929,10 @@ DESCRIBE_TARGET_GROUPS_TEMPLATE = """<DescribeTargetGroupsResponse xmlns="http:/
</TargetGroups> </TargetGroups>
</DescribeTargetGroupsResult> </DescribeTargetGroupsResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>70092c0e-f3a9-11e5-ae48-cff02092876b</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeTargetGroupsResponse>""" </DescribeTargetGroupsResponse>"""
DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<DescribeTargetGroupAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<DescribeTargetGroupAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DescribeTargetGroupAttributesResult> <DescribeTargetGroupAttributesResult>
<Attributes> <Attributes>
@ -671,11 +945,10 @@ DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<DescribeTargetGroupAttributesRes
</Attributes> </Attributes>
</DescribeTargetGroupAttributesResult> </DescribeTargetGroupAttributesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>70092c0e-f3a9-11e5-ae48-cff02092876b</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeTargetGroupAttributesResponse>""" </DescribeTargetGroupAttributesResponse>"""
DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DescribeListenersResult> <DescribeListenersResult>
<Listeners> <Listeners>
@ -706,7 +979,7 @@ DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://el
</Listeners> </Listeners>
</DescribeListenersResult> </DescribeListenersResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>65a3a7ea-f39c-11e5-b543-9f2c3fbb9bee</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeLoadBalancersResponse>""" </DescribeLoadBalancersResponse>"""
@ -721,7 +994,7 @@ CONFIGURE_HEALTH_CHECK_TEMPLATE = """<ConfigureHealthCheckResponse xmlns="http:/
</HealthCheck> </HealthCheck>
</ConfigureHealthCheckResult> </ConfigureHealthCheckResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ConfigureHealthCheckResponse>""" </ConfigureHealthCheckResponse>"""
@ -758,7 +1031,7 @@ MODIFY_RULE_TEMPLATE = """<ModifyRuleResponse xmlns="http://elasticloadbalancing
</Rules> </Rules>
</ModifyRuleResult> </ModifyRuleResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>c5478c83-f397-11e5-bb98-57195a6eb84a</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ModifyRuleResponse>""" </ModifyRuleResponse>"""
@ -774,7 +1047,7 @@ MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<ModifyTargetGroupAttributesRespons
</Attributes> </Attributes>
</ModifyTargetGroupAttributesResult> </ModifyTargetGroupAttributesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>70092c0e-f3a9-11e5-ae48-cff02092876b</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ModifyTargetGroupAttributesResponse>""" </ModifyTargetGroupAttributesResponse>"""
@ -782,7 +1055,7 @@ REGISTER_TARGETS_TEMPLATE = """<RegisterTargetsResponse xmlns="http://elasticloa
<RegisterTargetsResult> <RegisterTargetsResult>
</RegisterTargetsResult> </RegisterTargetsResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</RegisterTargetsResponse>""" </RegisterTargetsResponse>"""
@ -790,22 +1063,21 @@ DEREGISTER_TARGETS_TEMPLATE = """<DeregisterTargetsResponse xmlns="http://elasti
<DeregisterTargetsResult> <DeregisterTargetsResult>
</DeregisterTargetsResult> </DeregisterTargetsResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeregisterTargetsResponse>""" </DeregisterTargetsResponse>"""
SET_LOAD_BALANCER_SSL_CERTIFICATE = """<SetLoadBalancerListenerSSLCertificateResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> SET_LOAD_BALANCER_SSL_CERTIFICATE = """<SetLoadBalancerListenerSSLCertificateResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<SetLoadBalancerListenerSSLCertificateResult/> <SetLoadBalancerListenerSSLCertificateResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SetLoadBalancerListenerSSLCertificateResponse>""" </SetLoadBalancerListenerSSLCertificateResponse>"""
DELETE_LOAD_BALANCER_LISTENERS = """<DeleteLoadBalancerListenersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> DELETE_LOAD_BALANCER_LISTENERS = """<DeleteLoadBalancerListenersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DeleteLoadBalancerListenersResult/> <DeleteLoadBalancerListenersResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteLoadBalancerListenersResponse>""" </DeleteLoadBalancerListenersResponse>"""
@ -837,7 +1109,7 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """<DescribeLoadBalancerAttributesResponse xmlns
</LoadBalancerAttributes> </LoadBalancerAttributes>
</DescribeLoadBalancerAttributesResult> </DescribeLoadBalancerAttributesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeLoadBalancerAttributesResponse> </DescribeLoadBalancerAttributesResponse>
""" """
@ -871,7 +1143,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="htt
</LoadBalancerAttributes> </LoadBalancerAttributes>
</ModifyLoadBalancerAttributesResult> </ModifyLoadBalancerAttributesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ModifyLoadBalancerAttributesResponse> </ModifyLoadBalancerAttributesResponse>
""" """
@ -879,7 +1151,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="htt
CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """<CreateLoadBalancerPolicyResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """<CreateLoadBalancerPolicyResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<CreateLoadBalancerPolicyResult/> <CreateLoadBalancerPolicyResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</CreateLoadBalancerPolicyResponse> </CreateLoadBalancerPolicyResponse>
""" """
@ -887,7 +1159,7 @@ CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """<CreateLoadBalancerPolicyResponse xmln
SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """<SetLoadBalancerPoliciesOfListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """<SetLoadBalancerPoliciesOfListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<SetLoadBalancerPoliciesOfListenerResult/> <SetLoadBalancerPoliciesOfListenerResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SetLoadBalancerPoliciesOfListenerResponse> </SetLoadBalancerPoliciesOfListenerResponse>
""" """
@ -895,7 +1167,7 @@ SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """<SetLoadBalancerPoliciesOfL
SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """<SetLoadBalancerPoliciesForBackendServerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """<SetLoadBalancerPoliciesForBackendServerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<SetLoadBalancerPoliciesForBackendServerResult/> <SetLoadBalancerPoliciesForBackendServerResult/>
<ResponseMetadata> <ResponseMetadata>
<RequestId>0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SetLoadBalancerPoliciesForBackendServerResponse> </SetLoadBalancerPoliciesForBackendServerResponse>
""" """
@ -918,7 +1190,7 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """<DescribeTargetHealthResponse xmlns="http:/
</TargetHealthDescriptions> </TargetHealthDescriptions>
</DescribeTargetHealthResult> </DescribeTargetHealthResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>c534f810-f389-11e5-9192-3fff33344cfa</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DescribeTargetHealthResponse>""" </DescribeTargetHealthResponse>"""
@ -955,6 +1227,186 @@ SET_RULE_PRIORITIES_TEMPLATE = """<SetRulePrioritiesResponse xmlns="http://elast
</Rules> </Rules>
</SetRulePrioritiesResult> </SetRulePrioritiesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>4d7a8036-f3a7-11e5-9c02-8fd20490d5a6</RequestId> <RequestId>{{ request_id }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SetRulePrioritiesResponse>""" </SetRulePrioritiesResponse>"""
DESCRIBE_LIMITS_TEMPLATE = """<DescribeAccountLimitsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DescribeAccountLimitsResult>
<Limits>
{% for key, value in limits.items() %}
<member>
<Name>{{ key }}</Name>
<Max>{{ value }}</Max>
</member>
{% endfor %}
</Limits>
</DescribeAccountLimitsResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</DescribeAccountLimitsResponse>"""
DESCRIBE_SSL_POLICIES_TEMPLATE = """<DescribeSSLPoliciesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DescribeSSLPoliciesResult>
<SslPolicies>
{% for policy in policies %}
<member>
<Name>{{ policy['name'] }}</Name>
<Ciphers>
{% for cipher in policy['ciphers'] %}
<member>
<Name>{{ cipher['name'] }}</Name>
<Priority>{{ cipher['priority'] }}</Priority>
</member>
{% endfor %}
</Ciphers>
<SslProtocols>
{% for proto in policy['ssl_protocols'] %}
<member>{{ proto }}</member>
{% endfor %}
</SslProtocols>
</member>
{% endfor %}
</SslPolicies>
</DescribeSSLPoliciesResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</DescribeSSLPoliciesResponse>"""
SET_IP_ADDRESS_TYPE_TEMPLATE = """<SetIpAddressTypeResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<SetIpAddressTypeResult>
<IpAddressType>{{ ip_type }}</IpAddressType>
</SetIpAddressTypeResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</SetIpAddressTypeResponse>"""
SET_SECURITY_GROUPS_TEMPLATE = """<SetSecurityGroupsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<SetSecurityGroupsResult>
<SecurityGroupIds>
{% for group in sec_groups %}
<member>{{ group }}</member>
{% endfor %}
</SecurityGroupIds>
</SetSecurityGroupsResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</SetSecurityGroupsResponse>"""
SET_SUBNETS_TEMPLATE = """<SetSubnetsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<SetSubnetsResult>
<AvailabilityZones>
{% for zone_id, subnet_id in subnets %}
<member>
<SubnetId>{{ subnet_id }}</SubnetId>
<ZoneName>{{ zone_id }}</ZoneName>
</member>
{% endfor %}
</AvailabilityZones>
</SetSubnetsResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</SetSubnetsResponse>"""
MODIFY_LOADBALANCER_ATTRS_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<ModifyLoadBalancerAttributesResult>
<Attributes>
{% for key, value in attrs.items() %}
<member>
{% if value == None %}<Value />{% else %}<Value>{{ value }}</Value>{% endif %}
<Key>{{ key }}</Key>
</member>
{% endfor %}
</Attributes>
</ModifyLoadBalancerAttributesResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</ModifyLoadBalancerAttributesResponse>"""
DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE = """<DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<DescribeLoadBalancerAttributesResult>
<Attributes>
{% for key, value in attrs.items() %}
<member>
{% if value == None %}<Value />{% else %}<Value>{{ value }}</Value>{% endif %}
<Key>{{ key }}</Key>
</member>
{% endfor %}
</Attributes>
</DescribeLoadBalancerAttributesResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</DescribeLoadBalancerAttributesResponse>"""
MODIFY_TARGET_GROUP_TEMPLATE = """<ModifyTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<ModifyTargetGroupResult>
<TargetGroups>
<member>
<TargetGroupArn>{{ target_group.arn }}</TargetGroupArn>
<TargetGroupName>{{ target_group.name }}</TargetGroupName>
<Protocol>{{ target_group.protocol }}</Protocol>
<Port>{{ target_group.port }}</Port>
<VpcId>{{ target_group.vpc_id }}</VpcId>
<HealthCheckProtocol>{{ target_group.healthcheck_protocol }}</HealthCheckProtocol>
<HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort>
<HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath>
<HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds>
<HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds>
<HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount>
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount>
<Matcher>
<HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode>
</Matcher>
<LoadBalancerArns>
{% for load_balancer_arn in target_group.load_balancer_arns %}
<member>{{ load_balancer_arn }}</member>
{% endfor %}
</LoadBalancerArns>
</member>
</TargetGroups>
</ModifyTargetGroupResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</ModifyTargetGroupResponse>"""
MODIFY_LISTENER_TEMPLATE = """<ModifyListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
<ModifyListenerResult>
<Listeners>
<member>
<LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn>
<Protocol>{{ listener.protocol }}</Protocol>
{% if listener.certificates %}
<Certificates>
{% for cert in listener.certificates %}
<member>
<CertificateArn>{{ cert }}</CertificateArn>
</member>
{% endfor %}
</Certificates>
{% endif %}
<Port>{{ listener.port }}</Port>
<SslPolicy>{{ listener.ssl_policy }}</SslPolicy>
<ListenerArn>{{ listener.arn }}</ListenerArn>
<DefaultActions>
{% for action in listener.default_actions %}
<member>
<Type>{{ action.type }}</Type>
<TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
</member>
{% endfor %}
</DefaultActions>
</member>
</Listeners>
</ModifyListenerResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</ModifyListenerResponse>"""
8
moto/elbv2/utils.py Normal file
View File
@ -0,0 +1,8 @@
def make_arn_for_load_balancer(account_id, name, region_name):
return "arn:aws:elasticloadbalancing:{}:{}:loadbalancer/{}/50dc6c495c0c9188".format(
region_name, account_id, name)
def make_arn_for_target_group(account_id, name, region_name):
return "arn:aws:elasticloadbalancing:{}:{}:targetgroup/{}/50dc6c495c0c9188".format(
region_name, account_id, name)
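For illustration, a quick sketch of the ARN these helpers build (the account id and load balancer name below are made up; the trailing id is the fixed placeholder baked into the format string):

    from moto.elbv2.utils import make_arn_for_load_balancer

    print(make_arn_for_load_balancer('123456789012', 'my-lb', 'us-east-1'))
    # arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/my-lb/50dc6c495c0c9188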
View File
@ -1,6 +1,7 @@
import os import os
import re import re
from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
@ -50,6 +51,8 @@ class Rule(BaseModel):
class EventsBackend(BaseBackend): class EventsBackend(BaseBackend):
ACCOUNT_ID = re.compile(r'^(\d{1,12}|\*)$')
STATEMENT_ID = re.compile(r'^[a-zA-Z0-9-_]{1,64}$')
def __init__(self): def __init__(self):
self.rules = {} self.rules = {}
@ -58,6 +61,8 @@ class EventsBackend(BaseBackend):
self.rules_order = [] self.rules_order = []
self.next_tokens = {} self.next_tokens = {}
self.permissions = {}
def _get_rule_by_index(self, i): def _get_rule_by_index(self, i):
return self.rules.get(self.rules_order[i]) return self.rules.get(self.rules_order[i])
@ -181,6 +186,17 @@ class EventsBackend(BaseBackend):
return False return False
def put_events(self, events):
num_events = len(events)
if num_events < 1:
raise JsonRESTError('ValidationError', 'Need at least 1 event')
elif num_events > 10:
raise JsonRESTError('ValidationError', 'Can only submit 10 events at once')
# We don't really need to store the events yet
return []
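A minimal pytest-style sketch of how the entry-count check surfaces through boto3 under the existing mock_events decorator (the source and detail values are made up):

    import json
    import boto3
    from moto import mock_events

    @mock_events
    def test_put_events_accepts_small_batches():
        client = boto3.client('events', region_name='us-east-1')
        # Batches of 1-10 entries succeed; 0 or more than 10 raise a ValidationError.
        client.put_events(Entries=[
            {'Source': 'my.app', 'DetailType': 'example', 'Detail': json.dumps({'ok': True})},
        ])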
def remove_targets(self, name, ids): def remove_targets(self, name, ids):
rule = self.rules.get(name) rule = self.rules.get(name)
@ -193,5 +209,40 @@ class EventsBackend(BaseBackend):
def test_event_pattern(self): def test_event_pattern(self):
raise NotImplementedError() raise NotImplementedError()
def put_permission(self, action, principal, statement_id):
if action is None or action != 'PutEvents':
raise JsonRESTError('InvalidParameterValue', 'Action must be PutEvents')
if principal is None or self.ACCOUNT_ID.match(principal) is None:
raise JsonRESTError('InvalidParameterValue', 'Principal must match ^(\d{1,12}|\*)$')
if statement_id is None or self.STATEMENT_ID.match(statement_id) is None:
raise JsonRESTError('InvalidParameterValue', 'StatementId must match ^[a-zA-Z0-9-_]{1,64}$')
self.permissions[statement_id] = {'action': action, 'principal': principal}
def remove_permission(self, statement_id):
try:
del self.permissions[statement_id]
except KeyError:
raise JsonRESTError('ResourceNotFoundException', 'StatementId not found')
def describe_event_bus(self):
arn = "arn:aws:events:us-east-1:000000000000:event-bus/default"
statements = []
for statement_id, data in self.permissions.items():
statements.append({
'Sid': statement_id,
'Effect': 'Allow',
'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])},
'Action': 'events:{0}'.format(data['action']),
'Resource': arn
})
return {
'Policy': {'Version': '2012-10-17', 'Statement': statements},
'Name': 'default',
'Arn': arn
}
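A hedged usage sketch for the new permission calls (note the backend above expects the bare 'PutEvents' action string and a 12-digit principal; the account id here is made up):

    import boto3
    from moto import mock_events

    @mock_events
    def test_event_bus_permissions():
        client = boto3.client('events', region_name='us-east-1')
        client.put_permission(Action='PutEvents', Principal='111122223333', StatementId='allow-account')
        bus = client.describe_event_bus()
        # bus['Name'] == 'default'; the statement for 111122223333 is embedded in bus['Policy']
        client.remove_permission(StatementId='allow-account')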
events_backend = EventsBackend() events_backend = EventsBackend()
View File
@ -18,9 +18,17 @@ class EventsHandler(BaseResponse):
'RoleArn': rule.role_arn 'RoleArn': rule.role_arn
} }
def load_body(self): @property
decoded_body = self.body def request_params(self):
return json.loads(decoded_body or '{}') if not hasattr(self, '_json_body'):
try:
self._json_body = json.loads(self.body)
except ValueError:
self._json_body = {}
return self._json_body
def _get_param(self, param, if_none=None):
return self.request_params.get(param, if_none)
def error(self, type_, message='', status=400): def error(self, type_, message='', status=400):
headers = self.response_headers headers = self.response_headers
@ -28,8 +36,7 @@ class EventsHandler(BaseResponse):
return json.dumps({'__type': type_, 'message': message}), headers, return json.dumps({'__type': type_, 'message': message}), headers,
def delete_rule(self): def delete_rule(self):
body = self.load_body() name = self._get_param('Name')
name = body.get('Name')
if not name: if not name:
return self.error('ValidationException', 'Parameter Name is required.') return self.error('ValidationException', 'Parameter Name is required.')
@ -38,8 +45,7 @@ class EventsHandler(BaseResponse):
return '', self.response_headers return '', self.response_headers
def describe_rule(self): def describe_rule(self):
body = self.load_body() name = self._get_param('Name')
name = body.get('Name')
if not name: if not name:
return self.error('ValidationException', 'Parameter Name is required.') return self.error('ValidationException', 'Parameter Name is required.')
@ -53,8 +59,7 @@ class EventsHandler(BaseResponse):
return json.dumps(rule_dict), self.response_headers return json.dumps(rule_dict), self.response_headers
def disable_rule(self): def disable_rule(self):
body = self.load_body() name = self._get_param('Name')
name = body.get('Name')
if not name: if not name:
return self.error('ValidationException', 'Parameter Name is required.') return self.error('ValidationException', 'Parameter Name is required.')
@ -65,8 +70,7 @@ class EventsHandler(BaseResponse):
return '', self.response_headers return '', self.response_headers
def enable_rule(self): def enable_rule(self):
body = self.load_body() name = self._get_param('Name')
name = body.get('Name')
if not name: if not name:
return self.error('ValidationException', 'Parameter Name is required.') return self.error('ValidationException', 'Parameter Name is required.')
@ -80,10 +84,9 @@ class EventsHandler(BaseResponse):
pass pass
def list_rule_names_by_target(self): def list_rule_names_by_target(self):
body = self.load_body() target_arn = self._get_param('TargetArn')
target_arn = body.get('TargetArn') next_token = self._get_param('NextToken')
next_token = body.get('NextToken') limit = self._get_param('Limit')
limit = body.get('Limit')
if not target_arn: if not target_arn:
return self.error('ValidationException', 'Parameter TargetArn is required.') return self.error('ValidationException', 'Parameter TargetArn is required.')
@ -94,10 +97,9 @@ class EventsHandler(BaseResponse):
return json.dumps(rule_names), self.response_headers return json.dumps(rule_names), self.response_headers
def list_rules(self): def list_rules(self):
body = self.load_body() prefix = self._get_param('NamePrefix')
prefix = body.get('NamePrefix') next_token = self._get_param('NextToken')
next_token = body.get('NextToken') limit = self._get_param('Limit')
limit = body.get('Limit')
rules = events_backend.list_rules(prefix, next_token, limit) rules = events_backend.list_rules(prefix, next_token, limit)
rules_obj = {'Rules': []} rules_obj = {'Rules': []}
@ -111,10 +113,9 @@ class EventsHandler(BaseResponse):
return json.dumps(rules_obj), self.response_headers return json.dumps(rules_obj), self.response_headers
def list_targets_by_rule(self): def list_targets_by_rule(self):
body = self.load_body() rule_name = self._get_param('Rule')
rule_name = body.get('Rule') next_token = self._get_param('NextToken')
next_token = body.get('NextToken') limit = self._get_param('Limit')
limit = body.get('Limit')
if not rule_name: if not rule_name:
return self.error('ValidationException', 'Parameter Rule is required.') return self.error('ValidationException', 'Parameter Rule is required.')
@ -128,13 +129,25 @@ class EventsHandler(BaseResponse):
return json.dumps(targets), self.response_headers return json.dumps(targets), self.response_headers
def put_events(self): def put_events(self):
events = self._get_param('Entries')
failed_entries = events_backend.put_events(events)
if failed_entries:
return json.dumps({
'FailedEntryCount': len(failed_entries),
'Entries': failed_entries
})
return '', self.response_headers return '', self.response_headers
def put_rule(self): def put_rule(self):
body = self.load_body() name = self._get_param('Name')
name = body.get('Name') event_pattern = self._get_param('EventPattern')
event_pattern = body.get('EventPattern') sched_exp = self._get_param('ScheduleExpression')
sched_exp = body.get('ScheduleExpression') state = self._get_param('State')
desc = self._get_param('Description')
role_arn = self._get_param('RoleArn')
if not name: if not name:
return self.error('ValidationException', 'Parameter Name is required.') return self.error('ValidationException', 'Parameter Name is required.')
@ -156,17 +169,16 @@ class EventsHandler(BaseResponse):
name, name,
ScheduleExpression=sched_exp, ScheduleExpression=sched_exp,
EventPattern=event_pattern, EventPattern=event_pattern,
State=body.get('State'), State=state,
Description=body.get('Description'), Description=desc,
RoleArn=body.get('RoleArn') RoleArn=role_arn
) )
return json.dumps({'RuleArn': rule_arn}), self.response_headers return json.dumps({'RuleArn': rule_arn}), self.response_headers
def put_targets(self): def put_targets(self):
body = self.load_body() rule_name = self._get_param('Rule')
rule_name = body.get('Rule') targets = self._get_param('Targets')
targets = body.get('Targets')
if not rule_name: if not rule_name:
return self.error('ValidationException', 'Parameter Rule is required.') return self.error('ValidationException', 'Parameter Rule is required.')
@ -180,9 +192,8 @@ class EventsHandler(BaseResponse):
return '', self.response_headers return '', self.response_headers
def remove_targets(self): def remove_targets(self):
body = self.load_body() rule_name = self._get_param('Rule')
rule_name = body.get('Rule') ids = self._get_param('Ids')
ids = body.get('Ids')
if not rule_name: if not rule_name:
return self.error('ValidationException', 'Parameter Rule is required.') return self.error('ValidationException', 'Parameter Rule is required.')
@ -197,3 +208,22 @@ class EventsHandler(BaseResponse):
def test_event_pattern(self): def test_event_pattern(self):
pass pass
def put_permission(self):
action = self._get_param('Action')
principal = self._get_param('Principal')
statement_id = self._get_param('StatementId')
events_backend.put_permission(action, principal, statement_id)
return ''
def remove_permission(self):
statement_id = self._get_param('StatementId')
events_backend.remove_permission(statement_id)
return ''
def describe_event_bus(self):
return json.dumps(events_backend.describe_event_bus())
6
moto/iot/__init__.py Normal file
View File
@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import iot_backends
from ..core.models import base_decorator
iot_backend = iot_backends['us-east-1']
mock_iot = base_decorator(iot_backends)
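A minimal usage sketch, assuming mock_iot ends up exported from the top-level moto package (thing name and attributes below are made up):

    import boto3
    from moto import mock_iot

    @mock_iot
    def test_create_and_describe_thing():
        client = boto3.client('iot', region_name='us-east-1')
        client.create_thing(thingName='device-001',
                            attributePayload={'attributes': {'model': 'x1'}})
        thing = client.describe_thing(thingName='device-001')
        # thing['thingName'] == 'device-001' and thing['attributes'] == {'model': 'x1'}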
24
moto/iot/exceptions.py Normal file
View File
@ -0,0 +1,24 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError
class IoTClientError(JsonRESTError):
code = 400
class ResourceNotFoundException(IoTClientError):
def __init__(self):
self.code = 404
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException",
"The specified resource does not exist"
)
class InvalidRequestException(IoTClientError):
def __init__(self):
self.code = 400
super(InvalidRequestException, self).__init__(
"InvalidRequestException",
"The request is not valid."
)
364
moto/iot/models.py Normal file
View File
@ -0,0 +1,364 @@
from __future__ import unicode_literals
import time
import boto3
import string
import random
import hashlib
import uuid
from moto.core import BaseBackend, BaseModel
from collections import OrderedDict
from .exceptions import (
ResourceNotFoundException,
InvalidRequestException
)
class FakeThing(BaseModel):
def __init__(self, thing_name, thing_type, attributes, region_name):
self.region_name = region_name
self.thing_name = thing_name
self.thing_type = thing_type
self.attributes = attributes
self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name)
self.version = 1
# TODO: do we need to handle 'version'?
# for iot-data
self.thing_shadow = None
def to_dict(self, include_default_client_id=False):
obj = {
'thingName': self.thing_name,
'attributes': self.attributes,
'version': self.version
}
if self.thing_type:
obj['thingTypeName'] = self.thing_type.thing_type_name
if include_default_client_id:
obj['defaultClientId'] = self.thing_name
return obj
class FakeThingType(BaseModel):
def __init__(self, thing_type_name, thing_type_properties, region_name):
self.region_name = region_name
self.thing_type_name = thing_type_name
self.thing_type_properties = thing_type_properties
t = time.time()
self.metadata = {
'deprecated': False,
'creationDate': int(t * 1000) / 1000.0
}
self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name)
def to_dict(self):
return {
'thingTypeName': self.thing_type_name,
'thingTypeProperties': self.thing_type_properties,
'thingTypeMetadata': self.metadata
}
class FakeCertificate(BaseModel):
def __init__(self, certificate_pem, status, region_name):
m = hashlib.sha256()
m.update(str(uuid.uuid4()).encode('utf-8'))
self.certificate_id = m.hexdigest()
self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id)
self.certificate_pem = certificate_pem
self.status = status
# TODO: must adjust
self.owner = '1'
self.transfer_data = {}
self.creation_date = time.time()
self.last_modified_date = self.creation_date
self.ca_certificate_id = None
def to_dict(self):
return {
'certificateArn': self.arn,
'certificateId': self.certificate_id,
'status': self.status,
'creationDate': self.creation_date
}
def to_description_dict(self):
"""
You might need the keys below in some situations:
- caCertificateId
- previousOwnedBy
"""
return {
'certificateArn': self.arn,
'certificateId': self.certificate_id,
'status': self.status,
'certificatePem': self.certificate_pem,
'ownedBy': self.owner,
'creationDate': self.creation_date,
'lastModifiedDate': self.last_modified_date,
'transferData': self.transfer_data
}
class FakePolicy(BaseModel):
def __init__(self, name, document, region_name):
self.name = name
self.document = document
self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name)
self.version = '1' # TODO: handle version
def to_get_dict(self):
return {
'policyName': self.name,
'policyArn': self.arn,
'policyDocument': self.document,
'defaultVersionId': self.version
}
def to_dict_at_creation(self):
return {
'policyName': self.name,
'policyArn': self.arn,
'policyDocument': self.document,
'policyVersionId': self.version
}
def to_dict(self):
return {
'policyName': self.name,
'policyArn': self.arn,
}
class IoTBackend(BaseBackend):
def __init__(self, region_name=None):
super(IoTBackend, self).__init__()
self.region_name = region_name
self.things = OrderedDict()
self.thing_types = OrderedDict()
self.certificates = OrderedDict()
self.policies = OrderedDict()
self.principal_policies = OrderedDict()
self.principal_things = OrderedDict()
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_thing(self, thing_name, thing_type_name, attribute_payload):
thing_types = self.list_thing_types()
thing_type = None
if thing_type_name:
filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
if len(filtered_thing_types) == 0:
raise ResourceNotFoundException()
thing_type = filtered_thing_types[0]
if attribute_payload is None:
attributes = {}
elif 'attributes' not in attribute_payload:
attributes = {}
else:
attributes = attribute_payload['attributes']
thing = FakeThing(thing_name, thing_type, attributes, self.region_name)
self.things[thing.arn] = thing
return thing.thing_name, thing.arn
def create_thing_type(self, thing_type_name, thing_type_properties):
if thing_type_properties is None:
thing_type_properties = {}
thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name)
self.thing_types[thing_type.arn] = thing_type
return thing_type.thing_type_name, thing_type.arn
def list_thing_types(self, thing_type_name=None):
if thing_type_name:
# It's weird, but thing_type_name is filtered by prefix (forward) match, not exact match
return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)]
thing_types = self.thing_types.values()
return thing_types
def list_things(self, attribute_name, attribute_value, thing_type_name):
# TODO: filter by attributes or thing_type
things = self.things.values()
return things
def describe_thing(self, thing_name):
things = [_ for _ in self.things.values() if _.thing_name == thing_name]
if len(things) == 0:
raise ResourceNotFoundException()
return things[0]
def describe_thing_type(self, thing_type_name):
thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name]
if len(thing_types) == 0:
raise ResourceNotFoundException()
return thing_types[0]
def delete_thing(self, thing_name, expected_version):
# TODO: handle expected_version
# can raise ResourceNotFoundError
thing = self.describe_thing(thing_name)
del self.things[thing.arn]
def delete_thing_type(self, thing_type_name):
# can raise ResourceNotFoundError
thing_type = self.describe_thing_type(thing_type_name)
del self.thing_types[thing_type.arn]
def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type):
# if attribute_payload is {}, do nothing
thing = self.describe_thing(thing_name)
thing_type = None
if remove_thing_type and thing_type_name:
raise InvalidRequestException()
# thing_type
if thing_type_name:
thing_types = self.list_thing_types()
filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
if len(filtered_thing_types) == 0:
raise ResourceNotFoundException()
thing_type = filtered_thing_types[0]
thing.thing_type = thing_type
if remove_thing_type:
thing.thing_type = None
# attribute
if attribute_payload is not None and 'attributes' in attribute_payload:
do_merge = attribute_payload.get('merge', False)
attributes = attribute_payload['attributes']
if not do_merge:
thing.attributes = attributes
else:
thing.attributes.update(attributes)
def _random_string(self):
n = 20
random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)])
return random_str
def create_keys_and_certificate(self, set_as_active):
# implement here
# caCertificate can be blank
key_pair = {
'PublicKey': self._random_string(),
'PrivateKey': self._random_string()
}
certificate_pem = self._random_string()
status = 'ACTIVE' if set_as_active else 'INACTIVE'
certificate = FakeCertificate(certificate_pem, status, self.region_name)
self.certificates[certificate.certificate_id] = certificate
return certificate, key_pair
def delete_certificate(self, certificate_id):
self.describe_certificate(certificate_id)
del self.certificates[certificate_id]
def describe_certificate(self, certificate_id):
certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id]
if len(certs) == 0:
raise ResourceNotFoundException()
return certs[0]
def list_certificates(self):
return self.certificates.values()
def update_certificate(self, certificate_id, new_status):
cert = self.describe_certificate(certificate_id)
# TODO: validate new_status
cert.status = new_status
def create_policy(self, policy_name, policy_document):
policy = FakePolicy(policy_name, policy_document, self.region_name)
self.policies[policy.name] = policy
return policy
def list_policies(self):
policies = self.policies.values()
return policies
def get_policy(self, policy_name):
policies = [_ for _ in self.policies.values() if _.name == policy_name]
if len(policies) == 0:
raise ResourceNotFoundException()
return policies[0]
def delete_policy(self, policy_name):
policy = self.get_policy(policy_name)
del self.policies[policy.name]
def _get_principal(self, principal_arn):
"""
Raises ResourceNotFoundException if the principal cannot be found.
"""
if ':cert/' in principal_arn:
certs = [_ for _ in self.certificates.values() if _.arn == principal_arn]
if len(certs) == 0:
raise ResourceNotFoundException()
principal = certs[0]
return principal
else:
# TODO: search for cognito_ids
pass
raise ResourceNotFoundException()
def attach_principal_policy(self, policy_name, principal_arn):
principal = self._get_principal(principal_arn)
policy = self.get_policy(policy_name)
k = (principal_arn, policy_name)
if k in self.principal_policies:
return
self.principal_policies[k] = (principal, policy)
def detach_principal_policy(self, policy_name, principal_arn):
# this may raise ResourceNotFoundException
self._get_principal(principal_arn)
self.get_policy(policy_name)
k = (principal_arn, policy_name)
if k not in self.principal_policies:
raise ResourceNotFoundException()
del self.principal_policies[k]
def list_principal_policies(self, principal_arn):
policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn]
return policies
def list_policy_principals(self, policy_name):
principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name]
return principals
def attach_thing_principal(self, thing_name, principal_arn):
principal = self._get_principal(principal_arn)
thing = self.describe_thing(thing_name)
k = (principal_arn, thing_name)
if k in self.principal_things:
return
self.principal_things[k] = (principal, thing)
def detach_thing_principal(self, thing_name, principal_arn):
# this may raise ResourceNotFoundException
self._get_principal(principal_arn)
self.describe_thing(thing_name)
k = (principal_arn, thing_name)
if k not in self.principal_things:
raise ResourceNotFoundException()
del self.principal_things[k]
def list_principal_things(self, principal_arn):
thing_names = [k[1] for k, v in self.principal_things.items() if k[0] == principal_arn]
return thing_names
def list_thing_principals(self, thing_name):
principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name]
return principals
available_regions = boto3.session.Session().get_available_regions("iot")
iot_backends = {region: IoTBackend(region) for region in available_regions}
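A sketch of the certificate/policy flow this backend supports (same mock_iot export assumption as above; the policy name and document are made up):

    import json
    import boto3
    from moto import mock_iot

    @mock_iot
    def test_attach_policy_to_certificate():
        client = boto3.client('iot', region_name='us-east-1')
        cert = client.create_keys_and_certificate(setAsActive=True)
        client.create_policy(policyName='allow-nothing',
                             policyDocument=json.dumps({'Version': '2012-10-17', 'Statement': []}))
        client.attach_principal_policy(policyName='allow-nothing',
                                       principal=cert['certificateArn'])
        attached = client.list_principal_policies(principal=cert['certificateArn'])
        # attached['policies'] now contains the 'allow-nothing' policy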
258
moto/iot/responses.py Normal file
View File
@ -0,0 +1,258 @@
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import iot_backends
import json
class IoTResponse(BaseResponse):
SERVICE_NAME = 'iot'
@property
def iot_backend(self):
return iot_backends[self.region]
def create_thing(self):
thing_name = self._get_param("thingName")
thing_type_name = self._get_param("thingTypeName")
attribute_payload = self._get_param("attributePayload")
thing_name, thing_arn = self.iot_backend.create_thing(
thing_name=thing_name,
thing_type_name=thing_type_name,
attribute_payload=attribute_payload,
)
return json.dumps(dict(thingName=thing_name, thingArn=thing_arn))
def create_thing_type(self):
thing_type_name = self._get_param("thingTypeName")
thing_type_properties = self._get_param("thingTypeProperties")
thing_type_name, thing_type_arn = self.iot_backend.create_thing_type(
thing_type_name=thing_type_name,
thing_type_properties=thing_type_properties,
)
return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn))
def list_thing_types(self):
# previous_next_token = self._get_param("nextToken")
# max_results = self._get_int_param("maxResults")
thing_type_name = self._get_param("thingTypeName")
thing_types = self.iot_backend.list_thing_types(
thing_type_name=thing_type_name
)
# TODO: support next_token and max_results
next_token = None
return json.dumps(dict(thingTypes=[_.to_dict() for _ in thing_types], nextToken=next_token))
def list_things(self):
# previous_next_token = self._get_param("nextToken")
# max_results = self._get_int_param("maxResults")
attribute_name = self._get_param("attributeName")
attribute_value = self._get_param("attributeValue")
thing_type_name = self._get_param("thingTypeName")
things = self.iot_backend.list_things(
attribute_name=attribute_name,
attribute_value=attribute_value,
thing_type_name=thing_type_name,
)
# TODO: support next_token and max_results
next_token = None
return json.dumps(dict(things=[_.to_dict() for _ in things], nextToken=next_token))
def describe_thing(self):
thing_name = self._get_param("thingName")
thing = self.iot_backend.describe_thing(
thing_name=thing_name,
)
return json.dumps(thing.to_dict(include_default_client_id=True))
def describe_thing_type(self):
thing_type_name = self._get_param("thingTypeName")
thing_type = self.iot_backend.describe_thing_type(
thing_type_name=thing_type_name,
)
return json.dumps(thing_type.to_dict())
def delete_thing(self):
thing_name = self._get_param("thingName")
expected_version = self._get_param("expectedVersion")
self.iot_backend.delete_thing(
thing_name=thing_name,
expected_version=expected_version,
)
return json.dumps(dict())
def delete_thing_type(self):
thing_type_name = self._get_param("thingTypeName")
self.iot_backend.delete_thing_type(
thing_type_name=thing_type_name,
)
return json.dumps(dict())
def update_thing(self):
thing_name = self._get_param("thingName")
thing_type_name = self._get_param("thingTypeName")
attribute_payload = self._get_param("attributePayload")
expected_version = self._get_param("expectedVersion")
remove_thing_type = self._get_param("removeThingType")
self.iot_backend.update_thing(
thing_name=thing_name,
thing_type_name=thing_type_name,
attribute_payload=attribute_payload,
expected_version=expected_version,
remove_thing_type=remove_thing_type,
)
return json.dumps(dict())
def create_keys_and_certificate(self):
set_as_active = self._get_param("setAsActive")
cert, key_pair = self.iot_backend.create_keys_and_certificate(
set_as_active=set_as_active,
)
return json.dumps(dict(
certificateArn=cert.arn,
certificateId=cert.certificate_id,
certificatePem=cert.certificate_pem,
keyPair=key_pair
))
def delete_certificate(self):
certificate_id = self._get_param("certificateId")
self.iot_backend.delete_certificate(
certificate_id=certificate_id,
)
return json.dumps(dict())
def describe_certificate(self):
certificate_id = self._get_param("certificateId")
certificate = self.iot_backend.describe_certificate(
certificate_id=certificate_id,
)
return json.dumps(dict(certificateDescription=certificate.to_description_dict()))
def list_certificates(self):
# page_size = self._get_int_param("pageSize")
# marker = self._get_param("marker")
# ascending_order = self._get_param("ascendingOrder")
certificates = self.iot_backend.list_certificates()
# TODO: handle pagination
return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))
def update_certificate(self):
certificate_id = self._get_param("certificateId")
new_status = self._get_param("newStatus")
self.iot_backend.update_certificate(
certificate_id=certificate_id,
new_status=new_status,
)
return json.dumps(dict())
def create_policy(self):
policy_name = self._get_param("policyName")
policy_document = self._get_param("policyDocument")
policy = self.iot_backend.create_policy(
policy_name=policy_name,
policy_document=policy_document,
)
return json.dumps(policy.to_dict_at_creation())
def list_policies(self):
# marker = self._get_param("marker")
# page_size = self._get_int_param("pageSize")
# ascending_order = self._get_param("ascendingOrder")
policies = self.iot_backend.list_policies()
# TODO: handle pagination
return json.dumps(dict(policies=[_.to_dict() for _ in policies]))
def get_policy(self):
policy_name = self._get_param("policyName")
policy = self.iot_backend.get_policy(
policy_name=policy_name,
)
return json.dumps(policy.to_get_dict())
def delete_policy(self):
policy_name = self._get_param("policyName")
self.iot_backend.delete_policy(
policy_name=policy_name,
)
return json.dumps(dict())
def attach_principal_policy(self):
policy_name = self._get_param("policyName")
principal = self.headers.get('x-amzn-iot-principal')
self.iot_backend.attach_principal_policy(
policy_name=policy_name,
principal_arn=principal,
)
return json.dumps(dict())
def detach_principal_policy(self):
policy_name = self._get_param("policyName")
principal = self.headers.get('x-amzn-iot-principal')
self.iot_backend.detach_principal_policy(
policy_name=policy_name,
principal_arn=principal,
)
return json.dumps(dict())
def list_principal_policies(self):
principal = self.headers.get('x-amzn-iot-principal')
# marker = self._get_param("marker")
# page_size = self._get_int_param("pageSize")
# ascending_order = self._get_param("ascendingOrder")
policies = self.iot_backend.list_principal_policies(
principal_arn=principal
)
# TODO: handle pagination
next_marker = None
return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker))
def list_policy_principals(self):
policy_name = self.headers.get('x-amzn-iot-policy')
# marker = self._get_param("marker")
# page_size = self._get_int_param("pageSize")
# ascending_order = self._get_param("ascendingOrder")
principals = self.iot_backend.list_policy_principals(
policy_name=policy_name,
)
# TODO: handle pagination
next_marker = None
return json.dumps(dict(principals=principals, nextMarker=next_marker))
def attach_thing_principal(self):
thing_name = self._get_param("thingName")
principal = self.headers.get('x-amzn-principal')
self.iot_backend.attach_thing_principal(
thing_name=thing_name,
principal_arn=principal,
)
return json.dumps(dict())
def detach_thing_principal(self):
thing_name = self._get_param("thingName")
principal = self.headers.get('x-amzn-principal')
self.iot_backend.detach_thing_principal(
thing_name=thing_name,
principal_arn=principal,
)
return json.dumps(dict())
def list_principal_things(self):
next_token = self._get_param("nextToken")
# max_results = self._get_int_param("maxResults")
principal = self.headers.get('x-amzn-principal')
things = self.iot_backend.list_principal_things(
principal_arn=principal,
)
# TODO: handle pagination
next_token = None
return json.dumps(dict(things=things, nextToken=next_token))
def list_thing_principals(self):
thing_name = self._get_param("thingName")
principals = self.iot_backend.list_thing_principals(
thing_name=thing_name,
)
return json.dumps(dict(principals=principals))
14
moto/iot/urls.py Normal file
View File
@ -0,0 +1,14 @@
from __future__ import unicode_literals
from .responses import IoTResponse
url_bases = [
"https?://iot.(.+).amazonaws.com",
]
response = IoTResponse()
url_paths = {
'{0}/.*$': response.dispatch,
}
6
moto/iotdata/__init__.py Normal file
View File
@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import iotdata_backends
from ..core.models import base_decorator
iotdata_backend = iotdata_backends['us-east-1']
mock_iotdata = base_decorator(iotdata_backends)
View File
@ -0,0 +1,23 @@
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError
class IoTDataPlaneClientError(JsonRESTError):
code = 400
class ResourceNotFoundException(IoTDataPlaneClientError):
def __init__(self):
self.code = 404
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException",
"The specified resource does not exist"
)
class InvalidRequestException(IoTDataPlaneClientError):
def __init__(self, message):
self.code = 400
super(InvalidRequestException, self).__init__(
"InvalidRequestException", message
)
189
moto/iotdata/models.py Normal file
View File
@ -0,0 +1,189 @@
from __future__ import unicode_literals
import json
import time
import boto3
import jsondiff
from moto.core import BaseBackend, BaseModel
from moto.iot import iot_backends
from .exceptions import (
ResourceNotFoundException,
InvalidRequestException
)
class FakeShadow(BaseModel):
"""See the specification:
http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html
"""
def __init__(self, desired, reported, requested_payload, version, deleted=False):
self.desired = desired
self.reported = reported
self.requested_payload = requested_payload
self.version = version
self.timestamp = int(time.time())
self.deleted = deleted
self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp)
self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp)
@classmethod
def create_from_previous_version(cls, previous_shadow, payload):
"""
Pass None as the payload to delete the shadow.
"""
version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {})
if payload is None:
# if given payload is None, delete existing payload
# this means the request was delete_thing_shadow
shadow = FakeShadow(None, None, None, version, deleted=True)
return shadow
# payload is guaranteed to have a 'state' key here (validated in update_thing_shadow)
desired = payload['state'].get(
'desired',
previous_payload.get('state', {}).get('desired', None)
)
reported = payload['state'].get(
'reported',
previous_payload.get('state', {}).get('reported', None)
)
shadow = FakeShadow(desired, reported, payload, version)
return shadow
@classmethod
def parse_payload(cls, desired, reported):
if desired is None:
delta = reported
elif reported is None:
delta = desired
else:
delta = jsondiff.diff(desired, reported)
return delta
def _create_metadata_from_state(self, state, ts):
"""
state must be a desired- or reported-style dict object
replaces each primitive value with {"timestamp": ts} in the dict
"""
if state is None:
return None
def _f(elem, ts):
if isinstance(elem, dict):
return {_: _f(elem[_], ts) for _ in elem.keys()}
if isinstance(elem, list):
return [_f(_, ts) for _ in elem]
return {"timestamp": ts}
return _f(state, ts)
def to_response_dict(self):
desired = self.requested_payload['state'].get('desired', None)
reported = self.requested_payload['state'].get('reported', None)
payload = {}
if desired is not None:
payload['desired'] = desired
if reported is not None:
payload['reported'] = reported
metadata = {}
if desired is not None:
metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp)
if reported is not None:
metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp)
return {
'state': payload,
'metadata': metadata,
'timestamp': self.timestamp,
'version': self.version
}
def to_dict(self, include_delta=True):
"""returning nothing except for just top-level keys for now.
"""
if self.deleted:
return {
'timestamp': self.timestamp,
'version': self.version
}
delta = self.parse_payload(self.desired, self.reported)
payload = {}
if self.desired is not None:
payload['desired'] = self.desired
if self.reported is not None:
payload['reported'] = self.reported
if include_delta and (delta is not None and len(delta.keys()) != 0):
payload['delta'] = delta
metadata = {}
if self.metadata_desired is not None:
metadata['desired'] = self.metadata_desired
if self.metadata_reported is not None:
metadata['reported'] = self.metadata_reported
return {
'state': payload,
'metadata': metadata,
'timestamp': self.timestamp,
'version': self.version
}
class IoTDataPlaneBackend(BaseBackend):
def __init__(self, region_name=None):
super(IoTDataPlaneBackend, self).__init__()
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def update_thing_shadow(self, thing_name, payload):
"""
spec of payload:
- need node `state`
- state node must be an Object
- State contains an invalid node: 'foo'
"""
thing = iot_backends[self.region_name].describe_thing(thing_name)
# validate
try:
payload = json.loads(payload)
except ValueError:
raise InvalidRequestException('invalid json')
if 'state' not in payload:
raise InvalidRequestException('need node `state`')
if not isinstance(payload['state'], dict):
raise InvalidRequestException('state node must be an Object')
if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']):
raise InvalidRequestException('State contains an invalid node')
new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
thing.thing_shadow = new_shadow
return thing.thing_shadow
def get_thing_shadow(self, thing_name):
thing = iot_backends[self.region_name].describe_thing(thing_name)
if thing.thing_shadow is None or thing.thing_shadow.deleted:
raise ResourceNotFoundException()
return thing.thing_shadow
def delete_thing_shadow(self, thing_name):
"""after deleting, get_thing_shadow will raise ResourceNotFound.
But the version of the shadow keeps increasing...
"""
thing = iot_backends[self.region_name].describe_thing(thing_name)
if thing.thing_shadow is None:
raise ResourceNotFoundException()
payload = None
new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
thing.thing_shadow = new_shadow
return thing.thing_shadow
available_regions = boto3.session.Session().get_available_regions("iot-data")
iotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions}
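A sketch of a shadow round trip, assuming both mock_iot and mock_iotdata are exported from the moto package (thing name and state are made up):

    import json
    import boto3
    from moto import mock_iot, mock_iotdata

    @mock_iot
    @mock_iotdata
    def test_shadow_round_trip():
        iot = boto3.client('iot', region_name='us-east-1')
        data = boto3.client('iot-data', region_name='us-east-1')
        iot.create_thing(thingName='device-001')
        data.update_thing_shadow(
            thingName='device-001',
            payload=json.dumps({'state': {'desired': {'led': 'on'}}}))
        shadow = json.loads(data.get_thing_shadow(thingName='device-001')['payload'].read())
        # shadow['version'] == 1 and shadow['state']['desired'] == {'led': 'on'};
        # the version keeps increasing across updates and deletes, as noted above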
35
moto/iotdata/responses.py Normal file
View File
@ -0,0 +1,35 @@
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import iotdata_backends
import json
class IoTDataPlaneResponse(BaseResponse):
SERVICE_NAME = 'iot-data'
@property
def iotdata_backend(self):
return iotdata_backends[self.region]
def update_thing_shadow(self):
thing_name = self._get_param("thingName")
payload = self.body
payload = self.iotdata_backend.update_thing_shadow(
thing_name=thing_name,
payload=payload,
)
return json.dumps(payload.to_response_dict())
def get_thing_shadow(self):
thing_name = self._get_param("thingName")
payload = self.iotdata_backend.get_thing_shadow(
thing_name=thing_name,
)
return json.dumps(payload.to_dict())
def delete_thing_shadow(self):
thing_name = self._get_param("thingName")
payload = self.iotdata_backend.delete_thing_shadow(
thing_name=thing_name,
)
return json.dumps(payload.to_dict())
14
moto/iotdata/urls.py Normal file
View File
@ -0,0 +1,14 @@
from __future__ import unicode_literals
from .responses import IoTDataPlaneResponse
url_bases = [
"https?://data.iot.(.+).amazonaws.com",
]
response = IoTDataPlaneResponse()
url_paths = {
'{0}/.*$': response.dispatch,
}
View File
@ -704,7 +704,8 @@ class RDS2Backend(BaseBackend):
if self.arn_regex.match(source_database_id): if self.arn_regex.match(source_database_id):
db_kwargs['region'] = self.region db_kwargs['region'] = self.region
replica = copy.deepcopy(primary) # Shouldn't really copy here as the instance is duplicated. RDS replicas have different instances.
replica = copy.copy(primary)
replica.update(db_kwargs) replica.update(db_kwargs)
replica.set_as_replica() replica.set_as_replica()
self.databases[database_id] = replica self.databases[database_id] = replica
View File
@ -196,14 +196,14 @@ class FakeZone(BaseModel):
self.rrsets = [ self.rrsets = [
record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier]
def get_record_sets(self, type_filter, name_filter): def get_record_sets(self, start_type, start_name):
record_sets = list(self.rrsets) # Copy the list record_sets = list(self.rrsets) # Copy the list
if type_filter: if start_type:
record_sets = [ record_sets = [
record_set for record_set in record_sets if record_set._type == type_filter] record_set for record_set in record_sets if record_set._type >= start_type]
if name_filter: if start_name:
record_sets = [ record_sets = [
record_set for record_set in record_sets if record_set.name == name_filter] record_set for record_set in record_sets if record_set.name >= start_name]
return record_sets return record_sets
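In other words, type and name now behave as pagination start markers (lexicographic >=) rather than exact filters; a tiny sketch of the new semantics on made-up data:

    rrsets = [('a.example.com.', 'A'), ('b.example.com.', 'CNAME'), ('c.example.com.', 'TXT')]
    start_name = 'b.example.com.'
    # everything at or after the start name is returned, as list_resource_record_sets expects
    print([r for r in rrsets if r[0] >= start_name])
    # [('b.example.com.', 'CNAME'), ('c.example.com.', 'TXT')]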
View File
@ -151,9 +151,9 @@ class Route53(BaseResponse):
elif method == "GET": elif method == "GET":
querystring = parse_qs(parsed_url.query) querystring = parse_qs(parsed_url.query)
template = Template(LIST_RRSET_REPONSE) template = Template(LIST_RRSET_REPONSE)
type_filter = querystring.get("type", [None])[0] start_type = querystring.get("type", [None])[0]
name_filter = querystring.get("name", [None])[0] start_name = querystring.get("name", [None])[0]
record_sets = the_zone.get_record_sets(type_filter, name_filter) record_sets = the_zone.get_record_sets(start_type, start_name)
return 200, headers, template.render(record_sets=record_sets) return 200, headers, template.render(record_sets=record_sets)
def health_check_response(self, request, full_url, headers): def health_check_response(self, request, full_url, headers):
View File
@ -8,6 +8,7 @@ from six.moves.urllib.parse import parse_qs, urlparse
import xmltodict import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin from moto.core.responses import _TemplateEnvironmentMixin
from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys
@ -113,7 +114,10 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 200, {}, response.encode("utf-8") return 200, {}, response.encode("utf-8")
else: else:
status_code, headers, response_content = response status_code, headers, response_content = response
return status_code, headers, response_content.encode("utf-8") if not isinstance(response_content, six.binary_type):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers): def _bucket_response(self, request, full_url, headers):
parsed_url = urlparse(full_url) parsed_url = urlparse(full_url)
@ -139,6 +143,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
body = b'' body = b''
if isinstance(body, six.binary_type): if isinstance(body, six.binary_type):
body = body.decode('utf-8') body = body.decode('utf-8')
body = u'{0}'.format(body).encode('utf-8')
if method == 'HEAD': if method == 'HEAD':
return self._bucket_response_head(bucket_name, headers) return self._bucket_response_head(bucket_name, headers)
@ -209,7 +214,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
if not website_configuration: if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG) template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name) return 404, {}, template.render(bucket_name=bucket_name)
return website_configuration return 200, {}, website_configuration
elif 'acl' in querystring: elif 'acl' in querystring:
bucket = self.backend.get_bucket(bucket_name) bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE) template = self.response_template(S3_OBJECT_ACL_RESPONSE)
@ -355,7 +360,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
if not request.headers.get('Content-Length'): if not request.headers.get('Content-Length'):
return 411, {}, "Content-Length required" return 411, {}, "Content-Length required"
if 'versioning' in querystring: if 'versioning' in querystring:
ver = re.search('<Status>([A-Za-z]+)</Status>', body) ver = re.search('<Status>([A-Za-z]+)</Status>', body.decode())
if ver: if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1)) self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING) template = self.response_template(S3_BUCKET_VERSIONING)
@ -444,7 +449,12 @@ class ResponseObject(_TemplateEnvironmentMixin):
def _bucket_response_post(self, request, body, bucket_name, headers): def _bucket_response_post(self, request, body, bucket_name, headers):
if not request.headers.get('Content-Length'): if not request.headers.get('Content-Length'):
return 411, {}, "Content-Length required" return 411, {}, "Content-Length required"
path = request.path if hasattr(request, 'path') else request.path_url
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = request.full_path if hasattr(request, 'full_path') else request.path_url
if self.is_delete_keys(request, path, bucket_name): if self.is_delete_keys(request, path, bucket_name):
return self._bucket_response_delete_keys(request, body, bucket_name, headers) return self._bucket_response_delete_keys(request, body, bucket_name, headers)
@ -454,6 +464,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
form = request.form form = request.form
else: else:
# HTTPretty, build new form object # HTTPretty, build new form object
body = body.decode()
form = {} form = {}
for kv in body.split('&'): for kv in body.split('&'):
k, v = kv.split('=') k, v = kv.split('=')
@ -764,7 +776,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
return FakeTagging() return FakeTagging()
def _tagging_from_xml(self, xml): def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml) parsed_xml = xmltodict.parse(xml, force_list={'Tag': True})
tags = [] tags = []
for tag in parsed_xml['Tagging']['TagSet']['Tag']: for tag in parsed_xml['Tagging']['TagSet']['Tag']:
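Without force_list, xmltodict collapses a single <Tag> element into a plain dict and the loop above would iterate over its keys instead of over Tag entries; a small sketch of the difference:

    import xmltodict

    xml = "<Tagging><TagSet><Tag><Key>env</Key><Value>test</Value></Tag></TagSet></Tagging>"
    single = xmltodict.parse(xml)['Tagging']['TagSet']['Tag']                              # an OrderedDict
    as_list = xmltodict.parse(xml, force_list={'Tag': True})['Tagging']['TagSet']['Tag']   # always a list
    print(type(single).__name__, type(as_list).__name__)  # OrderedDict list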
View File
@ -32,3 +32,11 @@ class SNSInvalidParameter(RESTError):
def __init__(self, message): def __init__(self, message):
super(SNSInvalidParameter, self).__init__( super(SNSInvalidParameter, self).__init__(
"InvalidParameter", message) "InvalidParameter", message)
class InvalidParameterValue(RESTError):
code = 400
def __init__(self, message):
super(InvalidParameterValue, self).__init__(
"InvalidParameterValue", message)
View File
@ -7,6 +7,7 @@ import json
import boto.sns import boto.sns
import requests import requests
import six import six
import re
from moto.compat import OrderedDict from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
@ -15,7 +16,8 @@ from moto.sqs import sqs_backends
from moto.awslambda import lambda_backends from moto.awslambda import lambda_backends
from .exceptions import ( from .exceptions import (
SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter,
InvalidParameterValue
) )
from .utils import make_arn_for_topic, make_arn_for_subscription from .utils import make_arn_for_topic, make_arn_for_subscription
@ -193,9 +195,15 @@ class SNSBackend(BaseBackend):
self.sms_attributes.update(attrs) self.sms_attributes.update(attrs)
def create_topic(self, name): def create_topic(self, name):
topic = Topic(name, self) fails_constraints = not re.match(r'^[a-zA-Z0-9](?:[A-Za-z0-9_-]{0,253}[a-zA-Z0-9])?$', name)
self.topics[topic.arn] = topic if fails_constraints:
return topic raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.")
candidate_topic = Topic(name, self)
if candidate_topic.arn in self.topics:
return self.topics[candidate_topic.arn]
else:
self.topics[candidate_topic.arn] = candidate_topic
return candidate_topic
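A sketch of both behaviours through boto3 under mock_sns (the topic names are made up):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_sns

    @mock_sns
    def test_create_topic_is_idempotent_and_validated():
        client = boto3.client('sns', region_name='us-east-1')
        first = client.create_topic(Name='my_topic-1')
        second = client.create_topic(Name='my_topic-1')
        assert first['TopicArn'] == second['TopicArn']   # same topic returned, not a duplicate
        try:
            client.create_topic(Name='not a valid name!')
        except ClientError:
            pass   # invalid names are rejected with InvalidParameterValue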
def _get_values_nexttoken(self, values_map, next_token=None): def _get_values_nexttoken(self, values_map, next_token=None):
if next_token is None: if next_token is None:
View File
@ -2,6 +2,7 @@ from __future__ import unicode_literals
import base64 import base64
import hashlib import hashlib
import json
import re import re
import six import six
import struct import struct
@ -9,6 +10,7 @@ from xml.sax.saxutils import escape
import boto.sqs import boto.sqs
from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis
from .utils import generate_receipt_handle from .utils import generate_receipt_handle
@ -166,11 +168,14 @@ class Queue(BaseModel):
'ReceiveMessageWaitTimeSeconds', 'ReceiveMessageWaitTimeSeconds',
'VisibilityTimeout', 'VisibilityTimeout',
'WaitTimeSeconds'] 'WaitTimeSeconds']
ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes',
'GetQueueUrl', 'ReceiveMessage', 'SendMessage')
def __init__(self, name, region, **kwargs): def __init__(self, name, region, **kwargs):
self.name = name self.name = name
self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30))
self.region = region self.region = region
self.tags = {}
self._messages = [] self._messages = []
@ -189,14 +194,42 @@ class Queue(BaseModel):
self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days
self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name)
self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0))
self.permissions = {}
# wait_time_seconds will be set to immediate return messages # wait_time_seconds will be set to immediate return messages
self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0))
self.redrive_policy = {}
self.dead_letter_queue = None
if 'RedrivePolicy' in kwargs:
self._setup_dlq(kwargs['RedrivePolicy'])
# Check some conditions # Check some conditions
if self.fifo_queue and not self.name.endswith('.fifo'): if self.fifo_queue and not self.name.endswith('.fifo'):
raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues')
def _setup_dlq(self, policy_json):
try:
self.redrive_policy = json.loads(policy_json)
except ValueError:
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json')
if 'deadLetterTargetArn' not in self.redrive_policy:
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn')
if 'maxReceiveCount' not in self.redrive_policy:
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain maxReceiveCount')
for queue in sqs_backends[self.region].queues.values():
if queue.queue_arn == self.redrive_policy['deadLetterTargetArn']:
self.dead_letter_queue = queue
if self.fifo_queue and not queue.fifo_queue:
raise RESTError('InvalidParameterCombination', 'Fifo queues cannot use non fifo dead letter queues')
break
else:
raise RESTError('AWS.SimpleQueueService.NonExistentQueue', 'Could not find DLQ for {0}'.format(self.redrive_policy['deadLetterTargetArn']))
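For context, RedrivePolicy arrives as a JSON string in the queue attributes, and deadLetterTargetArn is resolved against queues the backend already knows about, so the dead-letter queue has to be created first. A minimal boto3 sketch of that setup (queue names, region and maxReceiveCount are illustrative; the count is kept numeric so the backend's receive-count comparison works):

    import json

    import boto3
    from moto import mock_sqs


    @mock_sqs
    def create_queue_with_dead_letter_queue():
        sqs = boto3.client('sqs', region_name='us-east-1')

        # The DLQ must exist before it can be referenced by deadLetterTargetArn.
        dlq_url = sqs.create_queue(QueueName='my-dlq')['QueueUrl']
        dlq_arn = sqs.get_queue_attributes(
            QueueUrl=dlq_url, AttributeNames=['QueueArn'])['Attributes']['QueueArn']

        sqs.create_queue(
            QueueName='my-queue',
            Attributes={
                'RedrivePolicy': json.dumps({
                    'deadLetterTargetArn': dlq_arn,
                    'maxReceiveCount': 2,
                }),
            },
        )


    create_queue_with_dead_letter_queue()

On receive, messages whose approximate receive count has reached maxReceiveCount are then moved to this dead-letter queue instead of being returned (see the receive_messages change below).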
@classmethod @classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties'] properties = cloudformation_json['Properties']
@ -382,9 +415,14 @@ class SQSBackend(BaseBackend):
time.sleep(0.001) time.sleep(0.001)
continue continue
messages_to_dlq = []
for message in queue.messages: for message in queue.messages:
if not message.visible: if not message.visible:
continue continue
if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']:
messages_to_dlq.append(message)
continue
message.mark_received( message.mark_received(
visibility_timeout=visibility_timeout visibility_timeout=visibility_timeout
) )
@ -392,6 +430,10 @@ class SQSBackend(BaseBackend):
if len(result) >= count: if len(result) >= count:
break break
for message in messages_to_dlq:
queue._messages.remove(message)
queue.dead_letter_queue.add_message(message)
return result return result
def delete_message(self, queue_name, receipt_handle): def delete_message(self, queue_name, receipt_handle):
@ -419,6 +461,49 @@ class SQSBackend(BaseBackend):
queue = self.get_queue(queue_name) queue = self.get_queue(queue_name)
queue._messages = [] queue._messages = []
def list_dead_letter_source_queues(self, queue_name):
dlq = self.get_queue(queue_name)
queues = []
for queue in self.queues.values():
if queue.dead_letter_queue is dlq:
queues.append(queue)
return queues
def add_permission(self, queue_name, actions, account_ids, label):
queue = self.get_queue(queue_name)
if actions is None or len(actions) == 0:
raise RESTError('InvalidParameterValue', 'Need at least one Action')
if account_ids is None or len(account_ids) == 0:
raise RESTError('InvalidParameterValue', 'Need at least one Account ID')
if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]):
raise RESTError('InvalidParameterValue', 'Invalid permissions')
queue.permissions[label] = (account_ids, actions)
def remove_permission(self, queue_name, label):
queue = self.get_queue(queue_name)
if label not in queue.permissions:
raise RESTError('InvalidParameterValue', "Permission doesn't exist for the given label")
del queue.permissions[label]
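A short boto3 sketch of the new permission calls against the mock (account id, label and queue name are made up); the label identifies the statement so it can be removed again later:

    import boto3
    from moto import mock_sqs


    @mock_sqs
    def grant_and_revoke_queue_access():
        sqs = boto3.client('sqs', region_name='us-east-1')
        queue_url = sqs.create_queue(QueueName='shared-queue')['QueueUrl']

        # Only actions listed in Queue.ALLOWED_PERMISSIONS are accepted.
        sqs.add_permission(
            QueueUrl=queue_url,
            Label='cross-account-send',
            AWSAccountIds=['111111111111'],
            Actions=['SendMessage'],
        )
        sqs.remove_permission(QueueUrl=queue_url, Label='cross-account-send')


    grant_and_revoke_queue_access()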
def tag_queue(self, queue_name, tags):
queue = self.get_queue(queue_name)
queue.tags.update(tags)
def untag_queue(self, queue_name, tag_keys):
queue = self.get_queue(queue_name)
for key in tag_keys:
try:
del queue.tags[key]
except KeyError:
pass
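And the matching tag operations, again as a hedged sketch (tag keys and values are arbitrary):

    import boto3
    from moto import mock_sqs


    @mock_sqs
    def tag_and_untag_queue():
        sqs = boto3.client('sqs', region_name='us-east-1')
        queue_url = sqs.create_queue(QueueName='tagged-queue')['QueueUrl']

        sqs.tag_queue(QueueUrl=queue_url, Tags={'team': 'data', 'env': 'test'})

        # Keys that are not present are silently ignored by the backend.
        sqs.untag_queue(QueueUrl=queue_url, TagKeys=['env', 'not-present'])

        tags = sqs.list_queue_tags(QueueUrl=queue_url)['Tags']
        assert tags == {'team': 'data'}


    tag_and_untag_queue()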
sqs_backends = {} sqs_backends = {}
for region in boto.sqs.regions(): for region in boto.sqs.regions():

View File

@ -40,12 +40,15 @@ class SQSResponse(BaseResponse):
queue_name = self.path.split("/")[-1] queue_name = self.path.split("/")[-1]
return queue_name return queue_name
def _get_validated_visibility_timeout(self): def _get_validated_visibility_timeout(self, timeout=None):
""" """
:raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT :raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
:raises TypeError: If visibility timeout was not specified :raises TypeError: If visibility timeout was not specified
""" """
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0]) if timeout is not None:
visibility_timeout = int(timeout)
else:
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT: if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT:
raise ValueError raise ValueError
@ -119,6 +122,49 @@ class SQSResponse(BaseResponse):
template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE) template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE)
return template.render() return template.render()
def change_message_visibility_batch(self):
queue_name = self._get_queue_name()
entries = self._get_list_prefix('ChangeMessageVisibilityBatchRequestEntry')
success = []
error = []
for entry in entries:
try:
visibility_timeout = self._get_validated_visibility_timeout(entry['visibility_timeout'])
except ValueError:
error.append({
'Id': entry['id'],
'SenderFault': 'true',
'Code': 'InvalidParameterValue',
'Message': 'Visibility timeout invalid'
})
continue
try:
self.sqs_backend.change_message_visibility(
queue_name=queue_name,
receipt_handle=entry['receipt_handle'],
visibility_timeout=visibility_timeout
)
success.append(entry['id'])
except ReceiptHandleIsInvalid as e:
error.append({
'Id': entry['id'],
'SenderFault': 'true',
'Code': 'ReceiptHandleIsInvalid',
'Message': e.description
})
except MessageNotInflight as e:
error.append({
'Id': entry['id'],
'SenderFault': 'false',
'Code': 'AWS.SimpleQueueService.MessageNotInflight',
'Message': e.description
})
template = self.response_template(CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE)
return template.render(success=success, errors=error)
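Exercised through boto3, the batch call reports per-entry success and failure; a minimal sketch (ids, queue name and timeout are illustrative):

    import boto3
    from moto import mock_sqs


    @mock_sqs
    def batch_visibility_change():
        sqs = boto3.client('sqs', region_name='us-east-1')
        queue_url = sqs.create_queue(QueueName='batch-queue')['QueueUrl']
        sqs.send_message(QueueUrl=queue_url, MessageBody='hello')

        message = sqs.receive_message(QueueUrl=queue_url)['Messages'][0]
        resp = sqs.change_message_visibility_batch(
            QueueUrl=queue_url,
            Entries=[{
                'Id': 'msg-1',
                'ReceiptHandle': message['ReceiptHandle'],
                'VisibilityTimeout': 60,
            }],
        )
        # Entries with an invalid timeout or receipt handle come back under 'Failed' instead.
        assert resp['Successful'][0]['Id'] == 'msg-1'


    batch_visibility_change()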
def get_queue_attributes(self): def get_queue_attributes(self):
queue_name = self._get_queue_name() queue_name = self._get_queue_name()
try: try:
@ -288,8 +334,62 @@ class SQSResponse(BaseResponse):
messages = self.sqs_backend.receive_messages( messages = self.sqs_backend.receive_messages(
queue_name, message_count, wait_time, visibility_timeout) queue_name, message_count, wait_time, visibility_timeout)
template = self.response_template(RECEIVE_MESSAGE_RESPONSE) template = self.response_template(RECEIVE_MESSAGE_RESPONSE)
output = template.render(messages=messages) return template.render(messages=messages)
return output
def list_dead_letter_source_queues(self):
request_url = urlparse(self.uri)
queue_name = self._get_queue_name()
source_queue_urls = self.sqs_backend.list_dead_letter_source_queues(queue_name)
template = self.response_template(LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE)
return template.render(queues=source_queue_urls, request_url=request_url)
def add_permission(self):
queue_name = self._get_queue_name()
actions = self._get_multi_param('ActionName')
account_ids = self._get_multi_param('AWSAccountId')
label = self._get_param('Label')
self.sqs_backend.add_permission(queue_name, actions, account_ids, label)
template = self.response_template(ADD_PERMISSION_RESPONSE)
return template.render()
def remove_permission(self):
queue_name = self._get_queue_name()
label = self._get_param('Label')
self.sqs_backend.remove_permission(queue_name, label)
template = self.response_template(REMOVE_PERMISSION_RESPONSE)
return template.render()
def tag_queue(self):
queue_name = self._get_queue_name()
tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value')
self.sqs_backend.tag_queue(queue_name, tags)
template = self.response_template(TAG_QUEUE_RESPONSE)
return template.render()
def untag_queue(self):
queue_name = self._get_queue_name()
tag_keys = self._get_multi_param('TagKey')
self.sqs_backend.untag_queue(queue_name, tag_keys)
template = self.response_template(UNTAG_QUEUE_RESPONSE)
return template.render()
def list_queue_tags(self):
queue_name = self._get_queue_name()
queue = self.sqs_backend.get_queue(queue_name)
template = self.response_template(LIST_QUEUE_TAGS_RESPONSE)
return template.render(tags=queue.tags)
CREATE_QUEUE_RESPONSE = """<CreateQueueResponse> CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
@ -307,7 +407,7 @@ GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl> <QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
</GetQueueUrlResult> </GetQueueUrlResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>470a6f13-2ed9-4181-ad8a-2fdea142988e</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</GetQueueUrlResponse>""" </GetQueueUrlResponse>"""
@ -318,13 +418,13 @@ LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
{% endfor %} {% endfor %}
</ListQueuesResult> </ListQueuesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>725275ae-0b9b-4762-b238-436d7c65a1ac</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ListQueuesResponse>""" </ListQueuesResponse>"""
DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse> DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse>
<ResponseMetadata> <ResponseMetadata>
<RequestId>6fde8d1e-52cd-4581-8cd9-c512f4c64223</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteQueueResponse>""" </DeleteQueueResponse>"""
@ -338,13 +438,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """<GetQueueAttributesResponse>
{% endfor %} {% endfor %}
</GetQueueAttributesResult> </GetQueueAttributesResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>1ea71be5-b5a2-4f9d-b85a-945d8d08cd0b</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</GetQueueAttributesResponse>""" </GetQueueAttributesResponse>"""
SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse> SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse>
<ResponseMetadata> <ResponseMetadata>
<RequestId>e5cca473-4fc0-4198-a451-8abb94d02c75</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SetQueueAttributesResponse>""" </SetQueueAttributesResponse>"""
@ -361,7 +461,7 @@ SEND_MESSAGE_RESPONSE = """<SendMessageResponse>
</MessageId> </MessageId>
</SendMessageResult> </SendMessageResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>27daac76-34dd-47df-bd01-1f6e873584a0</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SendMessageResponse>""" </SendMessageResponse>"""
@ -409,7 +509,7 @@ RECEIVE_MESSAGE_RESPONSE = """<ReceiveMessageResponse>
{% endfor %} {% endfor %}
</ReceiveMessageResult> </ReceiveMessageResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>b6633655-283d-45b4-aee4-4e84e0ae6afa</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ReceiveMessageResponse>""" </ReceiveMessageResponse>"""
@ -427,13 +527,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """<SendMessageBatchResponse>
{% endfor %} {% endfor %}
</SendMessageBatchResult> </SendMessageBatchResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SendMessageBatchResponse>""" </SendMessageBatchResponse>"""
DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse> DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse>
<ResponseMetadata> <ResponseMetadata>
<RequestId>b5293cb5-d306-4a17-9048-b263635abe42</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteMessageResponse>""" </DeleteMessageResponse>"""
@ -446,22 +546,92 @@ DELETE_MESSAGE_BATCH_RESPONSE = """<DeleteMessageBatchResponse>
{% endfor %} {% endfor %}
</DeleteMessageBatchResult> </DeleteMessageBatchResult>
<ResponseMetadata> <ResponseMetadata>
<RequestId>d6f86b7a-74d1-4439-b43f-196a1e29cd85</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</DeleteMessageBatchResponse>""" </DeleteMessageBatchResponse>"""
CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse> CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse>
<ResponseMetadata> <ResponseMetadata>
<RequestId>6a7a282a-d013-4a59-aba9-335b0fa48bed</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</ChangeMessageVisibilityResponse>""" </ChangeMessageVisibilityResponse>"""
CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """<ChangeMessageVisibilityBatchResponse>
<ChangeMessageVisibilityBatchResult>
{% for success_id in success %}
<ChangeMessageVisibilityBatchResultEntry>
<Id>{{ success_id }}</Id>
</ChangeMessageVisibilityBatchResultEntry>
{% endfor %}
{% for error_dict in errors %}
<BatchResultErrorEntry>
<Id>{{ error_dict['Id'] }}</Id>
<Code>{{ error_dict['Code'] }}</Code>
<Message>{{ error_dict['Message'] }}</Message>
<SenderFault>{{ error_dict['SenderFault'] }}</SenderFault>
</BatchResultErrorEntry>
{% endfor %}
</ChangeMessageVisibilityBatchResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</ChangeMessageVisibilityBatchResponse>"""
PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse> PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
<ResponseMetadata> <ResponseMetadata>
<RequestId>6fde8d1e-52cd-4581-8cd9-c512f4c64223</RequestId> <RequestId>{{ requestid }}</RequestId>
</ResponseMetadata> </ResponseMetadata>
</PurgeQueueResponse>""" </PurgeQueueResponse>"""
LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE = """<ListDeadLetterSourceQueuesResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
<ListDeadLetterSourceQueuesResult>
{% for queue in queues %}
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
{% endfor %}
</ListDeadLetterSourceQueuesResult>
<ResponseMetadata>
<RequestId>8ffb921f-b85e-53d9-abcf-d8d0057f38fc</RequestId>
</ResponseMetadata>
</ListDeadLetterSourceQueuesResponse>"""
ADD_PERMISSION_RESPONSE = """<AddPermissionResponse>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</AddPermissionResponse>"""
REMOVE_PERMISSION_RESPONSE = """<RemovePermissionResponse>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</RemovePermissionResponse>"""
TAG_QUEUE_RESPONSE = """<TagQueueResponse>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</TagQueueResponse>"""
UNTAG_QUEUE_RESPONSE = """<UntagQueueResponse>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</UntagQueueResponse>"""
LIST_QUEUE_TAGS_RESPONSE = """<ListQueueTagsResponse>
<ListQueueTagsResult>
{% for key, value in tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</ListQueueTagsResult>
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</ListQueueTagsResponse>"""
ERROR_TOO_LONG_RESPONSE = """<ErrorResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/"> ERROR_TOO_LONG_RESPONSE = """<ErrorResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
<Error> <Error>
<Type>Sender</Type> <Type>Sender</Type>

View File

@ -75,6 +75,21 @@ class SimpleSystemManagerBackend(BaseBackend):
result.append(self._parameters[name]) result.append(self._parameters[name])
return result return result
def get_parameters_by_path(self, path, with_decryption, recursive):
"""Implement the get-parameters-by-path-API in the backend."""
result = []
# path could be with or without a trailing /. we handle this
# difference here.
path = path.rstrip('/') + '/'
for param in self._parameters:
if not param.startswith(path):
continue
if '/' in param[len(path) + 1:] and not recursive:
continue
result.append(self._parameters[param])
return result
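A minimal boto3 sketch of the new operation against the mock (parameter names and values are made up):

    import boto3
    from moto import mock_ssm


    @mock_ssm
    def read_parameters_by_path():
        ssm = boto3.client('ssm', region_name='us-east-1')
        ssm.put_parameter(Name='/app/prod/db_host', Value='db.internal', Type='String')
        ssm.put_parameter(Name='/app/prod/nested/db_port', Value='5432', Type='String')

        # Non-recursive: only direct children of the path are returned.
        direct = ssm.get_parameters_by_path(Path='/app/prod')
        assert [p['Name'] for p in direct['Parameters']] == ['/app/prod/db_host']

        # Recursive: nested parameters are included as well.
        nested = ssm.get_parameters_by_path(Path='/app/prod', Recursive=True)
        assert len(nested['Parameters']) == 2


    read_parameters_by_path()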
def get_parameter(self, name, with_decryption): def get_parameter(self, name, with_decryption):
if name in self._parameters: if name in self._parameters:
return self._parameters[name] return self._parameters[name]

View File

@ -81,6 +81,25 @@ class SimpleSystemManagerResponse(BaseResponse):
response['InvalidParameters'].append(name) response['InvalidParameters'].append(name)
return json.dumps(response) return json.dumps(response)
def get_parameters_by_path(self):
path = self._get_param('Path')
with_decryption = self._get_param('WithDecryption')
recursive = self._get_param('Recursive', False)
result = self.ssm_backend.get_parameters_by_path(
path, with_decryption, recursive
)
response = {
'Parameters': [],
}
for parameter in result:
param_data = parameter.response_object(with_decryption)
response['Parameters'].append(param_data)
return json.dumps(response)
def describe_parameters(self): def describe_parameters(self):
page_size = 10 page_size = 10
filters = self._get_param('Filters') filters = self._get_param('Filters')

40
scripts/get_amis.py Normal file
View File

@ -0,0 +1,40 @@
import boto3
import json
# Taken from the free tier list shown when creating an instance
instances = [
'ami-760aaa0f', 'ami-bb9a6bc2', 'ami-35e92e4c', 'ami-785db401', 'ami-b7e93bce', 'ami-dca37ea5', 'ami-999844e0',
'ami-9b32e8e2', 'ami-f8e54081', 'ami-bceb39c5', 'ami-03cf127a', 'ami-1ecc1e67', 'ami-c2ff2dbb', 'ami-12c6146b',
'ami-d1cb19a8', 'ami-61db0918', 'ami-56ec3e2f', 'ami-84ee3cfd', 'ami-86ee3cff', 'ami-f0e83a89', 'ami-1f12c066',
'ami-afee3cd6', 'ami-1812c061', 'ami-77ed3f0e', 'ami-3bf32142', 'ami-6ef02217', 'ami-f4cf1d8d', 'ami-3df32144',
'ami-c6f321bf', 'ami-24f3215d', 'ami-fa7cdd89', 'ami-1e749f67', 'ami-a9cc1ed0', 'ami-8104a4f8'
]
client = boto3.client('ec2', region_name='eu-west-1')
test = client.describe_images(ImageIds=instances)
result = []
for image in test['Images']:
try:
tmp = {
'ami_id': image['ImageId'],
'name': image['Name'],
'description': image['Description'],
'owner_id': image['OwnerId'],
'public': image['Public'],
'virtualization_type': image['VirtualizationType'],
'architecture': image['Architecture'],
'state': image['State'],
'platform': image.get('Platform'),
'image_type': image['ImageType'],
'hypervisor': image['Hypervisor'],
'root_device_name': image['RootDeviceName'],
'root_device_type': image['RootDeviceType'],
'sriov': image.get('SriovNetSupport', 'simple')
}
result.append(tmp)
except Exception:  # some images lack optional fields such as Description; skip them
pass
print(json.dumps(result, indent=2))

View File

@ -56,14 +56,14 @@ def print_implementation_coverage():
else: else:
percentage_implemented = 0 percentage_implemented = 0
print("-----------------------") print("")
print("{} - {}% implemented".format(service_name, percentage_implemented)) print("## {} - {}% implemented".format(service_name, percentage_implemented))
print("-----------------------")
for op in operations: for op in operations:
if op in implemented: if op in implemented:
print("[X] {}".format(op)) print("- [X] {}".format(op))
else: else:
print("[ ] {}".format(op)) print("- [ ] {}".format(op))
if __name__ == '__main__': if __name__ == '__main__':
print_implementation_coverage() print_implementation_coverage()

View File

@ -81,12 +81,14 @@ def select_service_and_operation():
raise click.Abort() raise click.Abort()
return service_name, operation_name return service_name, operation_name
def get_escaped_service(service):
return service.replace('-', '')
def get_lib_dir(service): def get_lib_dir(service):
return os.path.join('moto', service) return os.path.join('moto', get_escaped_service(service))
def get_test_dir(service): def get_test_dir(service):
return os.path.join('tests', 'test_{}'.format(service)) return os.path.join('tests', 'test_{}'.format(get_escaped_service(service)))
def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None): def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None):
@ -117,7 +119,7 @@ def append_mock_to_init_py(service):
filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)] filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)]
last_import_line_index = lines.index(filtered_lines[-1]) last_import_line_index = lines.index(filtered_lines[-1])
new_line = 'from .{} import mock_{} # flake8: noqa'.format(service, service) new_line = 'from .{} import mock_{} # flake8: noqa'.format(get_escaped_service(service), get_escaped_service(service))
lines.insert(last_import_line_index + 1, new_line) lines.insert(last_import_line_index + 1, new_line)
body = '\n'.join(lines) + '\n' body = '\n'.join(lines) + '\n'
@ -135,7 +137,7 @@ def append_mock_import_to_backends_py(service):
filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)] filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)]
last_import_line_index = lines.index(filtered_lines[-1]) last_import_line_index = lines.index(filtered_lines[-1])
new_line = 'from moto.{} import {}_backends'.format(service, service) new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service))
lines.insert(last_import_line_index + 1, new_line) lines.insert(last_import_line_index + 1, new_line)
body = '\n'.join(lines) + '\n' body = '\n'.join(lines) + '\n'
@ -147,13 +149,12 @@ def append_mock_dict_to_backends_py(service):
with open(path) as f: with open(path) as f:
lines = [_.replace('\n', '') for _ in f.readlines()] lines = [_.replace('\n', '') for _ in f.readlines()]
# 'xray': xray_backends
if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)): if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)):
return return
filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)] filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)]
last_elem_line_index = lines.index(filtered_lines[-1]) last_elem_line_index = lines.index(filtered_lines[-1])
new_line = " '{}': {}_backends,".format(service, service) new_line = " '{}': {}_backends,".format(service, get_escaped_service(service))
prev_line = lines[last_elem_line_index] prev_line = lines[last_elem_line_index]
if not prev_line.endswith('{') and not prev_line.endswith(','): if not prev_line.endswith('{') and not prev_line.endswith(','):
lines[last_elem_line_index] += ',' lines[last_elem_line_index] += ','
@ -166,8 +167,8 @@ def append_mock_dict_to_backends_py(service):
def initialize_service(service, operation, api_protocol): def initialize_service(service, operation, api_protocol):
"""create lib and test dirs if not exist """create lib and test dirs if not exist
""" """
lib_dir = os.path.join('moto', service) lib_dir = get_lib_dir(service)
test_dir = os.path.join('tests', 'test_{}'.format(service)) test_dir = get_test_dir(service)
print_progress('Initializing service', service, 'green') print_progress('Initializing service', service, 'green')
@ -178,7 +179,9 @@ def initialize_service(service, operation, api_protocol):
tmpl_context = { tmpl_context = {
'service': service, 'service': service,
'service_class': service_class, 'service_class': service_class,
'endpoint_prefix': endpoint_prefix 'endpoint_prefix': endpoint_prefix,
'api_protocol': api_protocol,
'escaped_service': get_escaped_service(service)
} }
# initialize service directory # initialize service directory
@ -202,7 +205,7 @@ def initialize_service(service, operation, api_protocol):
os.makedirs(test_dir) os.makedirs(test_dir)
tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
for tmpl_filename in os.listdir(tmpl_dir): for tmpl_filename in os.listdir(tmpl_dir):
alt_filename = 'test_{}.py'.format(service) if tmpl_filename == 'test_service.py.j2' else None alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
render_template( render_template(
tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
) )
@ -212,9 +215,16 @@ def initialize_service(service, operation, api_protocol):
append_mock_import_to_backends_py(service) append_mock_import_to_backends_py(service)
append_mock_dict_to_backends_py(service) append_mock_dict_to_backends_py(service)
def to_upper_camel_case(s): def to_upper_camel_case(s):
return ''.join([_.title() for _ in s.split('_')]) return ''.join([_.title() for _ in s.split('_')])
def to_lower_camel_case(s):
words = s.split('_')
return ''.join(words[:1] + [_.title() for _ in words[1:]])
def to_snake_case(s): def to_snake_case(s):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
@ -229,25 +239,28 @@ def get_function_in_responses(service, operation, protocol):
aws_operation_name = to_upper_camel_case(operation) aws_operation_name = to_upper_camel_case(operation)
op_model = client._service_model.operation_model(aws_operation_name) op_model = client._service_model.operation_model(aws_operation_name)
outputs = op_model.output_shape.members if not hasattr(op_model.output_shape, 'members'):
outputs = {}
else:
outputs = op_model.output_shape.members
inputs = op_model.input_shape.members inputs = op_model.input_shape.members
input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
body = 'def {}(self):\n'.format(operation) body = '\ndef {}(self):\n'.format(operation)
for input_name, input_type in inputs.items(): for input_name, input_type in inputs.items():
type_name = input_type.type_name type_name = input_type.type_name
if type_name == 'integer': if type_name == 'integer':
arg_line_tmpl = ' {} = _get_int_param("{}")\n' arg_line_tmpl = ' {} = self._get_int_param("{}")\n'
elif type_name == 'list': elif type_name == 'list':
arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n' arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n'
else: else:
arg_line_tmpl = ' {} = self._get_param("{}")\n' arg_line_tmpl = ' {} = self._get_param("{}")\n'
body += arg_line_tmpl.format(to_snake_case(input_name), input_name) body += arg_line_tmpl.format(to_snake_case(input_name), input_name)
if output_names: if output_names:
body += ' {} = self.{}_backend.{}(\n'.format(','.join(output_names), service, operation) body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation)
else: else:
body += ' self.{}_backend.{}(\n'.format(service, operation) body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation)
for input_name in input_names: for input_name in input_names:
body += ' {}={},\n'.format(input_name, input_name) body += ' {}={},\n'.format(input_name, input_name)
@ -255,11 +268,11 @@ def get_function_in_responses(service, operation, protocol):
if protocol == 'query': if protocol == 'query':
body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper())
body += ' return template.render({})\n'.format( body += ' return template.render({})\n'.format(
','.join(['{}={}'.format(_, _) for _ in output_names]) ', '.join(['{}={}'.format(_, _) for _ in output_names])
) )
elif protocol == 'json': elif protocol in ['json', 'rest-json']:
body += ' # TODO: adjust reponse\n' body += ' # TODO: adjust response\n'
body += ' return json.dumps({})\n'.format(','.join(['{}={}'.format(_, _) for _ in output_names])) body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names]))
return body return body
@ -272,7 +285,10 @@ def get_function_in_models(service, operation):
aws_operation_name = to_upper_camel_case(operation) aws_operation_name = to_upper_camel_case(operation)
op_model = client._service_model.operation_model(aws_operation_name) op_model = client._service_model.operation_model(aws_operation_name)
inputs = op_model.input_shape.members inputs = op_model.input_shape.members
outputs = op_model.output_shape.members if not hasattr(op_model.output_shape, 'members'):
outputs = {}
else:
outputs = op_model.output_shape.members
input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
if input_names: if input_names:
@ -280,7 +296,7 @@ def get_function_in_models(service, operation):
else: else:
body = 'def {}(self)\n' body = 'def {}(self)\n'
body += ' # implement here\n' body += ' # implement here\n'
body += ' return {}\n'.format(', '.join(output_names)) body += ' return {}\n\n'.format(', '.join(output_names))
return body return body
@ -388,13 +404,13 @@ def insert_code_to_class(path, base_class, new_code):
f.write(body) f.write(body)
def insert_url(service, operation): def insert_url(service, operation, api_protocol):
client = boto3.client(service) client = boto3.client(service)
service_class = client.__class__.__name__ service_class = client.__class__.__name__
aws_operation_name = to_upper_camel_case(operation) aws_operation_name = to_upper_camel_case(operation)
uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] uri = client._service_model.operation_model(aws_operation_name).http['requestUri']
path = os.path.join(os.path.dirname(__file__), '..', 'moto', service, 'urls.py') path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py')
with open(path) as f: with open(path) as f:
lines = [_.replace('\n', '') for _ in f.readlines()] lines = [_.replace('\n', '') for _ in f.readlines()]
@ -413,81 +429,55 @@ def insert_url(service, operation):
if not prev_line.endswith('{') and not prev_line.endswith(','): if not prev_line.endswith('{') and not prev_line.endswith(','):
lines[last_elem_line_index] += ',' lines[last_elem_line_index] += ','
new_line = " '{0}%s$': %sResponse.dispatch," % ( # generate url pattern
uri, service_class if api_protocol == 'rest-json':
) new_line = " '{0}/.*$': response.dispatch,"
else:
new_line = " '{0}%s$': %sResponse.dispatch," % (
uri, service_class
)
if new_line in lines:
return
lines.insert(last_elem_line_index + 1, new_line) lines.insert(last_elem_line_index + 1, new_line)
body = '\n'.join(lines) + '\n' body = '\n'.join(lines) + '\n'
with open(path, 'w') as f: with open(path, 'w') as f:
f.write(body) f.write(body)
def insert_codes(service, operation, api_protocol):
def insert_query_codes(service, operation): func_in_responses = get_function_in_responses(service, operation, api_protocol)
func_in_responses = get_function_in_responses(service, operation, 'query')
func_in_models = get_function_in_models(service, operation) func_in_models = get_function_in_models(service, operation)
template = get_response_query_template(service, operation)
# edit responses.py # edit responses.py
responses_path = 'moto/{}/responses.py'.format(service) responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service))
print_progress('inserting code', responses_path, 'green') print_progress('inserting code', responses_path, 'green')
insert_code_to_class(responses_path, BaseResponse, func_in_responses) insert_code_to_class(responses_path, BaseResponse, func_in_responses)
# insert template # insert template
with open(responses_path) as f: if api_protocol == 'query':
lines = [_[:-1] for _ in f.readlines()] template = get_response_query_template(service, operation)
lines += template.splitlines() with open(responses_path) as f:
with open(responses_path, 'w') as f: lines = [_[:-1] for _ in f.readlines()]
f.write('\n'.join(lines)) lines += template.splitlines()
with open(responses_path, 'w') as f:
f.write('\n'.join(lines))
# edit models.py # edit models.py
models_path = 'moto/{}/models.py'.format(service) models_path = 'moto/{}/models.py'.format(get_escaped_service(service))
print_progress('inserting code', models_path, 'green') print_progress('inserting code', models_path, 'green')
insert_code_to_class(models_path, BaseBackend, func_in_models) insert_code_to_class(models_path, BaseBackend, func_in_models)
# edit urls.py # edit urls.py
insert_url(service, operation) insert_url(service, operation, api_protocol)
def insert_json_codes(service, operation):
func_in_responses = get_function_in_responses(service, operation, 'json')
func_in_models = get_function_in_models(service, operation)
# edit responses.py
responses_path = 'moto/{}/responses.py'.format(service)
print_progress('inserting code', responses_path, 'green')
insert_code_to_class(responses_path, BaseResponse, func_in_responses)
# edit models.py
models_path = 'moto/{}/models.py'.format(service)
print_progress('inserting code', models_path, 'green')
insert_code_to_class(models_path, BaseBackend, func_in_models)
# edit urls.py
insert_url(service, operation)
def insert_restjson_codes(service, operation):
func_in_models = get_function_in_models(service, operation)
print_progress('skipping inserting code to responses.py', "dont't know how to implement", 'yellow')
# edit models.py
models_path = 'moto/{}/models.py'.format(service)
print_progress('inserting code', models_path, 'green')
insert_code_to_class(models_path, BaseBackend, func_in_models)
# edit urls.py
insert_url(service, operation)
@click.command() @click.command()
def main(): def main():
service, operation = select_service_and_operation() service, operation = select_service_and_operation()
api_protocol = boto3.client(service)._service_model.metadata['protocol'] api_protocol = boto3.client(service)._service_model.metadata['protocol']
initialize_service(service, operation, api_protocol) initialize_service(service, operation, api_protocol)
if api_protocol == 'query':
insert_query_codes(service, operation) if api_protocol in ['query', 'json', 'rest-json']:
elif api_protocol == 'json': insert_codes(service, operation, api_protocol)
insert_json_codes(service, operation)
elif api_protocol == 'rest-json':
insert_restjson_codes(service, operation)
else: else:
print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow')

View File

@ -1,7 +1,7 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from .models import {{ service }}_backends from .models import {{ escaped_service }}_backends
from ..core.models import base_decorator from ..core.models import base_decorator
{{ service }}_backend = {{ service }}_backends['us-east-1'] {{ escaped_service }}_backend = {{ escaped_service }}_backends['us-east-1']
mock_{{ service }} = base_decorator({{ service }}_backends) mock_{{ escaped_service }} = base_decorator({{ escaped_service }}_backends)

View File

@ -17,4 +17,4 @@ class {{ service_class }}Backend(BaseBackend):
available_regions = boto3.session.Session().get_available_regions("{{ service }}") available_regions = boto3.session.Session().get_available_regions("{{ service }}")
{{ service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions} {{ escaped_service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions}

View File

@ -1,12 +1,14 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from moto.core.responses import BaseResponse from moto.core.responses import BaseResponse
from .models import {{ service }}_backends from .models import {{ escaped_service }}_backends
import json
class {{ service_class }}Response(BaseResponse): class {{ service_class }}Response(BaseResponse):
SERVICE_NAME = '{{ service }}'
@property @property
def {{ service }}_backend(self): def {{ escaped_service }}_backend(self):
return {{ service }}_backends[self.region] return {{ escaped_service }}_backends[self.region]
# add methods from here # add methods from here

View File

@ -5,5 +5,9 @@ url_bases = [
"https?://{{ endpoint_prefix }}.(.+).amazonaws.com", "https?://{{ endpoint_prefix }}.(.+).amazonaws.com",
] ]
{% if api_protocol == 'rest-json' %}
response = {{ service_class }}Response()
{% endif %}
url_paths = { url_paths = {
} }

View File

@ -3,14 +3,14 @@ from __future__ import unicode_literals
import sure # noqa import sure # noqa
import moto.server as server import moto.server as server
from moto import mock_{{ service }} from moto import mock_{{ escaped_service }}
''' '''
Test the different server responses Test the different server responses
''' '''
@mock_{{ service }} @mock_{{ escaped_service }}
def test_{{ service }}_list(): def test_{{ escaped_service }}_list():
backend = server.create_backend_app("{{ service }}") backend = server.create_backend_app("{{ service }}")
test_client = backend.test_client() test_client = backend.test_client()
# do test # do test

View File

@ -2,10 +2,10 @@ from __future__ import unicode_literals
import boto3 import boto3
import sure # noqa import sure # noqa
from moto import mock_{{ service }} from moto import mock_{{ escaped_service }}
@mock_{{ service }} @mock_{{ escaped_service }}
def test_list(): def test_list():
# do test # do test
pass pass

View File

@ -21,7 +21,8 @@ install_requires = [
"python-dateutil<3.0.0,>=2.1", "python-dateutil<3.0.0,>=2.1",
"mock", "mock",
"docker>=2.5.1", "docker>=2.5.1",
"aws-xray-sdk>=0.93" "jsondiff==1.1.1",
"aws-xray-sdk>=0.93",
] ]
extras_require = { extras_require = {
@ -38,7 +39,7 @@ else:
setup( setup(
name='moto', name='moto',
version='1.1.23', version='1.1.24',
description='A library that allows your python tests to easily' description='A library that allows your python tests to easily'
' mock out the boto library', ' mock out the boto library',
author='Steve Pulec', author='Steve Pulec',

View File

@ -4,6 +4,7 @@ import os
import boto3 import boto3
from freezegun import freeze_time from freezegun import freeze_time
import sure # noqa import sure # noqa
import uuid
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
@ -281,11 +282,23 @@ def test_resend_validation_email_invalid():
def test_request_certificate(): def test_request_certificate():
client = boto3.client('acm', region_name='eu-central-1') client = boto3.client('acm', region_name='eu-central-1')
token = str(uuid.uuid4())
resp = client.request_certificate( resp = client.request_certificate(
DomainName='google.com', DomainName='google.com',
IdempotencyToken=token,
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
) )
resp.should.contain('CertificateArn') resp.should.contain('CertificateArn')
arn = resp['CertificateArn']
resp = client.request_certificate(
DomainName='google.com',
IdempotencyToken=token,
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
)
resp['CertificateArn'].should.equal(arn)
@mock_acm @mock_acm
def test_request_certificate_no_san(): def test_request_certificate_no_san():

View File

@ -488,6 +488,7 @@ def lambda_handler(event, context):
assert 'FunctionError' in result assert 'FunctionError' in result
assert result['FunctionError'] == 'Handled' assert result['FunctionError'] == 'Handled'
@mock_lambda @mock_lambda
@mock_s3 @mock_s3
def test_tags(): def test_tags():
@ -554,6 +555,7 @@ def test_tags():
TagKeys=['spam'] TagKeys=['spam']
)['ResponseMetadata']['HTTPStatusCode'].should.equal(204) )['ResponseMetadata']['HTTPStatusCode'].should.equal(204)
@mock_lambda @mock_lambda
def test_tags_not_found(): def test_tags_not_found():
""" """
@ -574,6 +576,7 @@ def test_tags_not_found():
TagKeys=['spam'] TagKeys=['spam']
).should.throw(botocore.client.ClientError) ).should.throw(botocore.client.ClientError)
@mock_lambda @mock_lambda
def test_invoke_async_function(): def test_invoke_async_function():
conn = boto3.client('lambda', 'us-west-2') conn = boto3.client('lambda', 'us-west-2')
@ -581,10 +584,8 @@ def test_invoke_async_function():
FunctionName='testFunction', FunctionName='testFunction',
Runtime='python2.7', Runtime='python2.7',
Role='test-iam-role', Role='test-iam-role',
Handler='lambda_function.handler', Handler='lambda_function.lambda_handler',
Code={ Code={'ZipFile': get_test_zip_file1()},
'ZipFile': get_test_zip_file1(),
},
Description='test lambda function', Description='test lambda function',
Timeout=3, Timeout=3,
MemorySize=128, MemorySize=128,
@ -593,11 +594,12 @@ def test_invoke_async_function():
success_result = conn.invoke_async( success_result = conn.invoke_async(
FunctionName='testFunction', FunctionName='testFunction',
InvokeArgs=json.dumps({ 'test': 'event' }) InvokeArgs=json.dumps({'test': 'event'})
) )
success_result['Status'].should.equal(202) success_result['Status'].should.equal(202)
@mock_lambda @mock_lambda
@freeze_time('2015-01-01 00:00:00') @freeze_time('2015-01-01 00:00:00')
def test_get_function_created_with_zipfile(): def test_get_function_created_with_zipfile():
@ -646,6 +648,7 @@ def test_get_function_created_with_zipfile():
}, },
) )
@mock_lambda @mock_lambda
def add_function_permission(): def add_function_permission():
conn = boto3.client('lambda', 'us-west-2') conn = boto3.client('lambda', 'us-west-2')

View File

@ -38,7 +38,7 @@ from moto import (
mock_sns_deprecated, mock_sns_deprecated,
mock_sqs, mock_sqs,
mock_sqs_deprecated, mock_sqs_deprecated,
) mock_elbv2)
from .fixtures import ( from .fixtures import (
ec2_classic_eip, ec2_classic_eip,
@ -2111,3 +2111,158 @@ def test_stack_spot_fleet():
launch_spec['SubnetId'].should.equal(subnet_id) launch_spec['SubnetId'].should.equal(subnet_id)
launch_spec['SpotPrice'].should.equal("0.13") launch_spec['SpotPrice'].should.equal("0.13")
launch_spec['WeightedCapacity'].should.equal(2.0) launch_spec['WeightedCapacity'].should.equal(2.0)
@mock_ec2
@mock_elbv2
@mock_cloudformation
def test_stack_elbv2_resources_integration():
alb_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Outputs": {
"albdns": {
"Description": "Load balanacer DNS",
"Value": {"Fn::GetAtt": ["alb", "DNSName"]},
},
"albname": {
"Description": "Load balancer name",
"Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]},
},
},
"Resources": {
"alb": {
"Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
"Properties": {
"Name": "myelbv2",
"Scheme": "internet-facing",
"Subnets": [{
"Ref": "mysubnet",
}],
"SecurityGroups": [{
"Ref": "mysg",
}],
"Type": "application",
"IpAddressType": "ipv4",
}
},
"mytargetgroup": {
"Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
"Properties": {
"HealthCheckIntervalSeconds": 30,
"HealthCheckPath": "/status",
"HealthCheckPort": 80,
"HealthCheckProtocol": "HTTP",
"HealthCheckTimeoutSeconds": 5,
"HealthyThresholdCount": 30,
"UnhealthyThresholdCount": 5,
"Matcher": {
"HttpCode": "200,201"
},
"Name": "mytargetgroup",
"Port": 80,
"Protocol": "HTTP",
"TargetType": "instance",
"Targets": [{
"Id": {
"Ref": "ec2instance",
"Port": 80,
},
}],
"VpcId": {
"Ref": "myvpc",
}
}
},
"listener": {
"Type": "AWS::ElasticLoadBalancingV2::Listener",
"Properties": {
"DefaultActions": [{
"Type": "forward",
"TargetGroupArn": {"Ref": "mytargetgroup"}
}],
"LoadBalancerArn": {"Ref": "alb"},
"Port": "80",
"Protocol": "HTTP"
}
},
"myvpc": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": "10.0.0.0/16",
}
},
"mysubnet": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"CidrBlock": "10.0.0.0/27",
"VpcId": {"Ref": "myvpc"},
}
},
"mysg": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupName": "mysg",
"GroupDescription": "test security group",
"VpcId": {"Ref": "myvpc"}
}
},
"ec2instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-1234abcd",
"UserData": "some user data",
}
},
},
}
alb_template_json = json.dumps(alb_template)
cfn_conn = boto3.client("cloudformation", "us-west-1")
cfn_conn.create_stack(
StackName="elb_stack",
TemplateBody=alb_template_json,
)
elbv2_conn = boto3.client("elbv2", "us-west-1")
load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers']
len(load_balancers).should.equal(1)
load_balancers[0]['LoadBalancerName'].should.equal('myelbv2')
load_balancers[0]['Scheme'].should.equal('internet-facing')
load_balancers[0]['Type'].should.equal('application')
load_balancers[0]['IpAddressType'].should.equal('ipv4')
target_groups = elbv2_conn.describe_target_groups()['TargetGroups']
len(target_groups).should.equal(1)
target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30)
target_groups[0]['HealthCheckPath'].should.equal('/status')
target_groups[0]['HealthCheckPort'].should.equal('80')
target_groups[0]['HealthCheckProtocol'].should.equal('HTTP')
target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5)
target_groups[0]['HealthyThresholdCount'].should.equal(30)
target_groups[0]['UnhealthyThresholdCount'].should.equal(5)
target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'})
target_groups[0]['TargetGroupName'].should.equal('mytargetgroup')
target_groups[0]['Port'].should.equal(80)
target_groups[0]['Protocol'].should.equal('HTTP')
target_groups[0]['TargetType'].should.equal('instance')
listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners']
len(listeners).should.equal(1)
listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn'])
listeners[0]['Port'].should.equal(80)
listeners[0]['Protocol'].should.equal('HTTP')
listeners[0]['DefaultActions'].should.equal([{
"Type": "forward",
"TargetGroupArn": target_groups[0]['TargetGroupArn']
}])
# test outputs
stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks']
len(stacks).should.equal(1)
dns = list(filter(lambda item: item['OutputKey'] == 'albdns', stacks[0]['Outputs']))[0]
name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0]
dns['OutputValue'].should.equal(load_balancers[0]['DNSName'])
name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName'])

View File

@ -118,12 +118,3 @@ def test_describe_alarms():
alarms = conn.describe_alarms() alarms = conn.describe_alarms()
alarms.should.have.length_of(0) alarms.should.have.length_of(0)
@mock_cloudwatch_deprecated
def test_describe_state_value_unimplemented():
conn = boto.connect_cloudwatch()
conn.describe_alarms()
conn.describe_alarms.when.called_with(
state_value="foo").should.throw(NotImplementedError)

View File

@ -87,6 +87,54 @@ def test_get_dashboard_fail():
raise RuntimeError('Should have raised an error')
@mock_cloudwatch
def test_alarm_state():
client = boto3.client('cloudwatch', region_name='eu-central-1')
client.put_metric_alarm(
AlarmName='testalarm1',
MetricName='cpu',
Namespace='blah',
Period=10,
EvaluationPeriods=5,
Statistic='Average',
Threshold=2,
ComparisonOperator='GreaterThanThreshold',
)
client.put_metric_alarm(
AlarmName='testalarm2',
MetricName='cpu',
Namespace='blah',
Period=10,
EvaluationPeriods=5,
Statistic='Average',
Threshold=2,
ComparisonOperator='GreaterThanThreshold',
)
# set_alarm_state is tested implicitly: if it doesn't work, the rest of the test will fail
client.set_alarm_state(
AlarmName='testalarm1',
StateValue='ALARM',
StateReason='testreason',
StateReasonData='{"some": "json_data"}'
)
resp = client.describe_alarms(
StateValue='ALARM'
)
len(resp['MetricAlarms']).should.equal(1)
resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1')
resp = client.describe_alarms(
StateValue='OK'
)
len(resp['MetricAlarms']).should.equal(1)
resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2')
# Just for sanity
resp = client.describe_alarms()
len(resp['MetricAlarms']).should.equal(2)

View File

@ -28,13 +28,13 @@ except ImportError:
@mock_dynamodb2_deprecated @mock_dynamodb2_deprecated
def test_list_tables(): def test_list_tables():
name = 'TestTable' name = 'TestTable'
#{'schema': } # Should make tables properly with boto
dynamodb_backend2.create_table(name, schema=[ dynamodb_backend2.create_table(name, schema=[
{u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, {u'KeyType': u'HASH', u'AttributeName': u'forum_name'},
{u'KeyType': u'RANGE', u'AttributeName': u'subject'} {u'KeyType': u'RANGE', u'AttributeName': u'subject'}
]) ])
conn = boto.dynamodb2.connect_to_region( conn = boto.dynamodb2.connect_to_region(
'us-west-2', 'us-east-1',
aws_access_key_id="ak", aws_access_key_id="ak",
aws_secret_access_key="sk") aws_secret_access_key="sk")
assert conn.list_tables()["TableNames"] == [name] assert conn.list_tables()["TableNames"] == [name]
@ -43,6 +43,7 @@ def test_list_tables():
@requires_boto_gte("2.9") @requires_boto_gte("2.9")
@mock_dynamodb2_deprecated @mock_dynamodb2_deprecated
def test_list_tables_layer_1(): def test_list_tables_layer_1():
# Should make tables properly with boto
dynamodb_backend2.create_table("test_1", schema=[ dynamodb_backend2.create_table("test_1", schema=[
{u'KeyType': u'HASH', u'AttributeName': u'name'} {u'KeyType': u'HASH', u'AttributeName': u'name'}
]) ])
@ -50,7 +51,7 @@ def test_list_tables_layer_1():
{u'KeyType': u'HASH', u'AttributeName': u'name'} {u'KeyType': u'HASH', u'AttributeName': u'name'}
]) ])
conn = boto.dynamodb2.connect_to_region( conn = boto.dynamodb2.connect_to_region(
'us-west-2', 'us-east-1',
aws_access_key_id="ak", aws_access_key_id="ak",
aws_secret_access_key="sk") aws_secret_access_key="sk")
@ -88,12 +89,22 @@ def test_list_table_tags():
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5})
table_description = conn.describe_table(TableName=name) table_description = conn.describe_table(TableName=name)
arn = table_description['Table']['TableArn'] arn = table_description['Table']['TableArn']
tags = [{'Key':'TestTag', 'Value': 'TestValue'}]
conn.tag_resource(ResourceArn=arn, # Tag table
Tags=tags) tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}]
conn.tag_resource(ResourceArn=arn, Tags=tags)
# Check tags
resp = conn.list_tags_of_resource(ResourceArn=arn) resp = conn.list_tags_of_resource(ResourceArn=arn)
assert resp["Tags"] == tags assert resp["Tags"] == tags
# Remove 1 tag
conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag'])
# Check tags
resp = conn.list_tags_of_resource(ResourceArn=arn)
assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}]
@requires_boto_gte("2.9") @requires_boto_gte("2.9")
@mock_dynamodb2 @mock_dynamodb2
@ -356,10 +367,21 @@ def test_basic_projection_expressions():
) )
assert 'body' in results['Items'][0] assert 'body' in results['Items'][0]
assert 'subject' not in results['Items'][0]
assert results['Items'][0]['body'] == 'some test message' assert results['Items'][0]['body'] == 'some test message'
assert 'body' in results['Items'][1] assert 'body' in results['Items'][1]
assert 'subject' not in results['Items'][1]
assert results['Items'][1]['body'] == 'yet another test message' assert results['Items'][1]['body'] == 'yet another test message'
# The projection expression should not remove data from storage
results = table.query(
KeyConditionExpression=Key('forum_name').eq(
'the-key'),
)
assert 'subject' in results['Items'][0]
assert 'body' in results['Items'][1]
assert 'forum_name' in results['Items'][1]
@mock_dynamodb2 @mock_dynamodb2
def test_basic_projection_expressions_with_attr_expression_names(): def test_basic_projection_expressions_with_attr_expression_names():
@ -638,6 +660,47 @@ def test_filter_expression():
filter_expr.expr(row1).should.be(True) filter_expr.expr(row1).should.be(True)
@mock_dynamodb2
def test_query_filter():
client = boto3.client('dynamodb', region_name='us-east-1')
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app1'}
}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'S': 'app2'}
}
)
table = dynamodb.Table('test1')
response = table.query(
KeyConditionExpression=Key('client').eq('client1')
)
assert response['Count'] == 2
response = table.query(
KeyConditionExpression=Key('client').eq('client1'),
FilterExpression=Attr('app').eq('app2')
)
assert response['Count'] == 1
assert response['Items'][0]['app'] == 'app2'
@mock_dynamodb2 @mock_dynamodb2
def test_scan_filter(): def test_scan_filter():
client = boto3.client('dynamodb', region_name='us-east-1') client = boto3.client('dynamodb', region_name='us-east-1')
@ -868,3 +931,78 @@ def test_delete_item():
response = table.scan() response = table.scan()
assert response['Count'] == 0 assert response['Count'] == 0
@mock_dynamodb2
def test_describe_limits():
client = boto3.client('dynamodb', region_name='eu-central-1')
resp = client.describe_limits()
resp['AccountMaxReadCapacityUnits'].should.equal(20000)
resp['AccountMaxWriteCapacityUnits'].should.equal(20000)
resp['TableMaxWriteCapacityUnits'].should.equal(10000)
resp['TableMaxReadCapacityUnits'].should.equal(10000)
@mock_dynamodb2
def test_set_ttl():
client = boto3.client('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
)
client.update_time_to_live(
TableName='test1',
TimeToLiveSpecification={
'Enabled': True,
'AttributeName': 'expire'
}
)
resp = client.describe_time_to_live(TableName='test1')
resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED')
resp['TimeToLiveDescription']['AttributeName'].should.equal('expire')
client.update_time_to_live(
TableName='test1',
TimeToLiveSpecification={
'Enabled': False,
'AttributeName': 'expire'
}
)
resp = client.describe_time_to_live(TableName='test1')
resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED')
# https://github.com/spulec/moto/issues/1043
@mock_dynamodb2
def test_query_missing_expr_names():
client = boto3.client('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
)
client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}})
client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}})
resp = client.query(TableName='test1', KeyConditionExpression='client=:client',
ExpressionAttributeValues={':client': {'S': 'test1'}})
resp['Count'].should.equal(1)
resp['Items'][0]['client']['S'].should.equal('test1')
resp = client.query(TableName='test1', KeyConditionExpression=':name=test2',
ExpressionAttributeNames={':name': 'client'})
resp['Count'].should.equal(1)
resp['Items'][0]['client']['S'].should.equal('test2')
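For reference, the DynamoDB API proper writes name placeholders with a leading '#' and value placeholders with a leading ':'; the test above exercises moto's handling of the looser form from issue 1043. A minimal sketch of the canonical call shape against the same hypothetical 'test1' table (not asserting anything about moto's behaviour):
# Canonical placeholder usage: '#' prefixes attribute-name aliases, ':' prefixes values.
resp = client.query(
    TableName='test1',
    KeyConditionExpression='#c = :client',
    ExpressionAttributeNames={'#c': 'client'},
    ExpressionAttributeValues={':client': {'S': 'test2'}},
)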

View File

@ -54,7 +54,7 @@ def test_create_table():
} }
} }
conn = boto.dynamodb2.connect_to_region( conn = boto.dynamodb2.connect_to_region(
'us-west-2', 'us-east-1',
aws_access_key_id="ak", aws_access_key_id="ak",
aws_secret_access_key="sk" aws_secret_access_key="sk"
) )
@ -425,7 +425,7 @@ def test_get_special_item():
@mock_dynamodb2_deprecated @mock_dynamodb2_deprecated
def test_update_item_remove(): def test_update_item_remove():
conn = boto.dynamodb2.connect_to_region("us-west-2") conn = boto.dynamodb2.connect_to_region("us-east-1")
table = Table.create('messages', schema=[ table = Table.create('messages', schema=[
HashKey('username') HashKey('username')
]) ])
@ -452,7 +452,7 @@ def test_update_item_remove():
@mock_dynamodb2_deprecated @mock_dynamodb2_deprecated
def test_update_item_set(): def test_update_item_set():
conn = boto.dynamodb2.connect_to_region("us-west-2") conn = boto.dynamodb2.connect_to_region("us-east-1")
table = Table.create('messages', schema=[ table = Table.create('messages', schema=[
HashKey('username') HashKey('username')
]) ])

View File

@ -666,10 +666,6 @@ def test_ami_attribute_error_cases():
cm.exception.request_id.should_not.be.none cm.exception.request_id.should_not.be.none
"""
Boto3
"""
@mock_ec2 @mock_ec2
def test_ami_filter_wildcard(): def test_ami_filter_wildcard():
ec2 = boto3.resource('ec2', region_name='us-west-1') ec2 = boto3.resource('ec2', region_name='us-west-1')
@ -678,3 +674,20 @@ def test_ami_filter_wildcard():
filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}])) filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}]))
assert filter_result == [image] assert filter_result == [image]
@mock_ec2
def test_ami_filter_by_owner_id():
client = boto3.client('ec2', region_name='us-east-1')
ubuntu_id = '099720109477'
ubuntu_images = client.describe_images(Owners=[ubuntu_id])
all_images = client.describe_images()
ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']]
all_ids = [ami['OwnerId'] for ami in all_images['Images']]
# Assert all ubuntu_ids are the same and one equals ubuntu_id
assert ubuntu_ids and all(owner_id == ubuntu_id for owner_id in ubuntu_ids)
# Check we actually have a subset of images
assert len(ubuntu_ids) < len(all_ids)

View File

@ -17,12 +17,14 @@ def test_create_and_delete_volume():
volume = conn.create_volume(80, "us-east-1a") volume = conn.create_volume(80, "us-east-1a")
all_volumes = conn.get_all_volumes() all_volumes = conn.get_all_volumes()
all_volumes.should.have.length_of(1)
all_volumes[0].size.should.equal(80)
all_volumes[0].zone.should.equal("us-east-1a")
all_volumes[0].encrypted.should.be(False)
volume = all_volumes[0] current_volume = [item for item in all_volumes if item.id == volume.id]
current_volume.should.have.length_of(1)
current_volume[0].size.should.equal(80)
current_volume[0].zone.should.equal("us-east-1a")
current_volume[0].encrypted.should.be(False)
volume = current_volume[0]
with assert_raises(EC2ResponseError) as ex: with assert_raises(EC2ResponseError) as ex:
volume.delete(dry_run=True) volume.delete(dry_run=True)
@ -33,7 +35,9 @@ def test_create_and_delete_volume():
volume.delete() volume.delete()
conn.get_all_volumes().should.have.length_of(0) all_volumes = conn.get_all_volumes()
my_volume = [item for item in all_volumes if item.id == volume.id]
my_volume.should.have.length_of(0)
# Deleting something that was already deleted should throw an error # Deleting something that was already deleted should throw an error
with assert_raises(EC2ResponseError) as cm: with assert_raises(EC2ResponseError) as cm:
@ -57,7 +61,7 @@ def test_create_encrypted_volume_dryrun():
@mock_ec2_deprecated @mock_ec2_deprecated
def test_create_encrypted_volume(): def test_create_encrypted_volume():
conn = boto.connect_ec2('the_key', 'the_secret') conn = boto.connect_ec2('the_key', 'the_secret')
conn.create_volume(80, "us-east-1a", encrypted=True) volume = conn.create_volume(80, "us-east-1a", encrypted=True)
with assert_raises(EC2ResponseError) as ex: with assert_raises(EC2ResponseError) as ex:
conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True)
@ -66,7 +70,7 @@ def test_create_encrypted_volume():
ex.exception.message.should.equal( ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set')
all_volumes = conn.get_all_volumes() all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id]
all_volumes[0].encrypted.should.be(True) all_volumes[0].encrypted.should.be(True)
@ -116,67 +120,69 @@ def test_volume_filters():
block_mapping = instance.block_device_mapping['/dev/sda1'] block_mapping = instance.block_device_mapping['/dev/sda1']
volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id)
volumes_by_attach_time = conn.get_all_volumes( volumes_by_attach_time = conn.get_all_volumes(
filters={'attachment.attach-time': block_mapping.attach_time}) filters={'attachment.attach-time': block_mapping.attach_time})
set([vol.id for vol in volumes_by_attach_time] set([vol.id for vol in volumes_by_attach_time]
).should.equal(set([block_mapping.volume_id])) ).should.equal({block_mapping.volume_id})
volumes_by_attach_device = conn.get_all_volumes( volumes_by_attach_device = conn.get_all_volumes(
filters={'attachment.device': '/dev/sda1'}) filters={'attachment.device': '/dev/sda1'})
set([vol.id for vol in volumes_by_attach_device] set([vol.id for vol in volumes_by_attach_device]
).should.equal(set([block_mapping.volume_id])) ).should.equal({block_mapping.volume_id})
volumes_by_attach_instance_id = conn.get_all_volumes( volumes_by_attach_instance_id = conn.get_all_volumes(
filters={'attachment.instance-id': instance.id}) filters={'attachment.instance-id': instance.id})
set([vol.id for vol in volumes_by_attach_instance_id] set([vol.id for vol in volumes_by_attach_instance_id]
).should.equal(set([block_mapping.volume_id])) ).should.equal({block_mapping.volume_id})
volumes_by_attach_status = conn.get_all_volumes( volumes_by_attach_status = conn.get_all_volumes(
filters={'attachment.status': 'attached'}) filters={'attachment.status': 'attached'})
set([vol.id for vol in volumes_by_attach_status] set([vol.id for vol in volumes_by_attach_status]
).should.equal(set([block_mapping.volume_id])) ).should.equal({block_mapping.volume_id})
volumes_by_create_time = conn.get_all_volumes( volumes_by_create_time = conn.get_all_volumes(
filters={'create-time': volume4.create_time}) filters={'create-time': volume4.create_time})
set([vol.create_time for vol in volumes_by_create_time] set([vol.create_time for vol in volumes_by_create_time]
).should.equal(set([volume4.create_time])) ).should.equal({volume4.create_time})
volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size})
set([vol.id for vol in volumes_by_size]).should.equal(set([volume2.id])) set([vol.id for vol in volumes_by_size]).should.equal({volume2.id})
volumes_by_snapshot_id = conn.get_all_volumes( volumes_by_snapshot_id = conn.get_all_volumes(
filters={'snapshot-id': snapshot.id}) filters={'snapshot-id': snapshot.id})
set([vol.id for vol in volumes_by_snapshot_id] set([vol.id for vol in volumes_by_snapshot_id]
).should.equal(set([volume4.id])) ).should.equal({volume4.id})
volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'})
set([vol.id for vol in volumes_by_status]).should.equal( set([vol.id for vol in volumes_by_status]).should.equal(
set([block_mapping.volume_id])) {block_mapping.volume_id})
volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id})
set([vol.id for vol in volumes_by_id]).should.equal(set([volume1.id])) set([vol.id for vol in volumes_by_id]).should.equal({volume1.id})
volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'})
set([vol.id for vol in volumes_by_tag_key]).should.equal(set([volume1.id])) set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id})
volumes_by_tag_value = conn.get_all_volumes( volumes_by_tag_value = conn.get_all_volumes(
filters={'tag-value': 'testvalue1'}) filters={'tag-value': 'testvalue1'})
set([vol.id for vol in volumes_by_tag_value] set([vol.id for vol in volumes_by_tag_value]
).should.equal(set([volume1.id])) ).should.equal({volume1.id})
volumes_by_tag = conn.get_all_volumes( volumes_by_tag = conn.get_all_volumes(
filters={'tag:testkey1': 'testvalue1'}) filters={'tag:testkey1': 'testvalue1'})
set([vol.id for vol in volumes_by_tag]).should.equal(set([volume1.id])) set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id})
volumes_by_unencrypted = conn.get_all_volumes( volumes_by_unencrypted = conn.get_all_volumes(
filters={'encrypted': 'false'}) filters={'encrypted': 'false'})
set([vol.id for vol in volumes_by_unencrypted]).should.equal( set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal(
set([block_mapping.volume_id, volume2.id]) {block_mapping.volume_id, volume2.id}
) )
volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'})
set([vol.id for vol in volumes_by_encrypted]).should.equal( set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal(
set([volume1.id, volume3.id, volume4.id]) {volume1.id, volume3.id, volume4.id}
) )
@ -252,18 +258,20 @@ def test_create_snapshot():
snapshot.update() snapshot.update()
snapshot.status.should.equal('completed') snapshot.status.should.equal('completed')
snapshots = conn.get_all_snapshots() snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
snapshots.should.have.length_of(1) snapshots.should.have.length_of(1)
snapshots[0].description.should.equal('a test snapshot') snapshots[0].description.should.equal('a test snapshot')
snapshots[0].start_time.should_not.be.none snapshots[0].start_time.should_not.be.none
snapshots[0].encrypted.should.be(False) snapshots[0].encrypted.should.be(False)
# Create snapshot without description # Create snapshot without description
num_snapshots = len(conn.get_all_snapshots())
snapshot = volume.create_snapshot() snapshot = volume.create_snapshot()
conn.get_all_snapshots().should.have.length_of(2) conn.get_all_snapshots().should.have.length_of(num_snapshots + 1)
snapshot.delete() snapshot.delete()
conn.get_all_snapshots().should.have.length_of(1) conn.get_all_snapshots().should.have.length_of(num_snapshots)
# Deleting something that was already deleted should throw an error # Deleting something that was already deleted should throw an error
with assert_raises(EC2ResponseError) as cm: with assert_raises(EC2ResponseError) as cm:
@ -281,7 +289,7 @@ def test_create_encrypted_snapshot():
snapshot.update() snapshot.update()
snapshot.status.should.equal('completed') snapshot.status.should.equal('completed')
snapshots = conn.get_all_snapshots() snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
snapshots.should.have.length_of(1) snapshots.should.have.length_of(1)
snapshots[0].description.should.equal('a test snapshot') snapshots[0].description.should.equal('a test snapshot')
snapshots[0].start_time.should_not.be.none snapshots[0].start_time.should_not.be.none
@ -331,52 +339,52 @@ def test_snapshot_filters():
snapshots_by_description = conn.get_all_snapshots( snapshots_by_description = conn.get_all_snapshots(
filters={'description': 'testsnapshot1'}) filters={'description': 'testsnapshot1'})
set([snap.id for snap in snapshots_by_description] set([snap.id for snap in snapshots_by_description]
).should.equal(set([snapshot1.id])) ).should.equal({snapshot1.id})
snapshots_by_id = conn.get_all_snapshots( snapshots_by_id = conn.get_all_snapshots(
filters={'snapshot-id': snapshot1.id}) filters={'snapshot-id': snapshot1.id})
set([snap.id for snap in snapshots_by_id] set([snap.id for snap in snapshots_by_id]
).should.equal(set([snapshot1.id])) ).should.equal({snapshot1.id})
snapshots_by_start_time = conn.get_all_snapshots( snapshots_by_start_time = conn.get_all_snapshots(
filters={'start-time': snapshot1.start_time}) filters={'start-time': snapshot1.start_time})
set([snap.start_time for snap in snapshots_by_start_time] set([snap.start_time for snap in snapshots_by_start_time]
).should.equal(set([snapshot1.start_time])) ).should.equal({snapshot1.start_time})
snapshots_by_volume_id = conn.get_all_snapshots( snapshots_by_volume_id = conn.get_all_snapshots(
filters={'volume-id': volume1.id}) filters={'volume-id': volume1.id})
set([snap.id for snap in snapshots_by_volume_id] set([snap.id for snap in snapshots_by_volume_id]
).should.equal(set([snapshot1.id, snapshot2.id])) ).should.equal({snapshot1.id, snapshot2.id})
snapshots_by_status = conn.get_all_snapshots( snapshots_by_status = conn.get_all_snapshots(
filters={'status': 'completed'}) filters={'status': 'completed'})
set([snap.id for snap in snapshots_by_status] ({snapshot1.id, snapshot2.id, snapshot3.id} -
).should.equal(set([snapshot1.id, snapshot2.id, snapshot3.id])) {snap.id for snap in snapshots_by_status}).should.have.length_of(0)
snapshots_by_volume_size = conn.get_all_snapshots( snapshots_by_volume_size = conn.get_all_snapshots(
filters={'volume-size': volume1.size}) filters={'volume-size': volume1.size})
set([snap.id for snap in snapshots_by_volume_size] set([snap.id for snap in snapshots_by_volume_size]
).should.equal(set([snapshot1.id, snapshot2.id])) ).should.equal({snapshot1.id, snapshot2.id})
snapshots_by_tag_key = conn.get_all_snapshots( snapshots_by_tag_key = conn.get_all_snapshots(
filters={'tag-key': 'testkey1'}) filters={'tag-key': 'testkey1'})
set([snap.id for snap in snapshots_by_tag_key] set([snap.id for snap in snapshots_by_tag_key]
).should.equal(set([snapshot1.id])) ).should.equal({snapshot1.id})
snapshots_by_tag_value = conn.get_all_snapshots( snapshots_by_tag_value = conn.get_all_snapshots(
filters={'tag-value': 'testvalue1'}) filters={'tag-value': 'testvalue1'})
set([snap.id for snap in snapshots_by_tag_value] set([snap.id for snap in snapshots_by_tag_value]
).should.equal(set([snapshot1.id])) ).should.equal({snapshot1.id})
snapshots_by_tag = conn.get_all_snapshots( snapshots_by_tag = conn.get_all_snapshots(
filters={'tag:testkey1': 'testvalue1'}) filters={'tag:testkey1': 'testvalue1'})
set([snap.id for snap in snapshots_by_tag] set([snap.id for snap in snapshots_by_tag]
).should.equal(set([snapshot1.id])) ).should.equal({snapshot1.id})
snapshots_by_encrypted = conn.get_all_snapshots( snapshots_by_encrypted = conn.get_all_snapshots(
filters={'encrypted': 'true'}) filters={'encrypted': 'true'})
set([snap.id for snap in snapshots_by_encrypted] set([snap.id for snap in snapshots_by_encrypted]
).should.equal(set([snapshot3.id])) ).should.equal({snapshot3.id})
@mock_ec2_deprecated @mock_ec2_deprecated
@ -563,9 +571,11 @@ def test_volume_tag_escaping():
ex.exception.status.should.equal(400) ex.exception.status.should.equal(400)
ex.exception.message.should.equal( ex.exception.message.should.equal(
'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
dict(conn.get_all_snapshots()[0].tags).should_not.be.equal( snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
dict(snaps[0].tags).should_not.be.equal(
{'key': '</closed>'}) {'key': '</closed>'})
snapshot.add_tags({'key': '</closed>'}) snapshot.add_tags({'key': '</closed>'})
dict(conn.get_all_snapshots()[0].tags).should.equal({'key': '</closed>'}) snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id]
dict(snaps[0].tags).should.equal({'key': '</closed>'})
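A pattern running through these EBS edits: instead of asserting against index 0 of a global listing, the tests now scope their assertions to the resources they created, which keeps them valid even when the mocked account already contains other volumes or snapshots. A minimal sketch of the pattern, assuming `conn` is the boto EC2 connection used in the surrounding tests:
# Scope assertions to the volume this test created rather than assuming the
# account-wide listing is otherwise empty.
volume = conn.create_volume(80, "us-east-1a")
mine = [vol for vol in conn.get_all_volumes() if vol.id == volume.id]
mine.should.have.length_of(1)
mine[0].size.should.equal(80)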

View File

@ -5,7 +5,9 @@ from nose.tools import assert_raises
import base64 import base64
import datetime import datetime
import ipaddress
import six
import boto import boto
import boto3 import boto3
from boto.ec2.instance import Reservation, InstanceAttribute from boto.ec2.instance import Reservation, InstanceAttribute
@ -215,7 +217,6 @@ def test_create_with_tags():
len(instances['Instances'][0]['Tags']).should.equal(3) len(instances['Instances'][0]['Tags']).should.equal(3)
@mock_ec2_deprecated @mock_ec2_deprecated
def test_get_instances_filtering_by_state(): def test_get_instances_filtering_by_state():
conn = boto.connect_ec2() conn = boto.connect_ec2()
@ -413,6 +414,7 @@ def test_get_instances_filtering_by_image_id():
'Values': [image_id]}])['Reservations'] 'Values': [image_id]}])['Reservations']
reservations[0]['Instances'].should.have.length_of(1) reservations[0]['Instances'].should.have.length_of(1)
@mock_ec2 @mock_ec2
def test_get_instances_filtering_by_private_dns(): def test_get_instances_filtering_by_private_dns():
image_id = 'ami-1234abcd' image_id = 'ami-1234abcd'
@ -427,6 +429,7 @@ def test_get_instances_filtering_by_private_dns():
])['Reservations'] ])['Reservations']
reservations[0]['Instances'].should.have.length_of(1) reservations[0]['Instances'].should.have.length_of(1)
@mock_ec2 @mock_ec2
def test_get_instances_filtering_by_ni_private_dns(): def test_get_instances_filtering_by_ni_private_dns():
image_id = 'ami-1234abcd' image_id = 'ami-1234abcd'
@ -441,6 +444,7 @@ def test_get_instances_filtering_by_ni_private_dns():
])['Reservations'] ])['Reservations']
reservations[0]['Instances'].should.have.length_of(1) reservations[0]['Instances'].should.have.length_of(1)
@mock_ec2 @mock_ec2
def test_get_instances_filtering_by_instance_group_name(): def test_get_instances_filtering_by_instance_group_name():
image_id = 'ami-1234abcd' image_id = 'ami-1234abcd'
@ -458,6 +462,7 @@ def test_get_instances_filtering_by_instance_group_name():
])['Reservations'] ])['Reservations']
reservations[0]['Instances'].should.have.length_of(1) reservations[0]['Instances'].should.have.length_of(1)
@mock_ec2 @mock_ec2
def test_get_instances_filtering_by_instance_group_id(): def test_get_instances_filtering_by_instance_group_id():
image_id = 'ami-1234abcd' image_id = 'ami-1234abcd'
@ -476,6 +481,7 @@ def test_get_instances_filtering_by_instance_group_id():
])['Reservations'] ])['Reservations']
reservations[0]['Instances'].should.have.length_of(1) reservations[0]['Instances'].should.have.length_of(1)
@mock_ec2_deprecated @mock_ec2_deprecated
def test_get_instances_filtering_by_tag(): def test_get_instances_filtering_by_tag():
conn = boto.connect_ec2() conn = boto.connect_ec2()
@ -830,18 +836,113 @@ def test_run_instance_with_placement():
instance.placement.should.equal("us-east-1b") instance.placement.should.equal("us-east-1b")
@mock_ec2_deprecated @mock_ec2
def test_run_instance_with_subnet(): def test_run_instance_with_subnet_boto3():
conn = boto.connect_vpc('the_key', 'the_secret') client = boto3.client('ec2', region_name='eu-central-1')
vpc = conn.create_vpc("10.0.0.0/16")
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id)
instance = reservation.instances[0]
instance.subnet_id.should.equal(subnet.id) ip_networks = [
(ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')),
(ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25'))
]
all_enis = conn.get_all_network_interfaces() # Tests instances are created with the correct IPs
all_enis.should.have.length_of(1) for vpc_cidr, subnet_cidr in ip_networks:
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy='default'
)
vpc_id = resp['Vpc']['VpcId']
resp = client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
resp = client.run_instances(
ImageId='ami-1234abcd',
MaxCount=1,
MinCount=1,
SubnetId=subnet_id
)
instance = resp['Instances'][0]
instance['SubnetId'].should.equal(subnet_id)
priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress']))
subnet_cidr.should.contain(priv_ipv4)
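The six.text_type cast above matters on Python 2, where the backported ipaddress module only accepts unicode strings; the containment check itself is plain stdlib. A minimal standalone sketch of the same check, with illustrative addresses:
import ipaddress

# ip_network / ip_address objects support membership tests directly.
subnet_cidr = ipaddress.ip_network(u'10.0.99.0/24')
private_ip = ipaddress.ip_address(u'10.0.99.17')
assert private_ip in subnet_cidr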
@mock_ec2
def test_run_instance_with_specified_private_ipv4():
client = boto3.client('ec2', region_name='eu-central-1')
vpc_cidr = ipaddress.ip_network('192.168.42.0/24')
subnet_cidr = ipaddress.ip_network('192.168.42.0/25')
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy='default'
)
vpc_id = resp['Vpc']['VpcId']
resp = client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
resp = client.run_instances(
ImageId='ami-1234abcd',
MaxCount=1,
MinCount=1,
SubnetId=subnet_id,
PrivateIpAddress='192.168.42.5'
)
instance = resp['Instances'][0]
instance['SubnetId'].should.equal(subnet_id)
instance['PrivateIpAddress'].should.equal('192.168.42.5')
@mock_ec2
def test_run_instance_mapped_public_ipv4():
client = boto3.client('ec2', region_name='eu-central-1')
vpc_cidr = ipaddress.ip_network('192.168.42.0/24')
subnet_cidr = ipaddress.ip_network('192.168.42.0/25')
resp = client.create_vpc(
CidrBlock=str(vpc_cidr),
AmazonProvidedIpv6CidrBlock=False,
DryRun=False,
InstanceTenancy='default'
)
vpc_id = resp['Vpc']['VpcId']
resp = client.create_subnet(
CidrBlock=str(subnet_cidr),
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
client.modify_subnet_attribute(
SubnetId=subnet_id,
MapPublicIpOnLaunch={'Value': True}
)
resp = client.run_instances(
ImageId='ami-1234abcd',
MaxCount=1,
MinCount=1,
SubnetId=subnet_id
)
instance = resp['Instances'][0]
instance.should.contain('PublicDnsName')
instance.should.contain('PublicIpAddress')
len(instance['PublicDnsName']).should.be.greater_than(0)
len(instance['PublicIpAddress']).should.be.greater_than(0)
@mock_ec2_deprecated @mock_ec2_deprecated
@ -853,7 +954,7 @@ def test_run_instance_with_nic_autocreated():
'test security group #1', 'this is a test security group') 'test security group #1', 'this is a test security group')
security_group2 = conn.create_security_group( security_group2 = conn.create_security_group(
'test security group #2', 'this is a test security group') 'test security group #2', 'this is a test security group')
private_ip = "54.0.0.1" private_ip = "10.0.0.1"
reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id,
security_groups=[security_group1.name], security_groups=[security_group1.name],
@ -880,6 +981,7 @@ def test_run_instance_with_nic_autocreated():
eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses.should.have.length_of(1)
eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
@mock_ec2_deprecated @mock_ec2_deprecated
def test_run_instance_with_nic_preexisting(): def test_run_instance_with_nic_preexisting():
conn = boto.connect_vpc('the_key', 'the_secret') conn = boto.connect_vpc('the_key', 'the_secret')
@ -1012,6 +1114,7 @@ def test_ec2_classic_has_public_ip_address():
instance.private_ip_address.should_not.equal(None) instance.private_ip_address.should_not.equal(None)
instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-'))
@mock_ec2_deprecated @mock_ec2_deprecated
def test_run_instance_with_keypair(): def test_run_instance_with_keypair():
conn = boto.connect_ec2('the_key', 'the_secret') conn = boto.connect_ec2('the_key', 'the_secret')

View File

@ -126,9 +126,9 @@ def test_route_tables_filters_associations():
conn = boto.connect_vpc('the_key', 'the_secret') conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc("10.0.0.0/16") vpc = conn.create_vpc("10.0.0.0/16")
subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/18") subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24")
subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/18") subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24")
subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/18") subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24")
route_table1 = conn.create_route_table(vpc.id) route_table1 = conn.create_route_table(vpc.id)
route_table2 = conn.create_route_table(vpc.id) route_table2 = conn.create_route_table(vpc.id)

View File

@ -356,7 +356,7 @@ def test_retrieved_snapshots_must_contain_their_tags():
# Fetch the snapshot again # Fetch the snapshot again
all_snapshots = conn.get_all_snapshots() all_snapshots = conn.get_all_snapshots()
snapshot = all_snapshots[0] snapshot = [item for item in all_snapshots if item.id == snapshot.id][0]
retrieved_tags = snapshot.tags retrieved_tags = snapshot.tags
conn.delete_snapshot(snapshot.id) conn.delete_snapshot(snapshot.id)

View File

@ -1611,6 +1611,152 @@ def test_update_service_through_cloudformation_should_trigger_replacement():
len(resp['serviceArns']).should.equal(1) len(resp['serviceArns']).should.equal(1)
@mock_ec2
@mock_ecs
def test_attributes():
# Combined put, list, and delete attributes into the same test due to the amount of setup
ecs_client = boto3.client('ecs', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
test_cluster_name = 'test_ecs_cluster'
_ = ecs_client.create_cluster(
clusterName=test_cluster_name
)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd",
MinCount=1,
MaxCount=1,
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name,
instanceIdentityDocument=instance_id_document
)
response['containerInstance'][
'ec2InstanceId'].should.equal(test_instance.id)
full_arn1 = response['containerInstance']['containerInstanceArn']
test_instance = ec2.create_instances(
ImageId="ami-1234abcd",
MinCount=1,
MaxCount=1,
)[0]
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = ecs_client.register_container_instance(
cluster=test_cluster_name,
instanceIdentityDocument=instance_id_document
)
response['containerInstance'][
'ec2InstanceId'].should.equal(test_instance.id)
full_arn2 = response['containerInstance']['containerInstanceArn']
partial_arn2 = full_arn2.rsplit('/', 1)[-1]
full_arn2.should_not.equal(full_arn1) # uuid1 isn't unique enough when the pc is fast ;-)
# OK, set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd.
ecs_client.put_attributes(
cluster=test_cluster_name,
attributes=[
{'name': 'env', 'value': 'prod'},
{'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1},
{'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}
]
)
resp = ecs_client.list_attributes(
cluster=test_cluster_name,
targetType='container-instance'
)
attrs = resp['attributes']
len(attrs).should.equal(4)
# Tests that the attrs have been set properly
len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2)
len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1)
ecs_client.delete_attributes(
cluster=test_cluster_name,
attributes=[
{'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}
]
)
resp = ecs_client.list_attributes(
cluster=test_cluster_name,
targetType='container-instance'
)
attrs = resp['attributes']
len(attrs).should.equal(3)
@mock_ecs
def test_poll_endpoint():
# Minimal check that discover_poll_endpoint responds, with no cluster setup
ecs_client = boto3.client('ecs', region_name='us-east-1')
# Just a placeholder until someone actually wants useless data, just testing it doesn't raise an exception
resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah')
resp.should.contain('endpoint')
resp.should.contain('telemetryEndpoint')
@mock_ecs
def test_list_task_definition_families():
client = boto3.client('ecs', region_name='us-east-1')
client.register_task_definition(
family='test_ecs_task',
containerDefinitions=[
{
'name': 'hello_world',
'image': 'docker/hello-world:latest',
'cpu': 1024,
'memory': 400,
'essential': True,
'environment': [{
'name': 'AWS_ACCESS_KEY_ID',
'value': 'SOME_ACCESS_KEY'
}],
'logConfiguration': {'logDriver': 'json-file'}
}
]
)
client.register_task_definition(
family='alt_test_ecs_task',
containerDefinitions=[
{
'name': 'hello_world',
'image': 'docker/hello-world:latest',
'cpu': 1024,
'memory': 400,
'essential': True,
'environment': [{
'name': 'AWS_ACCESS_KEY_ID',
'value': 'SOME_ACCESS_KEY'
}],
'logConfiguration': {'logDriver': 'json-file'}
}
]
)
resp1 = client.list_task_definition_families()
resp2 = client.list_task_definition_families(familyPrefix='alt')
len(resp1['families']).should.equal(2)
len(resp2['families']).should.equal(1)
def _fetch_container_instance_resources(container_instance_description): def _fetch_container_instance_resources(container_instance_description):
remaining_resources = {} remaining_resources = {}
registered_resources = {} registered_resources = {}

View File

@ -1,11 +1,13 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import os
import boto3 import boto3
import botocore import botocore
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from nose.tools import assert_raises from nose.tools import assert_raises
import sure # noqa import sure # noqa
from moto import mock_elbv2, mock_ec2 from moto import mock_elbv2, mock_ec2, mock_acm
from moto.elbv2 import elbv2_backends
@mock_elbv2 @mock_elbv2
@ -283,6 +285,21 @@ def test_create_target_group_and_listeners():
load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
# Can't create a target group with an invalid protocol
with assert_raises(ClientError):
conn.create_target_group(
Name='a-target',
Protocol='HTTP',
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol='/HTTP',
HealthCheckPort='8080',
HealthCheckPath='/',
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={'HttpCode': '200'})
response = conn.create_target_group( response = conn.create_target_group(
Name='a-target', Name='a-target',
Protocol='HTTP', Protocol='HTTP',
@ -723,6 +740,21 @@ def test_handle_listener_rules():
load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
# Can't create a target group with an invalid protocol
with assert_raises(ClientError):
conn.create_target_group(
Name='a-target',
Protocol='HTTP',
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol='/HTTP',
HealthCheckPort='8080',
HealthCheckPath='/',
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={'HttpCode': '200'})
response = conn.create_target_group( response = conn.create_target_group(
Name='a-target', Name='a-target',
Protocol='HTTP', Protocol='HTTP',
@ -1030,3 +1062,373 @@ def test_describe_invalid_target_group():
# Check error raises correctly # Check error raises correctly
with assert_raises(ClientError): with assert_raises(ClientError):
conn.describe_target_groups(Names=['invalid']) conn.describe_target_groups(Names=['invalid'])
@mock_elbv2
def test_describe_account_limits():
client = boto3.client('elbv2', region_name='eu-central-1')
resp = client.describe_account_limits()
resp['Limits'][0].should.contain('Name')
resp['Limits'][0].should.contain('Max')
@mock_elbv2
def test_describe_ssl_policies():
client = boto3.client('elbv2', region_name='eu-central-1')
resp = client.describe_ssl_policies()
len(resp['SslPolicies']).should.equal(5)
resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08'])
len(resp['SslPolicies']).should.equal(2)
@mock_elbv2
@mock_ec2
def test_set_ip_address_type():
client = boto3.client('elbv2', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
subnet1 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1a')
subnet2 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1b')
response = client.create_load_balancer(
Name='my-lb',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internal',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
arn = response['LoadBalancers'][0]['LoadBalancerArn']
# Internal LBs can't be dualstack yet
with assert_raises(ClientError):
client.set_ip_address_type(
LoadBalancerArn=arn,
IpAddressType='dualstack'
)
# Create internet facing one
response = client.create_load_balancer(
Name='my-lb2',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internet-facing',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
arn = response['LoadBalancers'][0]['LoadBalancerArn']
client.set_ip_address_type(
LoadBalancerArn=arn,
IpAddressType='dualstack'
)
@mock_elbv2
@mock_ec2
def test_set_security_groups():
client = boto3.client('elbv2', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One')
security_group2 = ec2.create_security_group(
GroupName='b-security-group', Description='Second One')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
subnet1 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1a')
subnet2 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1b')
response = client.create_load_balancer(
Name='my-lb',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internal',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
arn = response['LoadBalancers'][0]['LoadBalancerArn']
client.set_security_groups(
LoadBalancerArn=arn,
SecurityGroups=[security_group.id, security_group2.id]
)
resp = client.describe_load_balancers(LoadBalancerArns=[arn])
len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
with assert_raises(ClientError):
client.set_security_groups(
LoadBalancerArn=arn,
SecurityGroups=['non_existant']
)
@mock_elbv2
@mock_ec2
def test_set_subnets():
client = boto3.client('elbv2', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
subnet1 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1a')
subnet2 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1b')
subnet3 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1c')
response = client.create_load_balancer(
Name='my-lb',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internal',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
arn = response['LoadBalancers'][0]['LoadBalancerArn']
client.set_subnets(
LoadBalancerArn=arn,
Subnets=[subnet1.id, subnet2.id, subnet3.id]
)
resp = client.describe_load_balancers(LoadBalancerArns=[arn])
len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
# Only 1 AZ
with assert_raises(ClientError):
client.set_subnets(
LoadBalancerArn=arn,
Subnets=[subnet1.id]
)
# Multiple subnets in same AZ
with assert_raises(ClientError):
client.set_subnets(
LoadBalancerArn=arn,
Subnets=[subnet1.id, subnet2.id, subnet2.id]
)
@mock_elbv2
@mock_ec2
def test_modify_load_balancer_attributes_idle_timeout():
client = boto3.client('elbv2', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
subnet1 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1a')
subnet2 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='us-east-1b')
response = client.create_load_balancer(
Name='my-lb',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internal',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
arn = response['LoadBalancers'][0]['LoadBalancerArn']
client.modify_load_balancer_attributes(
LoadBalancerArn=arn,
Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
)
# Check it's 600, not 60
response = client.describe_load_balancer_attributes(
LoadBalancerArn=arn
)
idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
idle_timeout['Value'].should.equal('600')
@mock_elbv2
@mock_ec2
def test_modify_target_group():
client = boto3.client('elbv2', region_name='us-east-1')
ec2 = boto3.resource('ec2', region_name='us-east-1')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
response = client.create_target_group(
Name='a-target',
Protocol='HTTP',
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol='HTTP',
HealthCheckPort='8080',
HealthCheckPath='/',
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={'HttpCode': '200'})
arn = response.get('TargetGroups')[0]['TargetGroupArn']
client.modify_target_group(
TargetGroupArn=arn,
HealthCheckProtocol='HTTPS',
HealthCheckPort='8081',
HealthCheckPath='/status',
HealthCheckIntervalSeconds=10,
HealthCheckTimeoutSeconds=10,
HealthyThresholdCount=10,
UnhealthyThresholdCount=4,
Matcher={'HttpCode': '200-399'}
)
response = client.describe_target_groups(
TargetGroupArns=[arn]
)
response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
@mock_elbv2
@mock_ec2
@mock_acm
def test_modify_listener_http_to_https():
client = boto3.client('elbv2', region_name='eu-central-1')
acm = boto3.client('acm', region_name='eu-central-1')
ec2 = boto3.resource('ec2', region_name='eu-central-1')
security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One')
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
subnet1 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='eu-central-1a')
subnet2 = ec2.create_subnet(
VpcId=vpc.id,
CidrBlock='172.28.7.192/26',
AvailabilityZone='eu-central-1b')
response = client.create_load_balancer(
Name='my-lb',
Subnets=[subnet1.id, subnet2.id],
SecurityGroups=[security_group.id],
Scheme='internal',
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
response = client.create_target_group(
Name='a-target',
Protocol='HTTP',
Port=8080,
VpcId=vpc.id,
HealthCheckProtocol='HTTP',
HealthCheckPort='8080',
HealthCheckPath='/',
HealthCheckIntervalSeconds=5,
HealthCheckTimeoutSeconds=5,
HealthyThresholdCount=5,
UnhealthyThresholdCount=2,
Matcher={'HttpCode': '200'})
target_group = response.get('TargetGroups')[0]
target_group_arn = target_group['TargetGroupArn']
# Plain HTTP listener
response = client.create_listener(
LoadBalancerArn=load_balancer_arn,
Protocol='HTTP',
Port=80,
DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}]
)
listener_arn = response['Listeners'][0]['ListenerArn']
response = acm.request_certificate(
DomainName='google.com',
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
)
google_arn = response['CertificateArn']
response = acm.request_certificate(
DomainName='yahoo.com',
SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'],
)
yahoo_arn = response['CertificateArn']
response = client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol='HTTPS',
SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
Certificates=[
{'CertificateArn': google_arn, 'IsDefault': False},
{'CertificateArn': yahoo_arn, 'IsDefault': True}
],
DefaultActions=[
{'Type': 'forward', 'TargetGroupArn': target_group_arn}
]
)
response['Listeners'][0]['Port'].should.equal(443)
response['Listeners'][0]['Protocol'].should.equal('HTTPS')
response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01')
len(response['Listeners'][0]['Certificates']).should.equal(2)
# Check default cert, can't do this in server mode
if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false':
listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn]
listener.certificate.should.equal(yahoo_arn)
# No default cert
with assert_raises(ClientError):
client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol='HTTPS',
SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
Certificates=[
{'CertificateArn': google_arn, 'IsDefault': False}
],
DefaultActions=[
{'Type': 'forward', 'TargetGroupArn': target_group_arn}
]
)
# Bad cert
with assert_raises(ClientError):
client.modify_listener(
ListenerArn=listener_arn,
Port=443,
Protocol='HTTPS',
SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
Certificates=[
{'CertificateArn': 'lalala', 'IsDefault': True}
],
DefaultActions=[
{'Type': 'forward', 'TargetGroupArn': target_group_arn}
]
)

View File

@ -3,6 +3,8 @@ import random
import boto3 import boto3
from moto.events import mock_events from moto.events import mock_events
from botocore.exceptions import ClientError
from nose.tools import assert_raises
RULES = [ RULES = [
@ -171,11 +173,36 @@ def test_remove_targets():
assert(targets_before - 1 == targets_after) assert(targets_before - 1 == targets_after)
if __name__ == '__main__': @mock_events
test_list_rules() def test_permissions():
test_describe_rule() client = boto3.client('events', 'eu-central-1')
test_enable_disable_rule()
test_list_rule_names_by_target() client.put_permission(Action='PutEvents', Principal='111111111111', StatementId='Account1')
test_list_rules() client.put_permission(Action='PutEvents', Principal='222222222222', StatementId='Account2')
test_list_targets_by_rule()
test_remove_targets() resp = client.describe_event_bus()
assert len(resp['Policy']['Statement']) == 2
client.remove_permission(StatementId='Account2')
resp = client.describe_event_bus()
assert len(resp['Policy']['Statement']) == 1
assert resp['Policy']['Statement'][0]['Sid'] == 'Account1'
@mock_events
def test_put_events():
client = boto3.client('events', 'eu-central-1')
event = {
"Source": "com.mycompany.myapp",
"Detail": '{"key1": "value3", "key2": "value4"}',
"Resources": ["resource1", "resource2"],
"DetailType": "myDetailType"
}
client.put_events(Entries=[event])
# Boto3 would error if it didn't return 200 OK
with assert_raises(ClientError):
client.put_events(Entries=[event]*20)
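The 20-entry call fails because PutEvents accepts at most 10 entries per request; callers with more events typically send them in chunks, roughly as sketched below (the helper name and entries list are illustrative):
# Send events in chunks of at most 10, the PutEvents per-request limit.
def put_events_in_batches(client, entries, batch_size=10):
    for start in range(0, len(entries), batch_size):
        client.put_events(Entries=entries[start:start + batch_size])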

179
tests/test_iot/test_iot.py Normal file
View File

@ -0,0 +1,179 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto import mock_iot
@mock_iot
def test_things():
client = boto3.client('iot', region_name='ap-northeast-1')
name = 'my-thing'
type_name = 'my-type-name'
# thing type
thing_type = client.create_thing_type(thingTypeName=type_name)
thing_type.should.have.key('thingTypeName').which.should.equal(type_name)
thing_type.should.have.key('thingTypeArn')
res = client.list_thing_types()
res.should.have.key('thingTypes').which.should.have.length_of(1)
for thing_type in res['thingTypes']:
thing_type.should.have.key('thingTypeName').which.should_not.be.none
thing_type = client.describe_thing_type(thingTypeName=type_name)
thing_type.should.have.key('thingTypeName').which.should.equal(type_name)
thing_type.should.have.key('thingTypeProperties')
thing_type.should.have.key('thingTypeMetadata')
# thing
thing = client.create_thing(thingName=name, thingTypeName=type_name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('thingArn')
res = client.list_things()
res.should.have.key('things').which.should.have.length_of(1)
for thing in res['things']:
thing.should.have.key('thingName').which.should_not.be.none
thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}})
res = client.list_things()
res.should.have.key('things').which.should.have.length_of(1)
for thing in res['things']:
thing.should.have.key('thingName').which.should_not.be.none
res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1')
thing = client.describe_thing(thingName=name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('defaultClientId')
thing.should.have.key('thingTypeName')
thing.should.have.key('attributes')
thing.should.have.key('version')
# delete thing
client.delete_thing(thingName=name)
res = client.list_things()
res.should.have.key('things').which.should.have.length_of(0)
# delete thing type
client.delete_thing_type(thingTypeName=type_name)
res = client.list_thing_types()
res.should.have.key('thingTypes').which.should.have.length_of(0)
@mock_iot
def test_certs():
client = boto3.client('iot', region_name='ap-northeast-1')
cert = client.create_keys_and_certificate(setAsActive=True)
cert.should.have.key('certificateArn').which.should_not.be.none
cert.should.have.key('certificateId').which.should_not.be.none
cert.should.have.key('certificatePem').which.should_not.be.none
cert.should.have.key('keyPair')
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none
cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none
cert_id = cert['certificateId']
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key('certificateDescription')
cert_desc = cert['certificateDescription']
cert_desc.should.have.key('certificateArn').which.should_not.be.none
cert_desc.should.have.key('certificateId').which.should_not.be.none
cert_desc.should.have.key('certificatePem').which.should_not.be.none
cert_desc.should.have.key('status').which.should.equal('ACTIVE')
res = client.list_certificates()
res.should.have.key('certificates').which.should.have.length_of(1)
for cert in res['certificates']:
cert.should.have.key('certificateArn').which.should_not.be.none
cert.should.have.key('certificateId').which.should_not.be.none
cert.should.have.key('status').which.should_not.be.none
cert.should.have.key('creationDate').which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
cert = client.describe_certificate(certificateId=cert_id)
cert['certificateDescription'].should.have.key('status').which.should.equal('REVOKED')
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key('certificates').which.should.have.length_of(0)
@mock_iot
def test_policy():
client = boto3.client('iot', region_name='ap-northeast-1')
name = 'my-policy'
doc = '{}'
policy = client.create_policy(policyName=name, policyDocument=doc)
policy.should.have.key('policyName').which.should.equal(name)
policy.should.have.key('policyArn').which.should_not.be.none
policy.should.have.key('policyDocument').which.should.equal(doc)
policy.should.have.key('policyVersionId').which.should.equal('1')
policy = client.get_policy(policyName=name)
policy.should.have.key('policyName').which.should.equal(name)
policy.should.have.key('policyArn').which.should_not.be.none
policy.should.have.key('policyDocument').which.should.equal(doc)
policy.should.have.key('defaultVersionId').which.should.equal('1')
res = client.list_policies()
res.should.have.key('policies').which.should.have.length_of(1)
for policy in res['policies']:
policy.should.have.key('policyName').which.should_not.be.none
policy.should.have.key('policyArn').which.should_not.be.none
client.delete_policy(policyName=name)
res = client.list_policies()
res.should.have.key('policies').which.should.have.length_of(0)
@mock_iot
def test_principal_policy():
client = boto3.client('iot', region_name='ap-northeast-1')
policy_name = 'my-policy'
doc = '{}'
policy = client.create_policy(policyName=policy_name, policyDocument=doc)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert['certificateArn']
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key('policies').which.should.have.length_of(1)
for policy in res['policies']:
policy.should.have.key('policyName').which.should_not.be.none
policy.should.have.key('policyArn').which.should_not.be.none
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key('principals').which.should.have.length_of(1)
for principal in res['principals']:
principal.should_not.be.none
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key('policies').which.should.have.length_of(0)
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key('principals').which.should.have.length_of(0)
@mock_iot
def test_principal_thing():
client = boto3.client('iot', region_name='ap-northeast-1')
thing_name = 'my-thing'
thing = client.create_thing(thingName=thing_name)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert['certificateArn']
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key('things').which.should.have.length_of(1)
for thing in res['things']:
thing.should_not.be.none
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key('principals').which.should.have.length_of(1)
for principal in res['principals']:
principal.should_not.be.none
client.detach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key('things').which.should.have.length_of(0)
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key('principals').which.should.have.length_of(0)

View File

@ -0,0 +1,19 @@
from __future__ import unicode_literals
import sure # noqa
import moto.server as server
from moto import mock_iot
'''
Test the different server responses
'''
@mock_iot
def test_iot_list():
backend = server.create_backend_app("iot")
test_client = backend.test_client()
# just making sure that server is up
res = test_client.get('/things')
res.status_code.should.equal(404)

View File

@ -0,0 +1,87 @@
from __future__ import unicode_literals
import json
import boto3
import sure # noqa
from nose.tools import assert_raises
from botocore.exceptions import ClientError
from moto import mock_iotdata, mock_iot
@mock_iot
@mock_iotdata
def test_basic():
iot_client = boto3.client('iot', region_name='ap-northeast-1')
client = boto3.client('iot-data', region_name='ap-northeast-1')
name = 'my-thing'
raw_payload = b'{"state": {"desired": {"led": "on"}}}'
iot_client.create_thing(thingName=name)
with assert_raises(ClientError):
client.get_thing_shadow(thingName=name)
res = client.update_thing_shadow(thingName=name, payload=raw_payload)
payload = json.loads(res['payload'].read())
expected_state = '{"desired": {"led": "on"}}'
payload.should.have.key('state').which.should.equal(json.loads(expected_state))
payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
payload.should.have.key('version').which.should.equal(1)
payload.should.have.key('timestamp')
res = client.get_thing_shadow(thingName=name)
payload = json.loads(res['payload'].read())
expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}'
payload.should.have.key('state').which.should.equal(json.loads(expected_state))
payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
payload.should.have.key('version').which.should.equal(1)
payload.should.have.key('timestamp')
client.delete_thing_shadow(thingName=name)
with assert_raises(ClientError):
client.get_thing_shadow(thingName=name)
@mock_iot
@mock_iotdata
def test_update():
iot_client = boto3.client('iot', region_name='ap-northeast-1')
client = boto3.client('iot-data', region_name='ap-northeast-1')
name = 'my-thing'
raw_payload = b'{"state": {"desired": {"led": "on"}}}'
iot_client.create_thing(thingName=name)
# first update
res = client.update_thing_shadow(thingName=name, payload=raw_payload)
payload = json.loads(res['payload'].read())
expected_state = '{"desired": {"led": "on"}}'
payload.should.have.key('state').which.should.equal(json.loads(expected_state))
payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
payload.should.have.key('version').which.should.equal(1)
payload.should.have.key('timestamp')
res = client.get_thing_shadow(thingName=name)
payload = json.loads(res['payload'].read())
expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}'
payload.should.have.key('state').which.should.equal(json.loads(expected_state))
payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
payload.should.have.key('version').which.should.equal(1)
payload.should.have.key('timestamp')
# reporting new state
new_payload = b'{"state": {"reported": {"led": "on"}}}'
res = client.update_thing_shadow(thingName=name, payload=new_payload)
payload = json.loads(res['payload'].read())
expected_state = '{"reported": {"led": "on"}}'
payload.should.have.key('state').which.should.equal(json.loads(expected_state))
payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led')
payload.should.have.key('version').which.should.equal(2)
payload.should.have.key('timestamp')
res = client.get_thing_shadow(thingName=name)
payload = json.loads(res['payload'].read())
expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}'
payload.should.have.key('state').which.should.equal(json.loads(expected_state))
payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led')
payload.should.have.key('version').which.should.equal(2)
payload.should.have.key('timestamp')
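The appearing and disappearing 'delta' key follows the shadow contract: delta carries the desired fields that differ from what the device has reported, so it is present after the first update and gone once reported matches desired. A flat-key sketch of that relationship (real shadows diff nested documents recursively):
# Conceptual delta: desired fields whose value differs from the reported one.
def shadow_delta(desired, reported):
    return {key: value for key, value in desired.items() if reported.get(key) != value}

assert shadow_delta({'led': 'on'}, {}) == {'led': 'on'}
assert shadow_delta({'led': 'on'}, {'led': 'on'}) == {}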

View File

@ -0,0 +1,20 @@
from __future__ import unicode_literals
import sure # noqa
import moto.server as server
from moto import mock_iotdata
'''
Test the different server responses
'''
@mock_iotdata
def test_iotdata_list():
backend = server.create_backend_app("iot-data")
test_client = backend.test_client()
# just making sure that server is up
thing_name = 'nothing'
res = test_client.get('/things/{}/shadow'.format(thing_name))
res.status_code.should.equal(404)

View File

@ -19,4 +19,4 @@ def test_describe_clusters():
res = test_client.get('/?Action=DescribeClusters') res = test_client.get('/?Action=DescribeClusters')
result = res.data.decode("utf-8") result = res.data.decode("utf-8")
result.should.contain("<DescribeClustersResponse><DescribeClustersResult><Clusters></Clusters></DescribeClustersResult") result.should.contain("<Clusters></Clusters>")

View File

@@ -119,8 +119,10 @@ def test_rrset():
rrsets = conn.get_all_rrsets( rrsets = conn.get_all_rrsets(
zoneid, name="bar.foo.testdns.aws.com", type="A") zoneid, name="bar.foo.testdns.aws.com", type="A")
rrsets.should.have.length_of(1) rrsets.should.have.length_of(2)
rrsets[0].resource_records[0].should.equal('5.6.7.8') resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records]
resource_records.should.contain('1.2.3.4')
resource_records.should.contain('5.6.7.8')
rrsets = conn.get_all_rrsets( rrsets = conn.get_all_rrsets(
zoneid, name="foo.foo.testdns.aws.com", type="A") zoneid, name="foo.foo.testdns.aws.com", type="A")
@@ -160,7 +162,10 @@ def test_alias_rrset():
changes.commit() changes.commit()
rrsets = conn.get_all_rrsets(zoneid, type="A") rrsets = conn.get_all_rrsets(zoneid, type="A")
rrsets.should.have.length_of(1) rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records]
rrset_records.should.have.length_of(2)
rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com'))
rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com'))
rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com')
rrsets = conn.get_all_rrsets(zoneid, type="CNAME") rrsets = conn.get_all_rrsets(zoneid, type="CNAME")
rrsets.should.have.length_of(1) rrsets.should.have.length_of(1)
@@ -647,3 +652,60 @@ def test_change_resource_record_invalid():
response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id)
len(response['ResourceRecordSets']).should.equal(0) len(response['ResourceRecordSets']).should.equal(0)
@mock_route53
def test_list_resource_record_sets_name_type_filters():
conn = boto3.client('route53', region_name='us-east-1')
create_hosted_zone_response = conn.create_hosted_zone(
Name="db.",
CallerReference=str(hash('foo')),
HostedZoneConfig=dict(
PrivateZone=True,
Comment="db",
)
)
hosted_zone_id = create_hosted_zone_response['HostedZone']['Id']
def create_resource_record_set(rec_type, rec_name):
payload = {
'Comment': 'create {} record {}'.format(rec_type, rec_name),
'Changes': [
{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': rec_name,
'Type': rec_type,
'TTL': 10,
'ResourceRecords': [{
'Value': '127.0.0.1'
}]
}
}
]
}
conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload)
# record_type, record_name
all_records = [
('A', 'a.a.db'),
('A', 'a.b.db'),
('A', 'b.b.db'),
('CNAME', 'b.b.db'),
('CNAME', 'b.c.db'),
('CNAME', 'c.c.db')
]
for record_type, record_name in all_records:
create_resource_record_set(record_type, record_name)
start_with = 2
response = conn.list_resource_record_sets(
HostedZoneId=hosted_zone_id,
StartRecordType=all_records[start_with][0],
StartRecordName=all_records[start_with][1]
)
returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']]
len(returned_records).should.equal(len(all_records) - start_with)
for desired_record in all_records[start_with:]:
returned_records.should.contain(desired_record)
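list_resource_record_sets with StartRecordType/StartRecordName, as exercised above, begins the listing at the given record and returns it together with every record set that sorts after it; narrowing the result to a single name still has to happen on the client. A small sketch of that pattern, assuming a zone id obtained from create_hosted_zone (the helper name is illustrative):
import boto3

def a_record_values(zone_id, record_name, region='us-east-1'):
    conn = boto3.client('route53', region_name=region)
    # Start the listing at the record we care about ...
    response = conn.list_resource_record_sets(
        HostedZoneId=zone_id,
        StartRecordType='A',
        StartRecordName=record_name,
    )
    # ... then keep only exact matches, since later record sets are returned too.
    return [
        rr['Value']
        for rrset in response['ResourceRecordSets']
        if rrset['Type'] == 'A' and rrset['Name'].rstrip('.') == record_name.rstrip('.')
        for rr in rrset.get('ResourceRecords', [])
    ]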

View File

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals from __future__ import unicode_literals
import datetime import datetime
@@ -1775,6 +1774,30 @@ def test_boto3_put_object_tagging():
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
@mock_s3
def test_boto3_put_object_tagging_with_single_tag():
s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket'
key = 'key-with-tags'
s3.create_bucket(Bucket=bucket_name)
s3.put_object(
Bucket=bucket_name,
Key=key,
Body='test'
)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={'TagSet': [
{'Key': 'item1', 'Value': 'foo'}
]}
)
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
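put_object_tagging, shown above with a single tag, stores the TagSet as a list of Key/Value pairs, and get_object_tagging returns it in the same shape. A small convenience sketch for reading the tags back as a plain dict (the helper name and bucket/key are placeholders):
import boto3

def object_tags_as_dict(bucket, key, region='us-east-1'):
    s3 = boto3.client('s3', region_name=region)
    resp = s3.get_object_tagging(Bucket=bucket, Key=key)
    # Flatten the TagSet list of {'Key': ..., 'Value': ...} entries into a dict.
    return {tag['Key']: tag['Value'] for tag in resp['TagSet']}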
@mock_s3 @mock_s3
def test_boto3_get_object_tagging(): def test_boto3_get_object_tagging():
s3 = boto3.client('s3', region_name='us-east-1') s3 = boto3.client('s3', region_name='us-east-1')
@@ -1841,7 +1864,7 @@ def test_boto3_list_object_versions():
def test_boto3_delete_markers(): def test_boto3_delete_markers():
s3 = boto3.client('s3', region_name='us-east-1') s3 = boto3.client('s3', region_name='us-east-1')
bucket_name = 'mybucket' bucket_name = 'mybucket'
key = 'key-with-versions' key = u'key-with-versions-and-unicode-ó'
s3.create_bucket(Bucket=bucket_name) s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning( s3.put_bucket_versioning(
Bucket=bucket_name, Bucket=bucket_name,
@@ -1856,10 +1879,9 @@ def test_boto3_delete_markers():
Key=key, Key=key,
Body=body Body=body
) )
s3.delete_object(
Bucket=bucket_name, s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]})
Key=key
)
with assert_raises(ClientError) as e: with assert_raises(ClientError) as e:
s3.get_object( s3.get_object(
Bucket=bucket_name, Bucket=bucket_name,
@@ -1881,12 +1903,18 @@ def test_boto3_delete_markers():
Bucket=bucket_name Bucket=bucket_name
) )
response['Versions'].should.have.length_of(2) response['Versions'].should.have.length_of(2)
response['Versions'][-1]['IsLatest'].should.be.true
response['Versions'][0]['IsLatest'].should.be.false # We've asserted there are only 2 records, so one is newest and one is oldest
[(key_metadata['Key'], key_metadata['VersionId']) latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
for key_metadata in response['Versions']].should.equal( oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
[('key-with-versions', '0'), ('key-with-versions', '1')]
) # Double check ordering of version IDs
latest['VersionId'].should.equal('1')
oldest['VersionId'].should.equal('0')
# Double check the name is still unicode
latest['Key'].should.equal('key-with-versions-and-unicode-ó')
oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
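The delete-marker test above checks that a plain get_object on the key fails once the marker exists, while both earlier versions stay listed. With standard S3 semantics those versions also remain readable when a VersionId is passed explicitly; a small sketch of that read path, assuming the backend in use supports version-id reads (the helper name is illustrative):
import boto3

def read_old_version(bucket, key, version_id, region='us-east-1'):
    s3 = boto3.client('s3', region_name=region)
    # A delete marker only hides the key from plain GETs; an explicit
    # VersionId still addresses the underlying object version.
    resp = s3.get_object(Bucket=bucket, Key=key, VersionId=version_id)
    return resp['Body'].read()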
@mock_s3 @mock_s3

View File

@@ -13,12 +13,12 @@ def test_sns_server_get():
backend = server.create_backend_app("sns") backend = server.create_backend_app("sns")
test_client = backend.test_client() test_client = backend.test_client()
topic_data = test_client.action_data("CreateTopic", Name="test topic") topic_data = test_client.action_data("CreateTopic", Name="testtopic")
topic_data.should.contain("CreateTopicResult") topic_data.should.contain("CreateTopicResult")
topic_data.should.contain( topic_data.should.contain(
"<TopicArn>arn:aws:sns:us-east-1:123456789012:test topic</TopicArn>") "<TopicArn>arn:aws:sns:us-east-1:123456789012:testtopic</TopicArn>")
topics_data = test_client.action_data("ListTopics") topics_data = test_client.action_data("ListTopics")
topics_data.should.contain("ListTopicsResult") topics_data.should.contain("ListTopicsResult")
topic_data.should.contain( topic_data.should.contain(
"<TopicArn>arn:aws:sns:us-east-1:123456789012:test topic</TopicArn>") "<TopicArn>arn:aws:sns:us-east-1:123456789012:testtopic</TopicArn>")

View File

@@ -31,6 +31,26 @@ def test_create_and_delete_topic():
topics = topics_json["Topics"] topics = topics_json["Topics"]
topics.should.have.length_of(0) topics.should.have.length_of(0)
@mock_sns
def test_create_topic_should_be_idempotent():
conn = boto3.client("sns", region_name="us-east-1")
topic_arn = conn.create_topic(Name="some-topic")['TopicArn']
conn.set_topic_attributes(
TopicArn=topic_arn,
AttributeName="DisplayName",
AttributeValue="should_be_set"
)
topic_display_name = conn.get_topic_attributes(
TopicArn=topic_arn
)['Attributes']['DisplayName']
topic_display_name.should.be.equal("should_be_set")
# recreate topic to prove idempotency
topic_arn = conn.create_topic(Name="some-topic")['TopicArn']
topic_display_name = conn.get_topic_attributes(
TopicArn=topic_arn
)['Attributes']['DisplayName']
topic_display_name.should.be.equal("should_be_set")
@mock_sns @mock_sns
def test_get_missing_topic(): def test_get_missing_topic():
@@ -38,6 +58,27 @@ def test_get_missing_topic():
conn.get_topic_attributes.when.called_with( conn.get_topic_attributes.when.called_with(
TopicArn="a-fake-arn").should.throw(ClientError) TopicArn="a-fake-arn").should.throw(ClientError)
@mock_sns
def test_create_topic_must_meet_constraints():
conn = boto3.client("sns", region_name="us-east-1")
common_random_chars = [':', ";", "!", "@", "|", "^", "%"]
for char in common_random_chars:
conn.create_topic.when.called_with(
Name="no%s_invalidchar" % char).should.throw(ClientError)
conn.create_topic.when.called_with(
Name="no spaces allowed").should.throw(ClientError)
@mock_sns
def test_create_topic_should_be_of_certain_length():
conn = boto3.client("sns", region_name="us-east-1")
too_short = ""
conn.create_topic.when.called_with(
Name=too_short).should.throw(ClientError)
too_long = "x" * 257
conn.create_topic.when.called_with(
Name=too_long).should.throw(ClientError)
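The two tests above pin down the topic-name rules being enforced: names may only contain letters, digits, hyphens and underscores, and must be between 1 and 256 characters long. A client-side validator sketch mirroring those constraints (the regex is an assumption derived from the tests, not taken from the SNS backend):
import re

# Letters, digits, hyphens and underscores; length 1-256.
TOPIC_NAME_RE = re.compile(r'^[A-Za-z0-9_-]{1,256}$')

def is_valid_topic_name(name):
    return bool(TOPIC_NAME_RE.match(name))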
@mock_sns @mock_sns
def test_create_topic_in_multiple_regions(): def test_create_topic_in_multiple_regions():

View File

@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from __future__ import unicode_literals from __future__ import unicode_literals
import os
import boto import boto
import boto3 import boto3
@@ -8,14 +9,18 @@ from botocore.exceptions import ClientError
from boto.exception import SQSError from boto.exception import SQSError
from boto.sqs.message import RawMessage, Message from boto.sqs.message import RawMessage, Message
from freezegun import freeze_time
import base64 import base64
import json
import sure # noqa import sure # noqa
import time import time
import uuid
from moto import settings, mock_sqs, mock_sqs_deprecated from moto import settings, mock_sqs, mock_sqs_deprecated
from tests.helpers import requires_boto_gte from tests.helpers import requires_boto_gte
import tests.backport_assert_raises # noqa import tests.backport_assert_raises # noqa
from nose.tools import assert_raises from nose.tools import assert_raises
from nose import SkipTest
@mock_sqs @mock_sqs
@@ -93,8 +98,6 @@ def test_message_send_without_attributes():
msg.get('MD5OfMessageBody').should.equal( msg.get('MD5OfMessageBody').should.equal(
'58fd9edd83341c29f1aebba81c31e257') '58fd9edd83341c29f1aebba81c31e257')
msg.shouldnt.have.key('MD5OfMessageAttributes') msg.shouldnt.have.key('MD5OfMessageAttributes')
msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
'27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n') msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages() messages = queue.receive_messages()
@@ -118,8 +121,6 @@ def test_message_send_with_attributes():
'58fd9edd83341c29f1aebba81c31e257') '58fd9edd83341c29f1aebba81c31e257')
msg.get('MD5OfMessageAttributes').should.equal( msg.get('MD5OfMessageAttributes').should.equal(
'235c5c510d26fb653d073faed50ae77c') '235c5c510d26fb653d073faed50ae77c')
msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
'27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n') msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages() messages = queue.receive_messages()
@@ -143,8 +144,6 @@ def test_message_with_complex_attributes():
'58fd9edd83341c29f1aebba81c31e257') '58fd9edd83341c29f1aebba81c31e257')
msg.get('MD5OfMessageAttributes').should.equal( msg.get('MD5OfMessageAttributes').should.equal(
'8ae21a7957029ef04146b42aeaa18a22') '8ae21a7957029ef04146b42aeaa18a22')
msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
'27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n') msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages() messages = queue.receive_messages()
@@ -755,3 +754,181 @@ def test_delete_message_after_visibility_timeout():
m1_retrieved.delete() m1_retrieved.delete()
assert new_queue.count() == 0 assert new_queue.count() == 0
@mock_sqs
def test_batch_change_message_visibility():
if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
raise SkipTest('Cannot manipulate time in server mode')
with freeze_time("2015-01-01 12:00:00"):
sqs = boto3.client('sqs', region_name='us-east-1')
resp = sqs.create_queue(
QueueName='test-dlr-queue.fifo',
Attributes={'FifoQueue': 'true'}
)
queue_url = resp['QueueUrl']
sqs.send_message(QueueUrl=queue_url, MessageBody='msg1')
sqs.send_message(QueueUrl=queue_url, MessageBody='msg2')
sqs.send_message(QueueUrl=queue_url, MessageBody='msg3')
with freeze_time("2015-01-01 12:01:00"):
receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2)
len(receive_resp['Messages']).should.equal(2)
handles = [item['ReceiptHandle'] for item in receive_resp['Messages']]
entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles]
resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries)
len(resp['Successful']).should.equal(2)
with freeze_time("2015-01-01 14:00:00"):
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
len(resp['Messages']).should.equal(1)
with freeze_time("2015-01-01 16:00:00"):
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
len(resp['Messages']).should.equal(1)
with freeze_time("2015-01-02 12:00:00"):
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
len(resp['Messages']).should.equal(3)
@mock_sqs
def test_permissions():
client = boto3.client('sqs', region_name='us-east-1')
resp = client.create_queue(
QueueName='test-dlr-queue.fifo',
Attributes={'FifoQueue': 'true'}
)
queue_url = resp['QueueUrl']
client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*'])
client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage'])
with assert_raises(ClientError):
client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish'])
client.remove_permission(QueueUrl=queue_url, Label='account2')
with assert_raises(ClientError):
client.remove_permission(QueueUrl=queue_url, Label='non_existent')
@mock_sqs
def test_tags():
client = boto3.client('sqs', region_name='us-east-1')
resp = client.create_queue(
QueueName='test-dlr-queue.fifo',
Attributes={'FifoQueue': 'true'}
)
queue_url = resp['QueueUrl']
client.tag_queue(
QueueUrl=queue_url,
Tags={
'test1': 'value1',
'test2': 'value2',
}
)
resp = client.list_queue_tags(QueueUrl=queue_url)
resp['Tags'].should.contain('test1')
resp['Tags'].should.contain('test2')
client.untag_queue(
QueueUrl=queue_url,
TagKeys=['test2']
)
resp = client.list_queue_tags(QueueUrl=queue_url)
resp['Tags'].should.contain('test1')
resp['Tags'].should_not.contain('test2')
@mock_sqs
def test_create_fifo_queue_with_dlq():
sqs = boto3.client('sqs', region_name='us-east-1')
resp = sqs.create_queue(
QueueName='test-dlr-queue.fifo',
Attributes={'FifoQueue': 'true'}
)
queue_url1 = resp['QueueUrl']
queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
resp = sqs.create_queue(
QueueName='test-dlr-queue',
Attributes={'FifoQueue': 'false'}
)
queue_url2 = resp['QueueUrl']
queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn']
sqs.create_queue(
QueueName='test-queue.fifo',
Attributes={
'FifoQueue': 'true',
'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
}
)
# Can't have a FIFO queue with a non-FIFO DLQ
with assert_raises(ClientError):
sqs.create_queue(
QueueName='test-queue2.fifo',
Attributes={
'FifoQueue': 'true',
'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2})
}
)
@mock_sqs
def test_queue_with_dlq():
if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
raise SkipTest('Cannot manipulate time in server mode')
sqs = boto3.client('sqs', region_name='us-east-1')
with freeze_time("2015-01-01 12:00:00"):
resp = sqs.create_queue(
QueueName='test-dlr-queue.fifo',
Attributes={'FifoQueue': 'true'}
)
queue_url1 = resp['QueueUrl']
queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
resp = sqs.create_queue(
QueueName='test-queue.fifo',
Attributes={
'FifoQueue': 'true',
'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
}
)
queue_url2 = resp['QueueUrl']
sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1')
sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2')
with freeze_time("2015-01-01 13:00:00"):
resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
resp['Messages'][0]['Body'].should.equal('msg1')
with freeze_time("2015-01-01 13:01:00"):
resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
resp['Messages'][0]['Body'].should.equal('msg1')
with freeze_time("2015-01-01 13:02:00"):
resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
len(resp['Messages']).should.equal(1)
resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0)
resp['Messages'][0]['Body'].should.equal('msg1')
# Might as well test list source queues
resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1)
resp['queueUrls'][0].should.equal(queue_url2)
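Both DLQ tests above wire queues together through the RedrivePolicy attribute, a JSON document carrying deadLetterTargetArn and maxReceiveCount; messages that keep being received without being deleted eventually move to the target queue. A sketch of setting that up for ordinary (non-FIFO) queues, assuming the same attribute shape (the helper and queue names are illustrative):
import json
import boto3

def create_queue_with_dlq(name, max_receive_count=2, region='us-east-1'):
    sqs = boto3.client('sqs', region_name=region)
    # Create the dead-letter queue first so its ARN can go into the policy.
    dlq_url = sqs.create_queue(QueueName=name + '-dlq')['QueueUrl']
    dlq_arn = sqs.get_queue_attributes(
        QueueUrl=dlq_url, AttributeNames=['QueueArn'])['Attributes']['QueueArn']
    main_url = sqs.create_queue(
        QueueName=name,
        Attributes={'RedrivePolicy': json.dumps({
            'deadLetterTargetArn': dlq_arn,
            'maxReceiveCount': max_receive_count,
        })})['QueueUrl']
    return main_url, dlq_url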

View File

@@ -47,6 +47,51 @@ def test_delete_parameters():
len(response['Parameters']).should.equal(0) len(response['Parameters']).should.equal(0)
@mock_ssm
def test_get_parameters_by_path():
client = boto3.client('ssm', region_name='us-east-1')
client.put_parameter(
Name='/foo/name1',
Description='A test parameter',
Value='value1',
Type='String')
client.put_parameter(
Name='/foo/name2',
Description='A test parameter',
Value='value2',
Type='String')
client.put_parameter(
Name='/bar/name3',
Description='A test parameter',
Value='value3',
Type='String')
client.put_parameter(
Name='/bar/name3/name4',
Description='A test parameter',
Value='value4',
Type='String')
response = client.get_parameters_by_path(Path='/foo')
len(response['Parameters']).should.equal(2)
{p['Value'] for p in response['Parameters']}.should.equal(
set(['value1', 'value2'])
)
response = client.get_parameters_by_path(Path='/bar', Recursive=False)
len(response['Parameters']).should.equal(1)
response['Parameters'][0]['Value'].should.equal('value3')
response = client.get_parameters_by_path(Path='/bar', Recursive=True)
len(response['Parameters']).should.equal(2)
{p['Value'] for p in response['Parameters']}.should.equal(
set(['value3', 'value4'])
)
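get_parameters_by_path, as tested above, treats parameter names as a /-separated hierarchy: Recursive=False returns only the direct children of the path, while Recursive=True also descends into nested paths. A small sketch collecting the result into a dict (pagination via NextToken is ignored for brevity; the helper name is illustrative):
import boto3

def parameters_under(path, recursive=True, region='us-east-1'):
    ssm = boto3.client('ssm', region_name=region)
    resp = ssm.get_parameters_by_path(Path=path, Recursive=recursive)
    # Map each parameter's full name to its value.
    return {p['Name']: p['Value'] for p in resp['Parameters']}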
@mock_ssm @mock_ssm
def test_put_parameter(): def test_put_parameter():
client = boto3.client('ssm', region_name='us-east-1') client = boto3.client('ssm', region_name='us-east-1')

View File

@@ -1,5 +1,5 @@
[tox] [tox]
envlist = py26, py27, py33, py34 envlist = py27, py36
[testenv] [testenv]
deps = deps =

View File

@@ -24,7 +24,7 @@ while True:
break break
except EXCEPTIONS: except EXCEPTIONS:
elapsed_s = time.time() - start_ts elapsed_s = time.time() - start_ts
if elapsed_s > 30: if elapsed_s > 60:
raise raise
print('.') print('.')