Merge branch 'master' into jack/validate-protocol-on-target-group-creation
commit c186733129
@@ -7,10 +7,10 @@ Latest
-----

* Implemented Batch
* Fixed regression with moto\_server dashboard
* Fixed regression with moto_server dashboard
* Fixed and closed many outstanding bugs
* Fixed serious performance problem with EC2 reservation listing
* Fixed Route53 list\_resource\_record\_sets
* Fixed Route53 list_resource_record_sets

1.1.23
-----
IMPLEMENTATION_COVERAGE.md (3538 lines changed)
File diff suppressed because it is too large

Makefile (9 lines changed)
@@ -29,7 +29,14 @@ tag_github_release:
git tag `python setup.py --version`
git push origin `python setup.py --version`

publish: upload_pypi_artifact push_dockerhub_image tag_github_release
publish: implementation_coverage \
upload_pypi_artifact \
tag_github_release \
push_dockerhub_image

implementation_coverage:
./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md
git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage"

scaffold:
@pip install -r requirements-dev.txt > /dev/null
README.md (10 lines changed)
@@ -68,10 +68,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
|------------------------------------------------------------------------------|
| Cloudwatch | @mock_cloudwatch | basic endpoints done |
|------------------------------------------------------------------------------|
| CloudwatchEvents | @mock_events | all endpoints done |
|------------------------------------------------------------------------------|
| Data Pipeline | @mock_datapipeline| basic endpoints done |
|------------------------------------------------------------------------------|
| DynamoDB | @mock_dynamodb | core endpoints done |
| DynamoDB2 | @mock_dynamodb2 | core endpoints + partial indexes |
| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes |
|------------------------------------------------------------------------------|
| EC2 | @mock_ec2 | core endpoints done |
| - AMI | | core endpoints done |
@@ -86,7 +88,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
|------------------------------------------------------------------------------|
| ELB | @mock_elb | core endpoints done |
|------------------------------------------------------------------------------|
| ELBv2 | @mock_elbv2 | core endpoints done |
| ELBv2 | @mock_elbv2 | all endpoints done |
|------------------------------------------------------------------------------|
| EMR | @mock_emr | core endpoints done |
|------------------------------------------------------------------------------|
@@ -115,7 +117,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
|------------------------------------------------------------------------------|
| S3 | @mock_s3 | core endpoints done |
|------------------------------------------------------------------------------|
| SES | @mock_ses | core endpoints done |
| SES | @mock_ses | all endpoints done |
|------------------------------------------------------------------------------|
| SNS | @mock_sns | all endpoints done |
|------------------------------------------------------------------------------|
@@ -127,7 +129,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3.
|------------------------------------------------------------------------------|
| SWF | @mock_swf | basic endpoints done |
|------------------------------------------------------------------------------|
| X-Ray | @mock_xray | core endpoints done |
| X-Ray | @mock_xray | all endpoints done |
|------------------------------------------------------------------------------|
```
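Each decorator in the table wraps a boto3 client around moto's in-memory backend. A minimal sketch exercising one of the services touched by this merge (ELBv2 target groups); the names and CIDR below are illustrative, not part of the change:

```python
import boto3
from moto import mock_ec2, mock_elbv2


@mock_ec2
@mock_elbv2
def list_target_groups():
    # Target groups need a VPC, so create one with the EC2 mock first.
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']

    elbv2 = boto3.client('elbv2', region_name='us-east-1')
    elbv2.create_target_group(Name='my-targets', Protocol='HTTP', Port=80, VpcId=vpc_id)

    groups = elbv2.describe_target_groups()['TargetGroups']
    print([g['TargetGroupName'] for g in groups])  # ['my-targets']
```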
@@ -314,7 +314,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """<DescribeLaunchConfigurationsRespon
{% endif %}
<InstanceType>{{ launch_configuration.instance_type }}</InstanceType>
<LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:
9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc</LaunchConfigurationARN>
9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }}</LaunchConfigurationARN>
{% if launch_configuration.block_device_mappings %}
<BlockDeviceMappings>
{% for mount_point, mapping in launch_configuration.block_device_mappings.items() %}
@@ -504,7 +504,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
<HealthCheckGracePeriod>{{ group.health_check_period }}</HealthCheckGracePeriod>
<DefaultCooldown>{{ group.default_cooldown }}</DefaultCooldown>
<AutoScalingGroupARN>arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb
:autoScalingGroupName/my-test-asg-lbs</AutoScalingGroupARN>
:autoScalingGroupName/{{ group.name }}</AutoScalingGroupARN>
{% if group.termination_policies %}
<TerminationPolicies>
{% for policy in group.termination_policies %}
@@ -298,7 +298,12 @@ class LambdaFunction(BaseModel):
volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs)
finally:
if container:
exit_code = container.wait()
try:
exit_code = container.wait(timeout=300)
except requests.exceptions.ReadTimeout:
exit_code = -1
container.stop()
container.kill()
output = container.logs(stdout=False, stderr=True)
output += container.logs(stdout=True, stderr=False)
container.remove()
@@ -37,6 +37,7 @@ from moto.sts import sts_backends
from moto.xray import xray_backends
from moto.batch import batch_backends


BACKENDS = {
'acm': acm_backends,
'apigateway': apigateway_backends,
@@ -74,7 +75,7 @@ BACKENDS = {
'sts': sts_backends,
'route53': route53_backends,
'lambda': lambda_backends,
'xray': xray_backends
'xray': xray_backends,
}
@@ -15,6 +15,7 @@ from moto.dynamodb import models as dynamodb_models
from moto.ec2 import models as ec2_models
from moto.ecs import models as ecs_models
from moto.elb import models as elb_models
from moto.elbv2 import models as elbv2_models
from moto.iam import models as iam_models
from moto.kinesis import models as kinesis_models
from moto.kms import models as kms_models
@@ -61,6 +62,9 @@ MODEL_MAP = {
"AWS::ECS::TaskDefinition": ecs_models.TaskDefinition,
"AWS::ECS::Service": ecs_models.Service,
"AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer,
"AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer,
"AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup,
"AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener,
"AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline,
"AWS::IAM::InstanceProfile": iam_models.InstanceProfile,
"AWS::IAM::Role": iam_models.Role,
@@ -326,7 +330,7 @@ def parse_output(output_logical_id, output_json, resources_map):
output_json = clean_json(output_json, resources_map)
output = Output()
output.key = output_logical_id
output.value = output_json['Value']
output.value = clean_json(output_json['Value'], resources_map)
output.description = output_json.get('Description')
return output
@@ -1,4 +1,7 @@
import json

from moto.core import BaseBackend, BaseModel
from moto.core.exceptions import RESTError
import boto.ec2.cloudwatch
import datetime

@@ -35,9 +38,26 @@ class FakeAlarm(BaseModel):
self.ok_actions = ok_actions
self.insufficient_data_actions = insufficient_data_actions
self.unit = unit
self.state_updated_timestamp = datetime.datetime.utcnow()
self.configuration_updated_timestamp = datetime.datetime.utcnow()

self.history = []

self.state_reason = ''
self.state_reason_data = '{}'
self.state = 'OK'
self.state_updated_timestamp = datetime.datetime.utcnow()

def update_state(self, reason, reason_data, state_value):
# History type, that then decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action
self.history.append(
('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp)
)

self.state_reason = reason
self.state_reason_data = reason_data
self.state = state_value
self.state_updated_timestamp = datetime.datetime.utcnow()


class MetricDatum(BaseModel):

@@ -122,10 +142,8 @@ class CloudWatchBackend(BaseBackend):
if alarm.name in alarm_names
]

def get_alarms_by_state_value(self, state):
raise NotImplementedError(
"DescribeAlarm by state is not implemented in moto."
)
def get_alarms_by_state_value(self, target_state):
return filter(lambda alarm: alarm.state == target_state, self.alarms.values())

def delete_alarms(self, alarm_names):
for alarm_name in alarm_names:
@@ -164,6 +182,21 @@ class CloudWatchBackend(BaseBackend):
def get_dashboard(self, dashboard):
return self.dashboards.get(dashboard)

def set_alarm_state(self, alarm_name, reason, reason_data, state_value):
try:
if reason_data is not None:
json.loads(reason_data)
except ValueError:
raise RESTError('InvalidFormat', 'StateReasonData is invalid JSON')

if alarm_name not in self.alarms:
raise RESTError('ResourceNotFound', 'Alarm {0} not found'.format(alarm_name), status=404)

if state_value not in ('OK', 'ALARM', 'INSUFFICIENT_DATA'):
raise RESTError('InvalidParameterValue', 'StateValue is not one of OK | ALARM | INSUFFICIENT_DATA')

self.alarms[alarm_name].update_state(reason, reason_data, state_value)


class LogGroup(BaseModel):
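The new `set_alarm_state` backend method pairs with the `get_alarms_by_state_value` filter above. A rough usage sketch through boto3 under `@mock_cloudwatch`; the alarm parameters below are made up for illustration:

```python
import boto3
from moto import mock_cloudwatch


@mock_cloudwatch
def flip_alarm_state():
    client = boto3.client('cloudwatch', region_name='us-east-1')
    client.put_metric_alarm(
        AlarmName='my-alarm',
        Namespace='AWS/EC2',
        MetricName='CPUUtilization',
        Statistic='Average',
        Period=60,
        EvaluationPeriods=1,
        Threshold=90.0,
        ComparisonOperator='GreaterThanThreshold',
    )
    client.set_alarm_state(
        AlarmName='my-alarm',
        StateValue='ALARM',
        StateReason='flipped manually for a test',
        StateReasonData='{"triggered": true}',  # must be valid JSON or moto answers InvalidFormat
    )
    # describe_alarms with a StateValue filter goes through get_alarms_by_state_value
    alarms = client.describe_alarms(StateValue='ALARM')['MetricAlarms']
    assert len(alarms) == 1
```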
@@ -1,4 +1,5 @@
import json
from moto.core.utils import amzn_request_id
from moto.core.responses import BaseResponse
from .models import cloudwatch_backends

@@ -13,6 +14,7 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(ERROR_RESPONSE_TEMPLATE)
return template.render(code=code, message=message), dict(status=status)

@amzn_request_id
def put_metric_alarm(self):
name = self._get_param('AlarmName')
namespace = self._get_param('Namespace')
@@ -40,6 +42,7 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(PUT_METRIC_ALARM_TEMPLATE)
return template.render(alarm=alarm)

@amzn_request_id
def describe_alarms(self):
action_prefix = self._get_param('ActionPrefix')
alarm_name_prefix = self._get_param('AlarmNamePrefix')
@@ -62,12 +65,14 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(DESCRIBE_ALARMS_TEMPLATE)
return template.render(alarms=alarms)

@amzn_request_id
def delete_alarms(self):
alarm_names = self._get_multi_param('AlarmNames.member')
self.cloudwatch_backend.delete_alarms(alarm_names)
template = self.response_template(DELETE_METRIC_ALARMS_TEMPLATE)
return template.render()

@amzn_request_id
def put_metric_data(self):
namespace = self._get_param('Namespace')
metric_data = []
@@ -99,11 +104,13 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(PUT_METRIC_DATA_TEMPLATE)
return template.render()

@amzn_request_id
def list_metrics(self):
metrics = self.cloudwatch_backend.get_all_metrics()
template = self.response_template(LIST_METRICS_TEMPLATE)
return template.render(metrics=metrics)

@amzn_request_id
def delete_dashboards(self):
dashboards = self._get_multi_param('DashboardNames.member')
if dashboards is None:
@@ -116,18 +123,23 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(DELETE_DASHBOARD_TEMPLATE)
return template.render()

@amzn_request_id
def describe_alarm_history(self):
raise NotImplementedError()

@amzn_request_id
def describe_alarms_for_metric(self):
raise NotImplementedError()

@amzn_request_id
def disable_alarm_actions(self):
raise NotImplementedError()

@amzn_request_id
def enable_alarm_actions(self):
raise NotImplementedError()

@amzn_request_id
def get_dashboard(self):
dashboard_name = self._get_param('DashboardName')

@@ -138,9 +150,11 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(GET_DASHBOARD_TEMPLATE)
return template.render(dashboard=dashboard)

@amzn_request_id
def get_metric_statistics(self):
raise NotImplementedError()

@amzn_request_id
def list_dashboards(self):
prefix = self._get_param('DashboardNamePrefix', '')

@@ -149,6 +163,7 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(LIST_DASHBOARD_RESPONSE)
return template.render(dashboards=dashboards)

@amzn_request_id
def put_dashboard(self):
name = self._get_param('DashboardName')
body = self._get_param('DashboardBody')
@@ -163,14 +178,23 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(PUT_DASHBOARD_RESPONSE)
return template.render()

@amzn_request_id
def set_alarm_state(self):
raise NotImplementedError()
alarm_name = self._get_param('AlarmName')
reason = self._get_param('StateReason')
reason_data = self._get_param('StateReasonData')
state_value = self._get_param('StateValue')

self.cloudwatch_backend.set_alarm_state(alarm_name, reason, reason_data, state_value)

template = self.response_template(SET_ALARM_STATE_TEMPLATE)
return template.render()


PUT_METRIC_ALARM_TEMPLATE = """<PutMetricAlarmResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
2690d7eb-ed86-11dd-9877-6fad448a8419
{{ request_id }}
</RequestId>
</ResponseMetadata>
</PutMetricAlarmResponse>"""
@@ -229,7 +253,7 @@ DESCRIBE_ALARMS_TEMPLATE = """<DescribeAlarmsResponse xmlns="http://monitoring.a
DELETE_METRIC_ALARMS_TEMPLATE = """<DeleteMetricAlarmResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
2690d7eb-ed86-11dd-9877-6fad448a8419
{{ request_id }}
</RequestId>
</ResponseMetadata>
</DeleteMetricAlarmResponse>"""
@@ -237,7 +261,7 @@ DELETE_METRIC_ALARMS_TEMPLATE = """<DeleteMetricAlarmResponse xmlns="http://moni
PUT_METRIC_DATA_TEMPLATE = """<PutMetricDataResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
2690d7eb-ed86-11dd-9877-6fad448a8419
{{ request_id }}
</RequestId>
</ResponseMetadata>
</PutMetricDataResponse>"""
@@ -271,7 +295,7 @@ PUT_DASHBOARD_RESPONSE = """<PutDashboardResponse xmlns="http://monitoring.amazo
<DashboardValidationMessages/>
</PutDashboardResult>
<ResponseMetadata>
<RequestId>44b1d4d8-9fa3-11e7-8ad3-41b86ac5e49e</RequestId>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</PutDashboardResponse>"""

@@ -289,14 +313,14 @@ LIST_DASHBOARD_RESPONSE = """<ListDashboardsResponse xmlns="http://monitoring.am
</DashboardEntries>
</ListDashboardsResult>
<ResponseMetadata>
<RequestId>c3773873-9fa5-11e7-b315-31fcc9275d62</RequestId>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</ListDashboardsResponse>"""

DELETE_DASHBOARD_TEMPLATE = """<DeleteDashboardsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<DeleteDashboardsResult/>
<ResponseMetadata>
<RequestId>68d1dc8c-9faa-11e7-a694-df2715690df2</RequestId>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</DeleteDashboardsResponse>"""

@@ -307,16 +331,22 @@ GET_DASHBOARD_TEMPLATE = """<GetDashboardResponse xmlns="http://monitoring.amazo
<DashboardName>{{ dashboard.name }}</DashboardName>
</GetDashboardResult>
<ResponseMetadata>
<RequestId>e3c16bb0-9faa-11e7-b315-31fcc9275d62</RequestId>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</GetDashboardResponse>
"""

SET_ALARM_STATE_TEMPLATE = """<SetAlarmStateResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>{{ request_id }}</RequestId>
</ResponseMetadata>
</SetAlarmStateResponse>"""

ERROR_RESPONSE_TEMPLATE = """<ErrorResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<Error>
<Type>Sender</Type>
<Code>{{ code }}</Code>
<Message>{{ message }}</Message>
</Error>
<RequestId>5e45fd1e-9fa3-11e7-b720-89e8821d38c4</RequestId>
<RequestId>{{ request_id }}</RequestId>
</ErrorResponse>"""
@@ -34,6 +34,8 @@ ERROR_JSON_RESPONSE = u"""{


class RESTError(HTTPException):
code = 400

templates = {
'single_error': SINGLE_ERROR_RESPONSE,
'error': ERROR_RESPONSE,
@@ -54,7 +56,6 @@ class DryRunClientError(RESTError):


class JsonRESTError(RESTError):

def __init__(self, error_type, message, template='error_json', **kwargs):
super(JsonRESTError, self).__init__(
error_type, message, template, **kwargs)
@@ -17,6 +17,8 @@ from six.moves.urllib.parse import parse_qs, urlparse
import xmltodict
from pkg_resources import resource_filename
from werkzeug.exceptions import HTTPException

import boto3
from moto.compat import OrderedDict
from moto.core.utils import camelcase_to_underscores, method_names_from_class

@@ -103,7 +105,8 @@ class _TemplateEnvironmentMixin(object):
class BaseResponse(_TemplateEnvironmentMixin):

default_region = 'us-east-1'
region_regex = r'\.(.+?)\.amazonaws\.com'
# to extract region, use [^.]
region_regex = r'\.([^.]+?)\.amazonaws\.com'
aws_service_spec = None

@classmethod
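The `[^.]` tweak matters when the host has more than one label in front of the region. A quick illustration (hostname chosen arbitrarily):

```python
import re

OLD = r'\.(.+?)\.amazonaws\.com'
NEW = r'\.([^.]+?)\.amazonaws\.com'

host = 'data.iot.eu-west-2.amazonaws.com'
print(re.search(OLD, host).group(1))  # 'iot.eu-west-2' -- the lazy group still spans two labels
print(re.search(NEW, host).group(1))  # 'eu-west-2'     -- only the label right before amazonaws.com
```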
@@ -151,12 +154,12 @@ class BaseResponse(_TemplateEnvironmentMixin):
querystring.update(headers)

querystring = _decode_dict(querystring)

self.uri = full_url
self.path = urlparse(full_url).path
self.querystring = querystring
self.method = request.method
self.region = self.get_region_from_url(request, full_url)
self.uri_match = None

self.headers = request.headers
if 'host' not in self.headers:
@@ -178,6 +181,58 @@ class BaseResponse(_TemplateEnvironmentMixin):
self.setup_class(request, full_url, headers)
return self.call_action()

def uri_to_regexp(self, uri):
"""converts uri w/ placeholder to regexp
'/cars/{carName}/drivers/{DriverName}'
-> '^/cars/.*/drivers/[^/]*$'

'/cars/{carName}/drivers/{DriverName}/drive'
-> '^/cars/.*/drivers/.*/drive$'

"""
def _convert(elem, is_last):
if not re.match('^{.*}$', elem):
return elem
name = elem.replace('{', '').replace('}', '')
if is_last:
return '(?P<%s>[^/]*)' % name
return '(?P<%s>.*)' % name

elems = uri.split('/')
num_elems = len(elems)
regexp = '^{}$'.format('/'.join([_convert(elem, (i == num_elems - 1)) for i, elem in enumerate(elems)]))
return regexp

def _get_action_from_method_and_request_uri(self, method, request_uri):
"""basically used for `rest-json` APIs
You can refer to example from link below
https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json
"""

# service response class should have 'SERVICE_NAME' class member,
# if you want to get action from method and url
if not hasattr(self, 'SERVICE_NAME'):
return None
service = self.SERVICE_NAME
conn = boto3.client(service)

# make cache if it does not exist yet
if not hasattr(self, 'method_urls'):
self.method_urls = defaultdict(lambda: defaultdict(str))
op_names = conn._service_model.operation_names
for op_name in op_names:
op_model = conn._service_model.operation_model(op_name)
_method = op_model.http['method']
uri_regexp = self.uri_to_regexp(op_model.http['requestUri'])
self.method_urls[_method][uri_regexp] = op_model.name
regexp_and_names = self.method_urls[method]
for regexp, name in regexp_and_names.items():
match = re.match(regexp, request_uri)
self.uri_match = match
if match:
return name
return None

def _get_action(self):
action = self.querystring.get('Action', [""])[0]
if not action:  # Some services use a header for the action
@@ -186,7 +241,9 @@ class BaseResponse(_TemplateEnvironmentMixin):
'x-amz-target') or self.headers.get('X-Amz-Target')
if match:
action = match.split(".")[-1]

# get action from method and uri
if not action:
return self._get_action_from_method_and_request_uri(self.method, self.path)
return action

def call_action(self):
@@ -221,6 +278,22 @@ class BaseResponse(_TemplateEnvironmentMixin):
val = self.querystring.get(param_name)
if val is not None:
return val[0]

# try to get json body parameter
if self.body is not None:
try:
return json.loads(self.body)[param_name]
except ValueError:
pass
except KeyError:
pass
# try to get path parameter
if self.uri_match:
try:
return self.uri_match.group(param_name)
except IndexError:
# do nothing if param is not found
pass
return if_none

def _get_int_param(self, param_name, if_none=None):
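For the rest-json routing above, `uri_to_regexp` turns a botocore `requestUri` into a pattern with named groups, and `_get_param` later falls back to `self.uri_match.group(param_name)` when a value is in the path rather than the querystring or JSON body. A small sketch of the kind of pattern it produces; the concrete path and values are illustrative:

```python
import re

# Pattern uri_to_regexp would build for '/cars/{carName}/drivers/{DriverName}'
# (the docstring shows the unnamed form; _convert actually emits named groups).
pattern = r'^/cars/(?P<carName>.*)/drivers/(?P<DriverName>[^/]*)$'

match = re.match(pattern, '/cars/vw-beetle/drivers/jane')
print(match.group('carName'))     # vw-beetle
print(match.group('DriverName'))  # jane
```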
@@ -272,9 +272,6 @@ def amzn_request_id(f):
else:
status, new_headers, body = response
headers.update(new_headers)
# Cast status to string
if "status" in headers:
headers['status'] = str(headers['status'])

request_id = gen_amzn_requestid_long(headers)
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
from .models import dynamodb_backend2
from .models import dynamodb_backends as dynamodb_backends2
from ..core.models import base_decorator, deprecated_base_decorator

dynamodb_backends2 = {"global": dynamodb_backend2}
mock_dynamodb2 = dynamodb_backend2.decorator
mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator
dynamodb_backend2 = dynamodb_backends2['us-east-1']
mock_dynamodb2 = base_decorator(dynamodb_backends2)
mock_dynamodb2_deprecated = deprecated_base_decorator(dynamodb_backends2)
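`mock_dynamodb2` is now backed by one `DynamoDBBackend` per region instead of a single "global" backend. A sketch of the visible effect; the table definition is illustrative:

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def per_region_tables():
    # Each region gets its own backend, so tables do not leak across regions.
    west = boto3.client('dynamodb', region_name='us-west-2')
    east = boto3.client('dynamodb', region_name='us-east-1')
    west.create_table(
        TableName='users',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    print(west.list_tables()['TableNames'])  # ['users']
    print(east.list_tables()['TableNames'])  # []
```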
@@ -5,9 +5,11 @@ import decimal
import json
import re

import boto3
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from moto.core.exceptions import JsonRESTError
from .comparisons import get_comparison_func, get_filter_expression, Op


@@ -271,6 +273,10 @@ class Table(BaseModel):
self.items = defaultdict(dict)
self.table_arn = self._generate_arn(table_name)
self.tags = []
self.ttl = {
'TimeToLiveStatus': 'DISABLED'  # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',
# 'AttributeName': 'string'  # Can contain this
}

def _generate_arn(self, name):
return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
@@ -577,9 +583,16 @@ class Table(BaseModel):

class DynamoDBBackend(BaseBackend):

def __init__(self):
def __init__(self, region_name=None):
self.region_name = region_name
self.tables = OrderedDict()

def reset(self):
region_name = self.region_name

self.__dict__ = {}
self.__init__(region_name)

def create_table(self, name, **params):
if name in self.tables:
return None
@@ -595,6 +608,11 @@ class DynamoDBBackend(BaseBackend):
if self.tables[table].table_arn == table_arn:
self.tables[table].tags.extend(tags)

def untag_resource(self, table_arn, tag_keys):
for table in self.tables:
if self.tables[table].table_arn == table_arn:
self.tables[table].tags = [tag for tag in self.tables[table].tags if tag['Key'] not in tag_keys]

def list_tags_of_resource(self, table_arn):
required_table = None
for table in self.tables:
@@ -796,5 +814,28 @@ class DynamoDBBackend(BaseBackend):
hash_key, range_key = self.get_keys_value(table, keys)
return table.delete_item(hash_key, range_key)

def update_ttl(self, table_name, ttl_spec):
table = self.tables.get(table_name)
if table is None:
raise JsonRESTError('ResourceNotFound', 'Table not found')

dynamodb_backend2 = DynamoDBBackend()
if 'Enabled' not in ttl_spec or 'AttributeName' not in ttl_spec:
raise JsonRESTError('InvalidParameterValue',
'TimeToLiveSpecification does not contain Enabled and AttributeName')

if ttl_spec['Enabled']:
table.ttl['TimeToLiveStatus'] = 'ENABLED'
else:
table.ttl['TimeToLiveStatus'] = 'DISABLED'
table.ttl['AttributeName'] = ttl_spec['AttributeName']

def describe_ttl(self, table_name):
table = self.tables.get(table_name)
if table is None:
raise JsonRESTError('ResourceNotFound', 'Table not found')

return table.ttl


available_regions = boto3.session.Session().get_available_regions("dynamodb")
dynamodb_backends = {region: DynamoDBBackend(region_name=region) for region in available_regions}
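The per-table `ttl` dict added above is what UpdateTimeToLive and DescribeTimeToLive read and write. A rough round-trip sketch via boto3; the table and attribute names are made up:

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def ttl_round_trip():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='sessions',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    client.update_time_to_live(
        TableName='sessions',
        TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'expires_at'},
    )
    ttl = client.describe_time_to_live(TableName='sessions')
    print(ttl['TimeToLiveDescription'])
    # {'TimeToLiveStatus': 'ENABLED', 'AttributeName': 'expires_at'}
```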
@@ -5,7 +5,7 @@ import re

from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .models import dynamodb_backend2, dynamo_json_dump
from .models import dynamodb_backends, dynamo_json_dump


class DynamoHandler(BaseResponse):
@@ -24,6 +24,14 @@ class DynamoHandler(BaseResponse):
def error(self, type_, message, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message})

@property
def dynamodb_backend(self):
"""
:return: DynamoDB2 Backend
:rtype: moto.dynamodb2.models.DynamoDBBackend
"""
return dynamodb_backends[self.region]

@amzn_request_id
def call_action(self):
self.body = json.loads(self.body or '{}')
@@ -46,10 +54,10 @@ class DynamoHandler(BaseResponse):
limit = body.get('Limit', 100)
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
start = list(dynamodb_backend2.tables.keys()).index(last) + 1
start = list(self.dynamodb_backend.tables.keys()).index(last) + 1
else:
start = 0
all_tables = list(dynamodb_backend2.tables.keys())
all_tables = list(self.dynamodb_backend.tables.keys())
if limit:
tables = all_tables[start:start + limit]
else:
@@ -74,12 +82,12 @@ class DynamoHandler(BaseResponse):
global_indexes = body.get("GlobalSecondaryIndexes", [])
local_secondary_indexes = body.get("LocalSecondaryIndexes", [])

table = dynamodb_backend2.create_table(table_name,
schema=key_schema,
throughput=throughput,
attr=attr,
global_indexes=global_indexes,
indexes=local_secondary_indexes)
table = self.dynamodb_backend.create_table(table_name,
schema=key_schema,
throughput=throughput,
attr=attr,
global_indexes=global_indexes,
indexes=local_secondary_indexes)
if table is not None:
return dynamo_json_dump(table.describe())
else:
@@ -88,7 +96,7 @@ class DynamoHandler(BaseResponse):

def delete_table(self):
name = self.body['TableName']
table = dynamodb_backend2.delete_table(name)
table = self.dynamodb_backend.delete_table(name)
if table is not None:
return dynamo_json_dump(table.describe())
else:
@@ -96,15 +104,21 @@ class DynamoHandler(BaseResponse):
return self.error(er, 'Requested resource not found')

def tag_resource(self):
tags = self.body['Tags']
table_arn = self.body['ResourceArn']
dynamodb_backend2.tag_resource(table_arn, tags)
return json.dumps({})
tags = self.body['Tags']
self.dynamodb_backend.tag_resource(table_arn, tags)
return ''

def untag_resource(self):
table_arn = self.body['ResourceArn']
tags = self.body['TagKeys']
self.dynamodb_backend.untag_resource(table_arn, tags)
return ''

def list_tags_of_resource(self):
try:
table_arn = self.body['ResourceArn']
all_tags = dynamodb_backend2.list_tags_of_resource(table_arn)
all_tags = self.dynamodb_backend.list_tags_of_resource(table_arn)
all_tag_keys = [tag['Key'] for tag in all_tags]
marker = self.body.get('NextToken')
if marker:
@@ -127,17 +141,17 @@ class DynamoHandler(BaseResponse):
def update_table(self):
name = self.body['TableName']
if 'GlobalSecondaryIndexUpdates' in self.body:
table = dynamodb_backend2.update_table_global_indexes(
table = self.dynamodb_backend.update_table_global_indexes(
name, self.body['GlobalSecondaryIndexUpdates'])
if 'ProvisionedThroughput' in self.body:
throughput = self.body["ProvisionedThroughput"]
table = dynamodb_backend2.update_table_throughput(name, throughput)
table = self.dynamodb_backend.update_table_throughput(name, throughput)
return dynamo_json_dump(table.describe())

def describe_table(self):
name = self.body['TableName']
try:
table = dynamodb_backend2.tables[name]
table = self.dynamodb_backend.tables[name]
except KeyError:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, 'Requested resource not found')
@@ -188,8 +202,7 @@ class DynamoHandler(BaseResponse):
expected[not_exists_m.group(1)] = {'Exists': False}

try:
result = dynamodb_backend2.put_item(
name, item, expected, overwrite)
result = self.dynamodb_backend.put_item(name, item, expected, overwrite)
except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')
@@ -214,10 +227,10 @@ class DynamoHandler(BaseResponse):
request = list(table_request.values())[0]
if request_type == 'PutRequest':
item = request['Item']
dynamodb_backend2.put_item(table_name, item)
self.dynamodb_backend.put_item(table_name, item)
elif request_type == 'DeleteRequest':
keys = request['Key']
item = dynamodb_backend2.delete_item(table_name, keys)
item = self.dynamodb_backend.delete_item(table_name, keys)

response = {
"ConsumedCapacity": [
@@ -237,7 +250,7 @@ class DynamoHandler(BaseResponse):
name = self.body['TableName']
key = self.body['Key']
try:
item = dynamodb_backend2.get_item(name, key)
item = self.dynamodb_backend.get_item(name, key)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, 'Validation Exception')
@@ -268,7 +281,7 @@ class DynamoHandler(BaseResponse):
attributes_to_get = table_request.get('AttributesToGet')
results["Responses"][table_name] = []
for key in keys:
item = dynamodb_backend2.get_item(table_name, key)
item = self.dynamodb_backend.get_item(table_name, key)
if item:
item_describe = item.describe_attrs(attributes_to_get)
results["Responses"][table_name].append(
@@ -297,7 +310,7 @@ class DynamoHandler(BaseResponse):
if key_condition_expression:
value_alias_map = self.body['ExpressionAttributeValues']

table = dynamodb_backend2.get_table(name)
table = self.dynamodb_backend.get_table(name)

# If table does not exist
if table is None:
@@ -365,7 +378,7 @@ class DynamoHandler(BaseResponse):
key_conditions = self.body.get('KeyConditions')
query_filters = self.body.get("QueryFilter")
if key_conditions:
hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(
hash_key_name, range_key_name = self.dynamodb_backend.get_table_keys_name(
name, key_conditions.keys())
for key, value in key_conditions.items():
if key not in (hash_key_name, range_key_name):
@@ -398,9 +411,10 @@ class DynamoHandler(BaseResponse):
exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit")
scan_index_forward = self.body.get("ScanIndexForward")
items, scanned_count, last_evaluated_key = dynamodb_backend2.query(
items, scanned_count, last_evaluated_key = self.dynamodb_backend.query(
name, hash_key, range_comparison, range_values, limit,
exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs)
exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs
)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, 'Requested resource not found')
@@ -442,12 +456,12 @@ class DynamoHandler(BaseResponse):
limit = self.body.get("Limit")

try:
items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters,
limit,
exclusive_start_key,
filter_expression,
expression_attribute_names,
expression_attribute_values)
items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
limit,
exclusive_start_key,
filter_expression,
expression_attribute_names,
expression_attribute_values)
except ValueError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
return self.error(er, 'Bad Filter Expression: {0}'.format(err))
@@ -478,12 +492,12 @@ class DynamoHandler(BaseResponse):
name = self.body['TableName']
keys = self.body['Key']
return_values = self.body.get('ReturnValues', '')
table = dynamodb_backend2.get_table(name)
table = self.dynamodb_backend.get_table(name)
if not table:
er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')

item = dynamodb_backend2.delete_item(name, keys)
item = self.dynamodb_backend.delete_item(name, keys)
if item and return_values == 'ALL_OLD':
item_dict = item.to_json()
else:
@@ -500,7 +514,7 @@ class DynamoHandler(BaseResponse):
'ExpressionAttributeNames', {})
expression_attribute_values = self.body.get(
'ExpressionAttributeValues', {})
existing_item = dynamodb_backend2.get_item(name, key)
existing_item = self.dynamodb_backend.get_item(name, key)

if 'Expected' in self.body:
expected = self.body['Expected']
@@ -536,9 +550,10 @@ class DynamoHandler(BaseResponse):
'\s*([=\+-])\s*', '\\1', update_expression)

try:
item = dynamodb_backend2.update_item(
name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values,
expected)
item = self.dynamodb_backend.update_item(
name, key, update_expression, attribute_updates, expression_attribute_names,
expression_attribute_values, expected
)
except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')
@@ -555,3 +570,26 @@ class DynamoHandler(BaseResponse):
item_dict['Attributes'] = {}

return dynamo_json_dump(item_dict)

def describe_limits(self):
return json.dumps({
'AccountMaxReadCapacityUnits': 20000,
'TableMaxWriteCapacityUnits': 10000,
'AccountMaxWriteCapacityUnits': 20000,
'TableMaxReadCapacityUnits': 10000
})

def update_time_to_live(self):
name = self.body['TableName']
ttl_spec = self.body['TimeToLiveSpecification']

self.dynamodb_backend.update_ttl(name, ttl_spec)

return json.dumps({'TimeToLiveSpecification': ttl_spec})

def describe_time_to_live(self):
name = self.body['TableName']

ttl_spec = self.dynamodb_backend.describe_ttl(name)

return json.dumps({'TimeToLiveDescription': ttl_spec})
@@ -2,6 +2,7 @@ from __future__ import unicode_literals

import copy
import itertools
import ipaddress
import json
import os
import re
@@ -402,6 +403,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
subnet = ec2_backend.get_subnet(self.subnet_id)
self.vpc_id = subnet.vpc_id
self._placement.zone = subnet.availability_zone

if associate_public_ip is None:
# Mapping public ip hasnt been explicitly enabled or disabled
associate_public_ip = subnet.map_public_ip_on_launch == 'true'
elif placement:
self._placement.zone = placement
else:
@@ -409,10 +414,22 @@ class Instance(TaggedEC2Resource, BotoInstance):

self.block_device_mapping = BlockDeviceMapping()

self.prep_nics(kwargs.get("nics", {}),
subnet_id=self.subnet_id,
private_ip=kwargs.get("private_ip"),
associate_public_ip=associate_public_ip)
self._private_ips = set()
self.prep_nics(
kwargs.get("nics", {}),
private_ip=kwargs.get("private_ip"),
associate_public_ip=associate_public_ip
)

def __del__(self):
try:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
for ip in self._private_ips:
subnet.del_subnet_ip(ip)
except Exception:
# Its not "super" critical we clean this up, as reset will do this
# worst case we'll get IP address exaustion... rarely
pass

def setup_defaults(self):
# Default have an instance with root volume should you not wish to
@@ -547,14 +564,23 @@ class Instance(TaggedEC2Resource, BotoInstance):
else:
return self.security_groups

def prep_nics(self, nic_spec, subnet_id=None, private_ip=None, associate_public_ip=None):
def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None):
self.nics = {}

if not private_ip:
if self.subnet_id:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
if not private_ip:
private_ip = subnet.get_available_subnet_ip(instance=self)
else:
subnet.request_ip(private_ip, instance=self)

self._private_ips.add(private_ip)
elif private_ip is None:
# Preserve old behaviour if in EC2-Classic mode
private_ip = random_private_ip()

# Primary NIC defaults
primary_nic = {'SubnetId': subnet_id,
primary_nic = {'SubnetId': self.subnet_id,
'PrivateIpAddress': private_ip,
'AssociatePublicIpAddress': associate_public_ip}
primary_nic = dict((k, v) for k, v in primary_nic.items() if v)
@@ -2114,10 +2140,17 @@ class Subnet(TaggedEC2Resource):
self.id = subnet_id
self.vpc_id = vpc_id
self.cidr_block = cidr_block
self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
self._availability_zone = availability_zone
self.default_for_az = default_for_az
self.map_public_ip_on_launch = map_public_ip_on_launch

# Theory is we assign ip's as we go (as 16,777,214 usable IPs in a /8)
self._subnet_ip_generator = self.cidr.hosts()
self.reserved_ips = [six.next(self._subnet_ip_generator) for _ in range(0, 3)]  # Reserved by AWS
self._unused_ips = set()  # if instance is destroyed hold IP here for reuse
self._subnet_ips = {}  # has IP: instance

@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
@@ -2184,6 +2217,46 @@ class Subnet(TaggedEC2Resource):
'"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"')
raise UnformattedGetAttTemplateException()

def get_available_subnet_ip(self, instance):
try:
new_ip = self._unused_ips.pop()
except KeyError:
new_ip = six.next(self._subnet_ip_generator)

# Skips any IP's if they've been manually specified
while str(new_ip) in self._subnet_ips:
new_ip = six.next(self._subnet_ip_generator)

if new_ip == self.cidr.broadcast_address:
raise StopIteration()  # Broadcast address cant be used obviously
# TODO StopIteration will be raised if no ip's available, not sure how aws handles this.

new_ip = str(new_ip)
self._subnet_ips[new_ip] = instance

return new_ip

def request_ip(self, ip, instance):
if ipaddress.ip_address(ip) not in self.cidr:
raise Exception('IP does not fall in the subnet CIDR of {0}'.format(self.cidr))

if ip in self._subnet_ips:
raise Exception('IP already in use')
try:
self._unused_ips.remove(ip)
except KeyError:
pass

self._subnet_ips[ip] = instance
return ip

def del_subnet_ip(self, ip):
try:
del self._subnet_ips[ip]
self._unused_ips.add(ip)
except KeyError:
pass  # Unknown IP


class SubnetBackend(object):
def __init__(self):
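With the subnet now handing out addresses from its own CIDR (and holding back the first few, mirroring the AWS-reserved ones), an instance launched into a subnet should get a private IP inside that CIDR. A sketch, assuming the run_instances response carries the private address as it does on real EC2; the AMI id and CIDRs are arbitrary:

```python
import ipaddress

import boto3
from moto import mock_ec2


@mock_ec2
def private_ip_comes_from_subnet():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock='10.0.1.0/24')['Subnet']['SubnetId']

    reservation = ec2.run_instances(
        ImageId='ami-12345678', MinCount=1, MaxCount=1, SubnetId=subnet_id)
    ip = reservation['Instances'][0]['PrivateIpAddress']
    print(ip)  # e.g. 10.0.1.4, always inside the subnet's CIDR
    assert ipaddress.ip_address(ip) in ipaddress.ip_network('10.0.1.0/24')
```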
@@ -4,6 +4,7 @@ from datetime import datetime
from random import random, randint

import pytz
from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
from copy import copy
@@ -148,7 +149,7 @@ class Task(BaseObject):
resource_requirements, overrides={}, started_by=''):
self.cluster_arn = cluster.arn
self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(
str(uuid.uuid1()))
str(uuid.uuid4()))
self.container_instance_arn = container_instance_arn
self.last_status = 'RUNNING'
self.desired_status = 'RUNNING'
@@ -288,7 +289,7 @@ class ContainerInstance(BaseObject):
'stringSetValue': [],
'type': 'STRINGSET'}]
self.container_instance_arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
str(uuid.uuid1()))
str(uuid.uuid4()))
self.pending_tasks_count = 0
self.remaining_resources = [
{'doubleValue': 0.0,
@@ -321,6 +322,8 @@ class ContainerInstance(BaseObject):
'dockerVersion': 'DockerVersion: 1.5.0'
}

self.attributes = {}

@property
def response_object(self):
response_object = self.gen_response_object()
@@ -766,6 +769,102 @@ class EC2ContainerServiceBackend(BaseBackend):
raise Exception("{0} is not a cluster".format(cluster_name))
pass

def put_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400)

if attributes is None:
raise JsonRESTError('InvalidParameterException', 'attributes value is required')

for attr in attributes:
self._put_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType'))

def _put_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
instance.attributes[name] = value
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit('/', 1)[-1]
self.container_instances[cluster_name][arn].attributes[name] = value
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != 'container-instance':
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))

self.container_instances[cluster_name][target_id].attributes[name] = value
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))

def list_attributes(self, target_type, cluster_name=None, attr_name=None, attr_value=None, max_results=None, next_token=None):
if target_type != 'container-instance':
raise JsonRESTError('InvalidParameterException', 'targetType must be container-instance')

filters = [lambda x: True]

# item will be {0 cluster_name, 1 arn, 2 name, 3 value}
if cluster_name is not None:
filters.append(lambda item: item[0] == cluster_name)
if attr_name:
filters.append(lambda item: item[2] == attr_name)
if attr_name:
filters.append(lambda item: item[3] == attr_value)

all_attrs = []
for cluster_name, cobj in self.container_instances.items():
for container_instance in cobj.values():
for key, value in container_instance.attributes.items():
all_attrs.append((cluster_name, container_instance.container_instance_arn, key, value))

return filter(lambda x: all(f(x) for f in filters), all_attrs)

def delete_attributes(self, cluster_name, attributes=None):
if cluster_name is None or cluster_name not in self.clusters:
raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400)

if attributes is None:
raise JsonRESTError('InvalidParameterException', 'attributes value is required')

for attr in attributes:
self._delete_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType'))

def _delete_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None):
if target_id is None and target_type is None:
for instance in self.container_instances[cluster_name].values():
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
elif target_type is None:
# targetId is full container instance arn
try:
arn = target_id.rsplit('/', 1)[-1]
instance = self.container_instances[cluster_name][arn]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
else:
# targetId is container uuid, targetType must be container-instance
try:
if target_type != 'container-instance':
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))

instance = self.container_instances[cluster_name][target_id]
if name in instance.attributes and instance.attributes[name] == value:
del instance.attributes[name]
except KeyError:
raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))

def list_task_definition_families(self, family_prefix=None, status=None, max_results=None, next_token=None):
for task_fam in self.task_definitions:
if family_prefix is not None and not task_fam.startswith(family_prefix):
continue

yield task_fam


ecs_backends = {}
for region, ec2_backend in ec2_backends.items():
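The new `list_task_definition_families` generator filters registered families by prefix. A short sketch through boto3 under `@mock_ecs`; the family names and container definition are illustrative:

```python
import boto3
from moto import mock_ecs


@mock_ecs
def list_families():
    client = boto3.client('ecs', region_name='us-east-1')
    for family in ('web', 'web-admin', 'worker'):
        client.register_task_definition(
            family=family,
            containerDefinitions=[{'name': 'app', 'image': 'busybox', 'memory': 128}],
        )
    resp = client.list_task_definition_families(familyPrefix='web')
    print(resp['families'])  # ['web', 'web-admin'] -- 'worker' is filtered out
```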
@@ -9,6 +9,12 @@ class EC2ContainerServiceResponse(BaseResponse):

@property
def ecs_backend(self):
"""
ECS Backend

:return: ECS Backend object
:rtype: moto.ecs.models.EC2ContainerServiceBackend
"""
return ecs_backends[self.region]

@property
@@ -34,7 +40,7 @@ class EC2ContainerServiceResponse(BaseResponse):
cluster_arns = self.ecs_backend.list_clusters()
return json.dumps({
'clusterArns': cluster_arns
# 'nextToken': str(uuid.uuid1())
# 'nextToken': str(uuid.uuid4())
})

def describe_clusters(self):
@@ -66,7 +72,7 @@ class EC2ContainerServiceResponse(BaseResponse):
task_definition_arns = self.ecs_backend.list_task_definitions()
return json.dumps({
'taskDefinitionArns': task_definition_arns
# 'nextToken': str(uuid.uuid1())
# 'nextToken': str(uuid.uuid4())
})

def describe_task_definition(self):
@@ -159,7 +165,7 @@ class EC2ContainerServiceResponse(BaseResponse):
return json.dumps({
'serviceArns': service_arns
# ,
# 'nextToken': str(uuid.uuid1())
# 'nextToken': str(uuid.uuid4())
})

def describe_services(self):
@@ -245,3 +251,62 @@ class EC2ContainerServiceResponse(BaseResponse):
'failures': [ci.response_object for ci in failures],
'containerInstances': [ci.response_object for ci in container_instances]
})

def put_attributes(self):
cluster_name = self._get_param('cluster')
attributes = self._get_param('attributes')

self.ecs_backend.put_attributes(cluster_name, attributes)

return json.dumps({'attributes': attributes})

def list_attributes(self):
cluster_name = self._get_param('cluster')
attr_name = self._get_param('attributeName')
attr_value = self._get_param('attributeValue')
target_type = self._get_param('targetType')
max_results = self._get_param('maxResults')
next_token = self._get_param('nextToken')

results = self.ecs_backend.list_attributes(target_type, cluster_name, attr_name, attr_value, max_results, next_token)
# Result will be [item will be {0 cluster_name, 1 arn, 2 name, 3 value}]

formatted_results = []
for _, arn, name, value in results:
tmp_result = {
'name': name,
'targetId': arn
}
if value is not None:
tmp_result['value'] = value
formatted_results.append(tmp_result)

return json.dumps({'attributes': formatted_results})

def delete_attributes(self):
cluster_name = self._get_param('cluster')
attributes = self._get_param('attributes')

self.ecs_backend.delete_attributes(cluster_name, attributes)

return json.dumps({'attributes': attributes})

def discover_poll_endpoint(self):
# Here are the arguments, this api is used by the ecs client so obviously no decent
# documentation. Hence I've responded with valid but useless data
# cluster_name = self._get_param('cluster')
# instance = self._get_param('containerInstance')
return json.dumps({
'endpoint': 'http://localhost',
'telemetryEndpoint': 'http://localhost'
})

def list_task_definition_families(self):
family_prefix = self._get_param('familyPrefix')
status = self._get_param('status')
max_results = self._get_param('maxResults')
next_token = self._get_param('nextToken')

results = self.ecs_backend.list_task_definition_families(family_prefix, status, max_results, next_token)

return json.dumps({'families': list(results)})
||||
|
@ -3,8 +3,10 @@ from __future__ import unicode_literals
|
||||
import datetime
|
||||
import re
|
||||
from moto.compat import OrderedDict
|
||||
from moto.core.exceptions import RESTError
|
||||
from moto.core import BaseBackend, BaseModel
|
||||
from moto.ec2.models import ec2_backends
|
||||
from moto.acm.models import acm_backends
|
||||
from .exceptions import (
|
||||
DuplicateLoadBalancerName,
|
||||
DuplicateListenerError,
|
||||
@ -40,6 +42,8 @@ class FakeHealthStatus(BaseModel):
|
||||
|
||||
|
||||
class FakeTargetGroup(BaseModel):
|
||||
HTTP_CODE_REGEX = re.compile(r'(?:(?:\d+-\d+|\d+),?)+')
|
||||
|
||||
def __init__(self,
|
||||
name,
|
||||
arn,
|
||||
@ -52,7 +56,9 @@ class FakeTargetGroup(BaseModel):
|
||||
healthcheck_interval_seconds,
|
||||
healthcheck_timeout_seconds,
|
||||
healthy_threshold_count,
|
||||
unhealthy_threshold_count):
|
||||
unhealthy_threshold_count,
|
||||
matcher=None,
|
||||
target_type=None):
|
||||
self.name = name
|
||||
self.arn = arn
|
||||
self.vpc_id = vpc_id
|
||||
@ -67,6 +73,8 @@ class FakeTargetGroup(BaseModel):
|
||||
self.unhealthy_threshold_count = unhealthy_threshold_count
|
||||
self.load_balancer_arns = []
|
||||
self.tags = {}
|
||||
self.matcher = matcher
|
||||
self.target_type = target_type
|
||||
|
||||
self.attributes = {
|
||||
'deregistration_delay.timeout_seconds': 300,
|
||||
@ -75,6 +83,10 @@ class FakeTargetGroup(BaseModel):
|
||||
|
||||
self.targets = OrderedDict()
|
||||
|
||||
@property
|
||||
def physical_resource_id(self):
|
||||
return self.arn
|
||||
|
||||
def register(self, targets):
|
||||
for target in targets:
|
||||
self.targets[target['id']] = {
|
||||
@ -99,6 +111,46 @@ class FakeTargetGroup(BaseModel):
|
||||
raise InvalidTargetError()
|
||||
return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy')

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']

        elbv2_backend = elbv2_backends[region_name]

        # per cloudformation docs:
        # The target group name should be shorter than 22 characters because
        # AWS CloudFormation uses the target group name to create the name of the load balancer.
        name = properties.get('Name', resource_name[:22])
        vpc_id = properties.get("VpcId")
        protocol = properties.get('Protocol')
        port = properties.get("Port")
        healthcheck_protocol = properties.get("HealthCheckProtocol")
        healthcheck_port = properties.get("HealthCheckPort")
        healthcheck_path = properties.get("HealthCheckPath")
        healthcheck_interval_seconds = properties.get("HealthCheckIntervalSeconds")
        healthcheck_timeout_seconds = properties.get("HealthCheckTimeoutSeconds")
        healthy_threshold_count = properties.get("HealthyThresholdCount")
        unhealthy_threshold_count = properties.get("UnhealthyThresholdCount")
        matcher = properties.get("Matcher")
        target_type = properties.get("TargetType")

        target_group = elbv2_backend.create_target_group(
            name=name,
            vpc_id=vpc_id,
            protocol=protocol,
            port=port,
            healthcheck_protocol=healthcheck_protocol,
            healthcheck_port=healthcheck_port,
            healthcheck_path=healthcheck_path,
            healthcheck_interval_seconds=healthcheck_interval_seconds,
            healthcheck_timeout_seconds=healthcheck_timeout_seconds,
            healthy_threshold_count=healthy_threshold_count,
            unhealthy_threshold_count=unhealthy_threshold_count,
            matcher=matcher,
            target_type=target_type,
        )
        return target_group

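A sketch of the CloudFormation resource properties that create_from_cloudformation_json above consumes, invoked directly against the mocked backend (an editorial aside, not part of this diff). The VPC id and names are placeholders, and a Matcher is supplied because create_target_group validates matcher['HttpCode'].

from moto.elbv2.models import FakeTargetGroup

cloudformation_json = {
    'Properties': {
        'Name': 'mytargetgroup',        # optional; defaults to resource_name[:22]
        'VpcId': 'vpc-12345678',        # placeholder id
        'Protocol': 'HTTP',
        'Port': 80,
        'Matcher': {'HttpCode': '200'},
        'TargetType': 'instance',
    }
}
target_group = FakeTargetGroup.create_from_cloudformation_json(
    'MyTargetGroup', cloudformation_json, 'us-east-1')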
class FakeListener(BaseModel):

@ -109,6 +161,7 @@ class FakeListener(BaseModel):
        self.port = port
        self.ssl_policy = ssl_policy
        self.certificate = certificate
        self.certificates = [certificate] if certificate is not None else []
        self.default_actions = default_actions
        self._non_default_rules = []
        self._default_rule = FakeRule(
@ -119,6 +172,10 @@ class FakeListener(BaseModel):
            is_default=True
        )

    @property
    def physical_resource_id(self):
        return self.arn

    @property
    def rules(self):
        return self._non_default_rules + [self._default_rule]
@ -130,6 +187,28 @@ class FakeListener(BaseModel):
        self._non_default_rules.append(rule)
        self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority)

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']

        elbv2_backend = elbv2_backends[region_name]
        load_balancer_arn = properties.get("LoadBalancerArn")
        protocol = properties.get("Protocol")
        port = properties.get("Port")
        ssl_policy = properties.get("SslPolicy")
        certificates = properties.get("Certificates")
        # transform default actions to conform with the rest of the code and XML templates
        if "DefaultActions" in properties:
            default_actions = []
            for action in properties['DefaultActions']:
                default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']})
        else:
            default_actions = None

        listener = elbv2_backend.create_listener(
            load_balancer_arn, protocol, port, ssl_policy, certificates, default_actions)
        return listener

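The DefaultActions handling above only lower-cases the key names so the existing XML templates can render them; a tiny sketch of that mapping (an editorial aside; the target group ARN is a made-up placeholder).

cf_actions = [{'Type': 'forward',
               'TargetGroupArn': 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/mytargetgroup/50dc6c495c0c9188'}]
default_actions = [{'type': a['Type'], 'target_group_arn': a['TargetGroupArn']} for a in cf_actions]
assert default_actions[0]['type'] == 'forward'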
class FakeRule(BaseModel):

@ -153,6 +232,8 @@ class FakeBackend(BaseModel):


class FakeLoadBalancer(BaseModel):
    VALID_ATTRS = {'access_logs.s3.enabled', 'access_logs.s3.bucket', 'access_logs.s3.prefix',
                   'deletion_protection.enabled', 'idle_timeout.timeout_seconds'}

    def __init__(self, name, security_groups, subnets, vpc_id, arn, dns_name, scheme='internet-facing'):
        self.name = name
@ -166,9 +247,18 @@ class FakeLoadBalancer(BaseModel):
        self.arn = arn
        self.dns_name = dns_name

        self.stack = 'ipv4'
        self.attrs = {
            'access_logs.s3.enabled': 'false',
            'access_logs.s3.bucket': None,
            'access_logs.s3.prefix': None,
            'deletion_protection.enabled': 'false',
            'idle_timeout.timeout_seconds': '60'
        }

    @property
    def physical_resource_id(self):
        return self.name
        return self.arn

    def add_tag(self, key, value):
        if len(self.tags) >= 10 and key not in self.tags:
@ -186,6 +276,27 @@ class FakeLoadBalancer(BaseModel):
        ''' Not exposed as part of the ELB API - used for CloudFormation. '''
        elbv2_backends[region].delete_load_balancer(self.arn)

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        properties = cloudformation_json['Properties']

        elbv2_backend = elbv2_backends[region_name]

        name = properties.get('Name', resource_name)
        security_groups = properties.get("SecurityGroups")
        subnet_ids = properties.get('Subnets')
        scheme = properties.get('Scheme', 'internet-facing')

        load_balancer = elbv2_backend.create_load_balancer(name, security_groups, subnet_ids, scheme=scheme)
        return load_balancer

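A hedged end-to-end sketch of creating a load balancer through CloudFormation (an editorial aside, not part of this diff). It assumes the AWS::ElasticLoadBalancingV2::LoadBalancer type is wired into moto's CloudFormation resource map elsewhere in this change; the CIDR blocks and names are placeholders.

import json

import boto3
from moto import mock_cloudformation, mock_ec2, mock_elbv2


@mock_cloudformation
@mock_ec2
@mock_elbv2
def test_create_load_balancer_via_cloudformation():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock='10.0.1.0/24')['Subnet']['SubnetId']

    template = {
        'AWSTemplateFormatVersion': '2010-09-09',
        'Resources': {
            'MyALB': {
                'Type': 'AWS::ElasticLoadBalancingV2::LoadBalancer',
                'Properties': {'Name': 'my-alb', 'Subnets': [subnet_id], 'Scheme': 'internet-facing'},
            }
        },
    }
    cfn = boto3.client('cloudformation', region_name='us-east-1')
    cfn.create_stack(StackName='alb-stack', TemplateBody=json.dumps(template))

    elbv2 = boto3.client('elbv2', region_name='us-east-1')
    names = [lb['LoadBalancerName'] for lb in elbv2.describe_load_balancers()['LoadBalancers']]
    assert 'my-alb' in names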
    def get_cfn_attribute(self, attribute_name):
        attributes = {
            'DNSName': self.dns_name,
            'LoadBalancerName': self.name,
        }
        return attributes[attribute_name]


class ELBv2Backend(BaseBackend):

@ -194,6 +305,26 @@ class ELBv2Backend(BaseBackend):
        self.target_groups = OrderedDict()
        self.load_balancers = OrderedDict()

    @property
    def ec2_backend(self):
        """
        EC2 backend

        :return: EC2 Backend
        :rtype: moto.ec2.models.EC2Backend
        """
        return ec2_backends[self.region_name]

    @property
    def acm_backend(self):
        """
        ACM backend

        :return: ACM Backend
        :rtype: moto.acm.models.AWSCertificateManagerBackend
        """
        return acm_backends[self.region_name]

    def reset(self):
        region_name = self.region_name
        self.__dict__ = {}
@ -201,12 +332,11 @@ class ELBv2Backend(BaseBackend):

    def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'):
        vpc_id = None
        ec2_backend = ec2_backends[self.region_name]
        subnets = []
        if not subnet_ids:
            raise SubnetNotFoundError()
        for subnet_id in subnet_ids:
            subnet = ec2_backend.get_subnet(subnet_id)
            subnet = self.ec2_backend.get_subnet(subnet_id)
            if subnet is None:
                raise SubnetNotFoundError()
            subnets.append(subnet)
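The get_cfn_attribute method at the top of this hunk group is what backs Fn::GetAtt for the load balancer resource; a small sketch of that mapping (an editorial aside; the ARN and DNS name are placeholders).

from moto.elbv2.models import FakeLoadBalancer

lb = FakeLoadBalancer(name='my-alb', security_groups=[], subnets=[], vpc_id='vpc-12345678',
                      arn='arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/app/my-alb/50dc6c495c0c9188',
                      dns_name='my-alb-1.us-east-1.elb.amazonaws.com')
assert lb.get_cfn_attribute('DNSName') == 'my-alb-1.us-east-1.elb.amazonaws.com'
assert lb.get_cfn_attribute('LoadBalancerName') == 'my-alb'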
@ -279,7 +409,7 @@
    def create_target_group(self, name, **kwargs):
        if len(name) > 32:
            raise InvalidTargetGroupNameError(
                "Target group name '%s' cannot be longer than '32' characters" % name
                "Target group name '%s' cannot be longer than '22' characters" % name
            )
        if not re.match('^[a-zA-Z0-9\-]+$', name):
            raise InvalidTargetGroupNameError(
@ -310,6 +440,9 @@ class ELBv2Backend(BaseBackend):
                "Value {} at 'protocol' failed to satisfy constraint: "
                "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols))

        if FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None:
            raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')

        arn = "arn:aws:elasticloadbalancing:%s:1:targetgroup/%s/50dc6c495c0c9188" % (self.region_name, name)
        target_group = FakeTargetGroup(name, arn, **kwargs)
        self.target_groups[target_group.arn] = target_group
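The HttpCode matcher check above accepts a single status code, a range, or a comma-separated list; a quick sketch of what HTTP_CODE_REGEX matches (an editorial aside).

import re

HTTP_CODE_REGEX = re.compile(r'(?:(?:\d+-\d+|\d+),?)+')

assert HTTP_CODE_REGEX.match('200') is not None
assert HTTP_CODE_REGEX.match('200-399') is not None
assert HTTP_CODE_REGEX.match('200,201') is not None
assert HTTP_CODE_REGEX.match('potato') is None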
@ -557,6 +690,166 @@ class ELBv2Backend(BaseBackend):
|
||||
modified_rules.append(given_rule)
|
||||
return modified_rules
|
||||
|
||||
def set_ip_address_type(self, arn, ip_type):
|
||||
if ip_type not in ('internal', 'dualstack'):
|
||||
raise RESTError('InvalidParameterValue', 'IpAddressType must be either internal | dualstack')
|
||||
|
||||
balancer = self.load_balancers.get(arn)
|
||||
if balancer is None:
|
||||
raise LoadBalancerNotFoundError()
|
||||
|
||||
if ip_type == 'dualstack' and balancer.scheme == 'internal':
|
||||
raise RESTError('InvalidConfigurationRequest', 'Internal load balancers cannot be dualstack')
|
||||
|
||||
balancer.stack = ip_type
|
||||
|
||||
def set_security_groups(self, arn, sec_groups):
|
||||
balancer = self.load_balancers.get(arn)
|
||||
if balancer is None:
|
||||
raise LoadBalancerNotFoundError()
|
||||
|
||||
# Check all security groups exist
|
||||
for sec_group_id in sec_groups:
|
||||
if self.ec2_backend.get_security_group_from_id(sec_group_id) is None:
|
||||
raise RESTError('InvalidSecurityGroup', 'Security group {0} does not exist'.format(sec_group_id))
|
||||
|
||||
balancer.security_groups = sec_groups
|
||||
|
||||
def set_subnets(self, arn, subnets):
|
||||
balancer = self.load_balancers.get(arn)
|
||||
if balancer is None:
|
||||
raise LoadBalancerNotFoundError()
|
||||
|
||||
subnet_objects = []
|
||||
sub_zone_list = {}
|
||||
for subnet in subnets:
|
||||
try:
|
||||
subnet = self.ec2_backend.get_subnet(subnet)
|
||||
|
||||
if subnet.availability_zone in sub_zone_list:
|
||||
raise RESTError('InvalidConfigurationRequest', 'More than 1 subnet cannot be specified for 1 availability zone')
|
||||
|
||||
sub_zone_list[subnet.availability_zone] = subnet.id
|
||||
subnet_objects.append(subnet)
|
||||
except Exception:
|
||||
raise SubnetNotFoundError()
|
||||
|
||||
if len(sub_zone_list) < 2:
|
||||
raise RESTError('InvalidConfigurationRequest', 'More than 1 availability zone must be specified')
|
||||
|
||||
balancer.subnets = subnet_objects
|
||||
|
||||
return sub_zone_list.items()
|
||||
|
||||
def modify_load_balancer_attributes(self, arn, attrs):
|
||||
balancer = self.load_balancers.get(arn)
|
||||
if balancer is None:
|
||||
raise LoadBalancerNotFoundError()
|
||||
|
||||
for key in attrs:
|
||||
if key not in FakeLoadBalancer.VALID_ATTRS:
|
||||
raise RESTError('InvalidConfigurationRequest', 'Key {0} not valid'.format(key))
|
||||
|
||||
balancer.attrs.update(attrs)
|
||||
return balancer.attrs
|
||||
|
||||
def describe_load_balancer_attributes(self, arn):
|
||||
balancer = self.load_balancers.get(arn)
|
||||
if balancer is None:
|
||||
raise LoadBalancerNotFoundError()
|
||||
|
||||
return balancer.attrs
|
||||
|
||||
def modify_target_group(self, arn, health_check_proto=None, health_check_port=None, health_check_path=None, health_check_interval=None,
|
||||
health_check_timeout=None, healthy_threshold_count=None, unhealthy_threshold_count=None, http_codes=None):
|
||||
target_group = self.target_groups.get(arn)
|
||||
if target_group is None:
|
||||
raise TargetGroupNotFoundError()
|
||||
|
||||
if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(http_codes) is None:
|
||||
raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')
|
||||
|
||||
if http_codes is not None:
|
||||
target_group.matcher['HttpCode'] = http_codes
|
||||
if health_check_interval is not None:
|
||||
target_group.healthcheck_interval_seconds = health_check_interval
|
||||
if health_check_path is not None:
|
||||
target_group.healthcheck_path = health_check_path
|
||||
if health_check_port is not None:
|
||||
target_group.healthcheck_port = health_check_port
|
||||
if health_check_proto is not None:
|
||||
target_group.healthcheck_protocol = health_check_proto
|
||||
if health_check_timeout is not None:
|
||||
target_group.healthcheck_timeout_seconds = health_check_timeout
|
||||
if healthy_threshold_count is not None:
|
||||
target_group.healthy_threshold_count = healthy_threshold_count
|
||||
if unhealthy_threshold_count is not None:
|
||||
target_group.unhealthy_threshold_count = unhealthy_threshold_count
|
||||
|
||||
return target_group
|
||||
|
||||
def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None, certificates=None, default_actions=None):
|
||||
for load_balancer in self.load_balancers.values():
|
||||
if arn in load_balancer.listeners:
|
||||
break
|
||||
else:
|
||||
raise ListenerNotFoundError()
|
||||
|
||||
listener = load_balancer.listeners[arn]
|
||||
|
||||
if port is not None:
|
||||
for listener_arn, current_listener in load_balancer.listeners.items():
|
||||
if listener_arn == arn:
|
||||
continue
|
||||
if listener.port == port:
|
||||
raise DuplicateListenerError()
|
||||
|
||||
listener.port = port
|
||||
|
||||
if protocol is not None:
|
||||
if protocol not in ('HTTP', 'HTTPS', 'TCP'):
|
||||
raise RESTError('UnsupportedProtocol', 'Protocol {0} is not supported'.format(protocol))
|
||||
|
||||
# HTTPS checks
|
||||
if protocol == 'HTTPS':
|
||||
# HTTPS
|
||||
|
||||
# Might already be HTTPS so may not provide certs
|
||||
if certificates is None and listener.protocol != 'HTTPS':
|
||||
raise RESTError('InvalidConfigurationRequest', 'Certificates must be provided for HTTPS')
|
||||
|
||||
# Check certificates exist
|
||||
if certificates is not None:
|
||||
default_cert = None
|
||||
all_certs = set() # for SNI
|
||||
for cert in certificates:
|
||||
if cert['is_default'] == 'true':
|
||||
default_cert = cert['certificate_arn']
|
||||
try:
|
||||
self.acm_backend.get_certificate(cert['certificate_arn'])
|
||||
except Exception:
|
||||
raise RESTError('CertificateNotFound', 'Certificate {0} not found'.format(cert['certificate_arn']))
|
||||
|
||||
all_certs.add(cert['certificate_arn'])
|
||||
|
||||
if default_cert is None:
|
||||
raise RESTError('InvalidConfigurationRequest', 'No default certificate')
|
||||
|
||||
listener.certificate = default_cert
|
||||
listener.certificates = list(all_certs)
|
||||
|
||||
listener.protocol = protocol
|
||||
|
||||
if ssl_policy is not None:
|
||||
# Its already validated in responses.py
|
||||
listener.ssl_policy = ssl_policy
|
||||
|
||||
if default_actions is not None:
|
||||
# Is currently not validated
|
||||
listener.default_actions = default_actions
|
||||
|
||||
return listener
|
||||
|
||||
def _any_listener_using(self, target_group_arn):
|
||||
for load_balancer in self.load_balancers.values():
|
||||
for listener in load_balancer.listeners.values():
|
||||
|
@ -1,4 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
from moto.core.exceptions import RESTError
|
||||
from moto.core.utils import amzn_request_id
|
||||
from moto.core.responses import BaseResponse
|
||||
from .models import elbv2_backends
|
||||
from .exceptions import DuplicateTagKeysError
|
||||
@ -6,12 +8,131 @@ from .exceptions import LoadBalancerNotFoundError
|
||||
from .exceptions import TargetGroupNotFoundError
|
||||
|
||||
|
||||
class ELBV2Response(BaseResponse):
|
||||
SSL_POLICIES = [
|
||||
{
|
||||
'name': 'ELBSecurityPolicy-2016-08',
|
||||
'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
|
||||
'ciphers': [
|
||||
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
|
||||
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
|
||||
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
|
||||
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
|
||||
{'name': 'AES128-GCM-SHA256', 'priority': 13},
|
||||
{'name': 'AES128-SHA256', 'priority': 14},
|
||||
{'name': 'AES128-SHA', 'priority': 15},
|
||||
{'name': 'AES256-GCM-SHA384', 'priority': 16},
|
||||
{'name': 'AES256-SHA256', 'priority': 17},
|
||||
{'name': 'AES256-SHA', 'priority': 18}
|
||||
],
|
||||
},
|
||||
{
|
||||
'name': 'ELBSecurityPolicy-TLS-1-2-2017-01',
|
||||
'ssl_protocols': ['TLSv1.2'],
|
||||
'ciphers': [
|
||||
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
|
||||
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
|
||||
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 5},
|
||||
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 6},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 7},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 8},
|
||||
{'name': 'AES128-GCM-SHA256', 'priority': 9},
|
||||
{'name': 'AES128-SHA256', 'priority': 10},
|
||||
{'name': 'AES256-GCM-SHA384', 'priority': 11},
|
||||
{'name': 'AES256-SHA256', 'priority': 12}
|
||||
]
|
||||
},
|
||||
{
|
||||
'name': 'ELBSecurityPolicy-TLS-1-1-2017-01',
|
||||
'ssl_protocols': ['TLSv1.1', 'TLSv1.2'],
|
||||
'ciphers': [
|
||||
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
|
||||
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
|
||||
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
|
||||
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
|
||||
{'name': 'AES128-GCM-SHA256', 'priority': 13},
|
||||
{'name': 'AES128-SHA256', 'priority': 14},
|
||||
{'name': 'AES128-SHA', 'priority': 15},
|
||||
{'name': 'AES256-GCM-SHA384', 'priority': 16},
|
||||
{'name': 'AES256-SHA256', 'priority': 17},
|
||||
{'name': 'AES256-SHA', 'priority': 18}
|
||||
]
|
||||
},
|
||||
{
|
||||
'name': 'ELBSecurityPolicy-2015-05',
|
||||
'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
|
||||
'ciphers': [
|
||||
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
|
||||
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
|
||||
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
|
||||
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
|
||||
{'name': 'AES128-GCM-SHA256', 'priority': 13},
|
||||
{'name': 'AES128-SHA256', 'priority': 14},
|
||||
{'name': 'AES128-SHA', 'priority': 15},
|
||||
{'name': 'AES256-GCM-SHA384', 'priority': 16},
|
||||
{'name': 'AES256-SHA256', 'priority': 17},
|
||||
{'name': 'AES256-SHA', 'priority': 18}
|
||||
]
|
||||
},
|
||||
{
|
||||
'name': 'ELBSecurityPolicy-TLS-1-0-2015-04',
|
||||
'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
|
||||
'ciphers': [
|
||||
{'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
|
||||
{'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
|
||||
{'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
|
||||
{'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
|
||||
{'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
|
||||
{'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
|
||||
{'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
|
||||
{'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
|
||||
{'name': 'AES128-GCM-SHA256', 'priority': 13},
|
||||
{'name': 'AES128-SHA256', 'priority': 14},
|
||||
{'name': 'AES128-SHA', 'priority': 15},
|
||||
{'name': 'AES256-GCM-SHA384', 'priority': 16},
|
||||
{'name': 'AES256-SHA256', 'priority': 17},
|
||||
{'name': 'AES256-SHA', 'priority': 18},
|
||||
{'name': 'DES-CBC3-SHA', 'priority': 19}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
class ELBV2Response(BaseResponse):
|
||||
@property
|
||||
def elbv2_backend(self):
|
||||
return elbv2_backends[self.region]
|
||||
|
||||
@amzn_request_id
|
||||
def create_load_balancer(self):
|
||||
load_balancer_name = self._get_param('Name')
|
||||
subnet_ids = self._get_multi_param("Subnets.member")
|
||||
@ -28,6 +149,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE)
|
||||
return template.render(load_balancer=load_balancer)
|
||||
|
||||
@amzn_request_id
|
||||
def create_rule(self):
|
||||
lister_arn = self._get_param('ListenerArn')
|
||||
_conditions = self._get_list_prefix('Conditions.member')
|
||||
@ -52,6 +174,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(CREATE_RULE_TEMPLATE)
|
||||
return template.render(rules=rules)
|
||||
|
||||
@amzn_request_id
|
||||
def create_target_group(self):
|
||||
name = self._get_param('Name')
|
||||
vpc_id = self._get_param('VpcId')
|
||||
@ -64,6 +187,7 @@ class ELBV2Response(BaseResponse):
|
||||
healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5')
|
||||
healthy_threshold_count = self._get_param('HealthyThresholdCount', '5')
|
||||
unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2')
|
||||
http_codes = self._get_param('Matcher.HttpCode', '200')
|
||||
|
||||
target_group = self.elbv2_backend.create_target_group(
|
||||
name,
|
||||
@ -77,11 +201,13 @@ class ELBV2Response(BaseResponse):
|
||||
healthcheck_timeout_seconds=healthcheck_timeout_seconds,
|
||||
healthy_threshold_count=healthy_threshold_count,
|
||||
unhealthy_threshold_count=unhealthy_threshold_count,
|
||||
matcher={'HttpCode': http_codes}
|
||||
)
|
||||
|
||||
template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE)
|
||||
return template.render(target_group=target_group)
|
||||
|
||||
@amzn_request_id
|
||||
def create_listener(self):
|
||||
load_balancer_arn = self._get_param('LoadBalancerArn')
|
||||
protocol = self._get_param('Protocol')
|
||||
@ -105,6 +231,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(CREATE_LISTENER_TEMPLATE)
|
||||
return template.render(listener=listener)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_load_balancers(self):
|
||||
arns = self._get_multi_param("LoadBalancerArns.member")
|
||||
names = self._get_multi_param("Names.member")
|
||||
@ -124,6 +251,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE)
|
||||
return template.render(load_balancers=load_balancers_resp, marker=next_marker)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_rules(self):
|
||||
listener_arn = self._get_param('ListenerArn')
|
||||
rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None
|
||||
@ -144,6 +272,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_RULES_TEMPLATE)
|
||||
return template.render(rules=rules_resp, marker=next_marker)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_target_groups(self):
|
||||
load_balancer_arn = self._get_param('LoadBalancerArn')
|
||||
target_group_arns = self._get_multi_param('TargetGroupArns.member')
|
||||
@ -153,6 +282,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE)
|
||||
return template.render(target_groups=target_groups)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_target_group_attributes(self):
|
||||
target_group_arn = self._get_param('TargetGroupArn')
|
||||
target_group = self.elbv2_backend.target_groups.get(target_group_arn)
|
||||
@ -161,6 +291,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
|
||||
return template.render(attributes=target_group.attributes)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_listeners(self):
|
||||
load_balancer_arn = self._get_param('LoadBalancerArn')
|
||||
listener_arns = self._get_multi_param('ListenerArns.member')
|
||||
@ -171,30 +302,35 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE)
|
||||
return template.render(listeners=listeners)
|
||||
|
||||
@amzn_request_id
|
||||
def delete_load_balancer(self):
|
||||
arn = self._get_param('LoadBalancerArn')
|
||||
self.elbv2_backend.delete_load_balancer(arn)
|
||||
template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def delete_rule(self):
|
||||
arn = self._get_param('RuleArn')
|
||||
self.elbv2_backend.delete_rule(arn)
|
||||
template = self.response_template(DELETE_RULE_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def delete_target_group(self):
|
||||
arn = self._get_param('TargetGroupArn')
|
||||
self.elbv2_backend.delete_target_group(arn)
|
||||
template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def delete_listener(self):
|
||||
arn = self._get_param('ListenerArn')
|
||||
self.elbv2_backend.delete_listener(arn)
|
||||
template = self.response_template(DELETE_LISTENER_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def modify_rule(self):
|
||||
rule_arn = self._get_param('RuleArn')
|
||||
_conditions = self._get_list_prefix('Conditions.member')
|
||||
@ -217,6 +353,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(MODIFY_RULE_TEMPLATE)
|
||||
return template.render(rules=rules)
|
||||
|
||||
@amzn_request_id
|
||||
def modify_target_group_attributes(self):
|
||||
target_group_arn = self._get_param('TargetGroupArn')
|
||||
target_group = self.elbv2_backend.target_groups.get(target_group_arn)
|
||||
@ -230,6 +367,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
|
||||
return template.render(attributes=attributes)
|
||||
|
||||
@amzn_request_id
|
||||
def register_targets(self):
|
||||
target_group_arn = self._get_param('TargetGroupArn')
|
||||
targets = self._get_list_prefix('Targets.member')
|
||||
@ -238,6 +376,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(REGISTER_TARGETS_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def deregister_targets(self):
|
||||
target_group_arn = self._get_param('TargetGroupArn')
|
||||
targets = self._get_list_prefix('Targets.member')
|
||||
@ -246,6 +385,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DEREGISTER_TARGETS_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def describe_target_health(self):
|
||||
target_group_arn = self._get_param('TargetGroupArn')
|
||||
targets = self._get_list_prefix('Targets.member')
|
||||
@ -254,6 +394,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE)
|
||||
return template.render(target_health_descriptions=target_health_descriptions)
|
||||
|
||||
@amzn_request_id
|
||||
def set_rule_priorities(self):
|
||||
rule_priorities = self._get_list_prefix('RulePriorities.member')
|
||||
for rule_priority in rule_priorities:
|
||||
@ -262,6 +403,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE)
|
||||
return template.render(rules=rules)
|
||||
|
||||
@amzn_request_id
|
||||
def add_tags(self):
|
||||
resource_arns = self._get_multi_param('ResourceArns.member')
|
||||
|
||||
@ -281,6 +423,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(ADD_TAGS_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def remove_tags(self):
|
||||
resource_arns = self._get_multi_param('ResourceArns.member')
|
||||
tag_keys = self._get_multi_param('TagKeys.member')
|
||||
@ -301,6 +444,7 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(REMOVE_TAGS_TEMPLATE)
|
||||
return template.render()
|
||||
|
||||
@amzn_request_id
|
||||
def describe_tags(self):
|
||||
resource_arns = self._get_multi_param('ResourceArns.member')
|
||||
resources = []
|
||||
@ -320,6 +464,125 @@ class ELBV2Response(BaseResponse):
|
||||
template = self.response_template(DESCRIBE_TAGS_TEMPLATE)
|
||||
return template.render(resources=resources)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_account_limits(self):
|
||||
# Supports paging but not worth implementing yet
|
||||
# marker = self._get_param('Marker')
|
||||
# page_size = self._get_param('PageSize')
|
||||
|
||||
limits = {
|
||||
'application-load-balancers': 20,
|
||||
'target-groups': 3000,
|
||||
'targets-per-application-load-balancer': 30,
|
||||
'listeners-per-application-load-balancer': 50,
|
||||
'rules-per-application-load-balancer': 100,
|
||||
'network-load-balancers': 20,
|
||||
'targets-per-network-load-balancer': 200,
|
||||
'listeners-per-network-load-balancer': 50
|
||||
}
|
||||
|
||||
template = self.response_template(DESCRIBE_LIMITS_TEMPLATE)
|
||||
return template.render(limits=limits)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_ssl_policies(self):
|
||||
names = self._get_multi_param('Names.member.')
|
||||
# Supports paging but not worth implementing yet
|
||||
# marker = self._get_param('Marker')
|
||||
# page_size = self._get_param('PageSize')
|
||||
|
||||
policies = SSL_POLICIES
|
||||
if names:
|
||||
policies = filter(lambda policy: policy['name'] in names, policies)
|
||||
|
||||
template = self.response_template(DESCRIBE_SSL_POLICIES_TEMPLATE)
|
||||
return template.render(policies=policies)
|
||||
|
||||
@amzn_request_id
|
||||
def set_ip_address_type(self):
|
||||
arn = self._get_param('LoadBalancerArn')
|
||||
ip_type = self._get_param('IpAddressType')
|
||||
|
||||
self.elbv2_backend.set_ip_address_type(arn, ip_type)
|
||||
|
||||
template = self.response_template(SET_IP_ADDRESS_TYPE_TEMPLATE)
|
||||
return template.render(ip_type=ip_type)
|
||||
|
||||
@amzn_request_id
|
||||
def set_security_groups(self):
|
||||
arn = self._get_param('LoadBalancerArn')
|
||||
sec_groups = self._get_multi_param('SecurityGroups.member.')
|
||||
|
||||
self.elbv2_backend.set_security_groups(arn, sec_groups)
|
||||
|
||||
template = self.response_template(SET_SECURITY_GROUPS_TEMPLATE)
|
||||
return template.render(sec_groups=sec_groups)
|
||||
|
||||
@amzn_request_id
|
||||
def set_subnets(self):
|
||||
arn = self._get_param('LoadBalancerArn')
|
||||
subnets = self._get_multi_param('Subnets.member.')
|
||||
|
||||
subnet_zone_list = self.elbv2_backend.set_subnets(arn, subnets)
|
||||
|
||||
template = self.response_template(SET_SUBNETS_TEMPLATE)
|
||||
return template.render(subnets=subnet_zone_list)
|
||||
|
||||
@amzn_request_id
|
||||
def modify_load_balancer_attributes(self):
|
||||
arn = self._get_param('LoadBalancerArn')
|
||||
attrs = self._get_map_prefix('Attributes.member', key_end='Key', value_end='Value')
|
||||
|
||||
all_attrs = self.elbv2_backend.modify_load_balancer_attributes(arn, attrs)
|
||||
|
||||
template = self.response_template(MODIFY_LOADBALANCER_ATTRS_TEMPLATE)
|
||||
return template.render(attrs=all_attrs)
|
||||
|
||||
@amzn_request_id
|
||||
def describe_load_balancer_attributes(self):
|
||||
arn = self._get_param('LoadBalancerArn')
|
||||
attrs = self.elbv2_backend.describe_load_balancer_attributes(arn)
|
||||
|
||||
template = self.response_template(DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE)
|
||||
return template.render(attrs=attrs)
|
||||
|
||||
@amzn_request_id
|
||||
def modify_target_group(self):
|
||||
arn = self._get_param('TargetGroupArn')
|
||||
|
||||
health_check_proto = self._get_param('HealthCheckProtocol') # 'HTTP' | 'HTTPS' | 'TCP',
|
||||
health_check_port = self._get_param('HealthCheckPort')
|
||||
health_check_path = self._get_param('HealthCheckPath')
|
||||
health_check_interval = self._get_param('HealthCheckIntervalSeconds')
|
||||
health_check_timeout = self._get_param('HealthCheckTimeoutSeconds')
|
||||
healthy_threshold_count = self._get_param('HealthyThresholdCount')
|
||||
unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount')
|
||||
http_codes = self._get_param('Matcher.HttpCode')
|
||||
|
||||
target_group = self.elbv2_backend.modify_target_group(arn, health_check_proto, health_check_port, health_check_path, health_check_interval,
|
||||
health_check_timeout, healthy_threshold_count, unhealthy_threshold_count, http_codes)
|
||||
|
||||
template = self.response_template(MODIFY_TARGET_GROUP_TEMPLATE)
|
||||
return template.render(target_group=target_group)
|
||||
|
||||
@amzn_request_id
|
||||
def modify_listener(self):
|
||||
arn = self._get_param('ListenerArn')
|
||||
port = self._get_param('Port')
|
||||
protocol = self._get_param('Protocol')
|
||||
ssl_policy = self._get_param('SslPolicy')
|
||||
certificates = self._get_list_prefix('Certificates.member')
|
||||
default_actions = self._get_list_prefix('DefaultActions.member')
|
||||
|
||||
# Should really move SSL Policies to models
|
||||
if ssl_policy is not None and ssl_policy not in [item['name'] for item in SSL_POLICIES]:
|
||||
raise RESTError('SSLPolicyNotFound', 'Policy {0} not found'.format(ssl_policy))
|
||||
|
||||
listener = self.elbv2_backend.modify_listener(arn, port, protocol, ssl_policy, certificates, default_actions)
|
||||
|
||||
template = self.response_template(MODIFY_LISTENER_TEMPLATE)
|
||||
return template.render(listener=listener)
|
||||
|
||||
def _add_tags(self, resource):
|
||||
tag_values = []
|
||||
tag_keys = []
|
||||
@ -348,14 +611,14 @@ class ELBV2Response(BaseResponse):
|
||||
ADD_TAGS_TEMPLATE = """<AddTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<AddTagsResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</AddTagsResponse>"""
|
||||
|
||||
REMOVE_TAGS_TEMPLATE = """<RemoveTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<RemoveTagsResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</RemoveTagsResponse>"""
|
||||
|
||||
@ -378,11 +641,10 @@ DESCRIBE_TAGS_TEMPLATE = """<DescribeTagsResponse xmlns="http://elasticloadbalan
|
||||
</TagDescriptions>
|
||||
</DescribeTagsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeTagsResponse>"""
|
||||
|
||||
|
||||
CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<CreateLoadBalancerResult>
|
||||
<LoadBalancers>
|
||||
@ -415,7 +677,7 @@ CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResponse xmlns="http://ela
|
||||
</LoadBalancers>
|
||||
</CreateLoadBalancerResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>32d531b2-f2d0-11e5-9192-3fff33344cfa</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</CreateLoadBalancerResponse>"""
|
||||
|
||||
@ -452,7 +714,7 @@ CREATE_RULE_TEMPLATE = """<CreateRuleResponse xmlns="http://elasticloadbalancing
|
||||
</Rules>
|
||||
</CreateRuleResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>c5478c83-f397-11e5-bb98-57195a6eb84a</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</CreateRuleResponse>"""
|
||||
|
||||
@ -472,14 +734,19 @@ CREATE_TARGET_GROUP_TEMPLATE = """<CreateTargetGroupResponse xmlns="http://elast
|
||||
<HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds>
|
||||
<HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount>
|
||||
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount>
|
||||
{% if target_group.matcher %}
|
||||
<Matcher>
|
||||
<HttpCode>200</HttpCode>
|
||||
<HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode>
|
||||
</Matcher>
|
||||
{% endif %}
|
||||
{% if target_group.target_type %}
|
||||
<TargetType>{{ target_group.target_type }}</TargetType>
|
||||
{% endif %}
|
||||
</member>
|
||||
</TargetGroups>
|
||||
</CreateTargetGroupResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>b83fe90e-f2d5-11e5-b95d-3b2c1831fc26</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</CreateTargetGroupResponse>"""
|
||||
|
||||
@ -489,11 +756,13 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
|
||||
<member>
|
||||
<LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn>
|
||||
<Protocol>{{ listener.protocol }}</Protocol>
|
||||
{% if listener.certificate %}
|
||||
{% if listener.certificates %}
|
||||
<Certificates>
|
||||
{% for cert in listener.certificates %}
|
||||
<member>
|
||||
<CertificateArn>{{ listener.certificate }}</CertificateArn>
|
||||
<CertificateArn>{{ cert }}</CertificateArn>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Certificates>
|
||||
{% endif %}
|
||||
<Port>{{ listener.port }}</Port>
|
||||
@ -511,35 +780,35 @@ CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadb
|
||||
</Listeners>
|
||||
</CreateListenerResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>97f1bb38-f390-11e5-b95d-3b2c1831fc26</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</CreateListenerResponse>"""
|
||||
|
||||
DELETE_LOAD_BALANCER_TEMPLATE = """<DeleteLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DeleteLoadBalancerResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteLoadBalancerResponse>"""
|
||||
|
||||
DELETE_RULE_TEMPLATE = """<DeleteRuleResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DeleteRuleResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteRuleResponse>"""
|
||||
|
||||
DELETE_TARGET_GROUP_TEMPLATE = """<DeleteTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DeleteTargetGroupResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteTargetGroupResponse>"""
|
||||
|
||||
DELETE_LISTENER_TEMPLATE = """<DeleteListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DeleteListenerResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteListenerResponse>"""
|
||||
|
||||
@ -572,6 +841,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
|
||||
<Code>provisioning</Code>
|
||||
</State>
|
||||
<Type>application</Type>
|
||||
<IpAddressType>ipv4</IpAddressType>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</LoadBalancers>
|
||||
@ -580,7 +850,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http
|
||||
{% endif %}
|
||||
</DescribeLoadBalancersResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeLoadBalancersResponse>"""
|
||||
|
||||
@ -620,7 +890,7 @@ DESCRIBE_RULES_TEMPLATE = """<DescribeRulesResponse xmlns="http://elasticloadbal
|
||||
{% endif %}
|
||||
</DescribeRulesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>74926cf3-f3a3-11e5-b543-9f2c3fbb9bee</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeRulesResponse>"""
|
||||
|
||||
@ -634,16 +904,21 @@ DESCRIBE_TARGET_GROUPS_TEMPLATE = """<DescribeTargetGroupsResponse xmlns="http:/
|
||||
<Protocol>{{ target_group.protocol }}</Protocol>
|
||||
<Port>{{ target_group.port }}</Port>
|
||||
<VpcId>{{ target_group.vpc_id }}</VpcId>
|
||||
<HealthCheckProtocol>{{ target_group.health_check_protocol }}</HealthCheckProtocol>
|
||||
<HealthCheckProtocol>{{ target_group.healthcheck_protocol }}</HealthCheckProtocol>
|
||||
<HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort>
|
||||
<HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath>
|
||||
<HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds>
|
||||
<HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds>
|
||||
<HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount>
|
||||
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount>
|
||||
{% if target_group.matcher %}
|
||||
<Matcher>
|
||||
<HttpCode>200</HttpCode>
|
||||
<HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode>
|
||||
</Matcher>
|
||||
{% endif %}
|
||||
{% if target_group.target_type %}
|
||||
<TargetType>{{ target_group.target_type }}</TargetType>
|
||||
{% endif %}
|
||||
<LoadBalancerArns>
|
||||
{% for load_balancer_arn in target_group.load_balancer_arns %}
|
||||
<member>{{ load_balancer_arn }}</member>
|
||||
@ -654,11 +929,10 @@ DESCRIBE_TARGET_GROUPS_TEMPLATE = """<DescribeTargetGroupsResponse xmlns="http:/
|
||||
</TargetGroups>
|
||||
</DescribeTargetGroupsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>70092c0e-f3a9-11e5-ae48-cff02092876b</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeTargetGroupsResponse>"""
|
||||
|
||||
|
||||
DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<DescribeTargetGroupAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DescribeTargetGroupAttributesResult>
|
||||
<Attributes>
|
||||
@ -671,11 +945,10 @@ DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<DescribeTargetGroupAttributesRes
|
||||
</Attributes>
|
||||
</DescribeTargetGroupAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>70092c0e-f3a9-11e5-ae48-cff02092876b</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeTargetGroupAttributesResponse>"""
|
||||
|
||||
|
||||
DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DescribeListenersResult>
|
||||
<Listeners>
|
||||
@ -706,7 +979,7 @@ DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://el
|
||||
</Listeners>
|
||||
</DescribeListenersResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>65a3a7ea-f39c-11e5-b543-9f2c3fbb9bee</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeLoadBalancersResponse>"""
|
||||
|
||||
@ -721,7 +994,7 @@ CONFIGURE_HEALTH_CHECK_TEMPLATE = """<ConfigureHealthCheckResponse xmlns="http:/
|
||||
</HealthCheck>
|
||||
</ConfigureHealthCheckResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ConfigureHealthCheckResponse>"""
|
||||
|
||||
@ -758,7 +1031,7 @@ MODIFY_RULE_TEMPLATE = """<ModifyRuleResponse xmlns="http://elasticloadbalancing
|
||||
</Rules>
|
||||
</ModifyRuleResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>c5478c83-f397-11e5-bb98-57195a6eb84a</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ModifyRuleResponse>"""
|
||||
|
||||
@ -774,7 +1047,7 @@ MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<ModifyTargetGroupAttributesRespons
|
||||
</Attributes>
|
||||
</ModifyTargetGroupAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>70092c0e-f3a9-11e5-ae48-cff02092876b</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ModifyTargetGroupAttributesResponse>"""
|
||||
|
||||
@ -782,7 +1055,7 @@ REGISTER_TARGETS_TEMPLATE = """<RegisterTargetsResponse xmlns="http://elasticloa
|
||||
<RegisterTargetsResult>
|
||||
</RegisterTargetsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</RegisterTargetsResponse>"""
|
||||
|
||||
@ -790,22 +1063,21 @@ DEREGISTER_TARGETS_TEMPLATE = """<DeregisterTargetsResponse xmlns="http://elasti
|
||||
<DeregisterTargetsResult>
|
||||
</DeregisterTargetsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeregisterTargetsResponse>"""
|
||||
|
||||
SET_LOAD_BALANCER_SSL_CERTIFICATE = """<SetLoadBalancerListenerSSLCertificateResponse xmlns="http://elasticloadbalan cing.amazonaws.com/doc/2015-12-01/">
|
||||
<SetLoadBalancerListenerSSLCertificateResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetLoadBalancerListenerSSLCertificateResponse>"""
|
||||
|
||||
|
||||
DELETE_LOAD_BALANCER_LISTENERS = """<DeleteLoadBalancerListenersResponse xmlns="http://elasticloadbalan cing.amazonaws.com/doc/2015-12-01/">
|
||||
<DeleteLoadBalancerListenersResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteLoadBalancerListenersResponse>"""
|
||||
|
||||
@ -837,7 +1109,7 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """<DescribeLoadBalancerAttributesResponse xmlns
|
||||
</LoadBalancerAttributes>
|
||||
</DescribeLoadBalancerAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeLoadBalancerAttributesResponse>
|
||||
"""
|
||||
@ -871,7 +1143,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="htt
|
||||
</LoadBalancerAttributes>
|
||||
</ModifyLoadBalancerAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ModifyLoadBalancerAttributesResponse>
|
||||
"""
|
||||
@ -879,7 +1151,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="htt
|
||||
CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """<CreateLoadBalancerPolicyResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<CreateLoadBalancerPolicyResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</CreateLoadBalancerPolicyResponse>
|
||||
"""
|
||||
@ -887,7 +1159,7 @@ CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """<CreateLoadBalancerPolicyResponse xmln
|
||||
SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """<SetLoadBalancerPoliciesOfListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<SetLoadBalancerPoliciesOfListenerResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetLoadBalancerPoliciesOfListenerResponse>
|
||||
"""
|
||||
@ -895,7 +1167,7 @@ SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """<SetLoadBalancerPoliciesOfL
|
||||
SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """<SetLoadBalancerPoliciesForBackendServerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<SetLoadBalancerPoliciesForBackendServerResult/>
|
||||
<ResponseMetadata>
|
||||
<RequestId>0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetLoadBalancerPoliciesForBackendServerResponse>
|
||||
"""
|
||||
@ -918,7 +1190,7 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """<DescribeTargetHealthResponse xmlns="http:/
|
||||
</TargetHealthDescriptions>
|
||||
</DescribeTargetHealthResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>c534f810-f389-11e5-9192-3fff33344cfa</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeTargetHealthResponse>"""
|
||||
|
||||
@ -955,6 +1227,186 @@ SET_RULE_PRIORITIES_TEMPLATE = """<SetRulePrioritiesResponse xmlns="http://elast
|
||||
</Rules>
|
||||
</SetRulePrioritiesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>4d7a8036-f3a7-11e5-9c02-8fd20490d5a6</RequestId>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetRulePrioritiesResponse>"""
|
||||
|
||||
DESCRIBE_LIMITS_TEMPLATE = """<DescribeAccountLimitsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DescribeAccountLimitsResult>
|
||||
<Limits>
|
||||
{% for key, value in limits.items() %}
|
||||
<member>
|
||||
<Name>{{ key }}</Name>
|
||||
<Max>{{ value }}</Max>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Limits>
|
||||
</DescribeAccountLimitsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeAccountLimitsResponse>"""
|
||||
|
||||
DESCRIBE_SSL_POLICIES_TEMPLATE = """<DescribeSSLPoliciesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DescribeSSLPoliciesResult>
|
||||
<SslPolicies>
|
||||
{% for policy in policies %}
|
||||
<member>
|
||||
<Name>{{ policy['name'] }}</Name>
|
||||
<Ciphers>
|
||||
{% for cipher in policy['ciphers'] %}
|
||||
<member>
|
||||
<Name>{{ cipher['name'] }}</Name>
|
||||
<Priority>{{ cipher['priority'] }}</Priority>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Ciphers>
|
||||
<SslProtocols>
|
||||
{% for proto in policy['ssl_protocols'] %}
|
||||
<member>{{ proto }}</member>
|
||||
{% endfor %}
|
||||
</SslProtocols>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</SslPolicies>
|
||||
</DescribeSSLPoliciesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeSSLPoliciesResponse>"""
|
||||
|
||||
SET_IP_ADDRESS_TYPE_TEMPLATE = """<SetIpAddressTypeResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<SetIpAddressTypeResult>
|
||||
<IpAddressType>{{ ip_type }}</IpAddressType>
|
||||
</SetIpAddressTypeResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetIpAddressTypeResponse>"""
|
||||
|
||||
SET_SECURITY_GROUPS_TEMPLATE = """<SetSecurityGroupsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<SetSecurityGroupsResult>
|
||||
<SecurityGroupIds>
|
||||
{% for group in sec_groups %}
|
||||
<member>{{ group }}</member>
|
||||
{% endfor %}
|
||||
</SecurityGroupIds>
|
||||
</SetSecurityGroupsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetSecurityGroupsResponse>"""
|
||||
|
||||
SET_SUBNETS_TEMPLATE = """<SetSubnetsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<SetSubnetsResult>
|
||||
<AvailabilityZones>
|
||||
{% for zone_id, subnet_id in subnets %}
|
||||
<member>
|
||||
<SubnetId>{{ subnet_id }}</SubnetId>
|
||||
<ZoneName>{{ zone_id }}</ZoneName>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</AvailabilityZones>
|
||||
</SetSubnetsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetSubnetsResponse>"""
|
||||
|
||||
MODIFY_LOADBALANCER_ATTRS_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<ModifyLoadBalancerAttributesResult>
|
||||
<Attributes>
|
||||
{% for key, value in attrs.items() %}
|
||||
<member>
|
||||
{% if value == None %}<Value />{% else %}<Value>{{ value }}</Value>{% endif %}
|
||||
<Key>{{ key }}</Key>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Attributes>
|
||||
</ModifyLoadBalancerAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ModifyLoadBalancerAttributesResponse>"""
|
||||
|
||||
DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE = """<DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<DescribeLoadBalancerAttributesResult>
|
||||
<Attributes>
|
||||
{% for key, value in attrs.items() %}
|
||||
<member>
|
||||
{% if value == None %}<Value />{% else %}<Value>{{ value }}</Value>{% endif %}
|
||||
<Key>{{ key }}</Key>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Attributes>
|
||||
</DescribeLoadBalancerAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DescribeLoadBalancerAttributesResponse>"""
|
||||
|
||||
MODIFY_TARGET_GROUP_TEMPLATE = """<ModifyTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<ModifyTargetGroupResult>
|
||||
<TargetGroups>
|
||||
<member>
|
||||
<TargetGroupArn>{{ target_group.arn }}</TargetGroupArn>
|
||||
<TargetGroupName>{{ target_group.name }}</TargetGroupName>
|
||||
<Protocol>{{ target_group.protocol }}</Protocol>
|
||||
<Port>{{ target_group.port }}</Port>
|
||||
<VpcId>{{ target_group.vpc_id }}</VpcId>
|
||||
<HealthCheckProtocol>{{ target_group.healthcheck_protocol }}</HealthCheckProtocol>
|
||||
<HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort>
|
||||
<HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath>
|
||||
<HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds>
|
||||
<HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds>
|
||||
<HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount>
|
||||
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount>
|
||||
<Matcher>
|
||||
<HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode>
|
||||
</Matcher>
|
||||
<LoadBalancerArns>
|
||||
{% for load_balancer_arn in target_group.load_balancer_arns %}
|
||||
<member>{{ load_balancer_arn }}</member>
|
||||
{% endfor %}
|
||||
</LoadBalancerArns>
|
||||
</member>
|
||||
</TargetGroups>
|
||||
</ModifyTargetGroupResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ModifyTargetGroupResponse>"""
|
||||
|
||||
MODIFY_LISTENER_TEMPLATE = """<ModifyListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/">
|
||||
<ModifyListenerResult>
|
||||
<Listeners>
|
||||
<member>
|
||||
<LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn>
|
||||
<Protocol>{{ listener.protocol }}</Protocol>
|
||||
{% if listener.certificates %}
|
||||
<Certificates>
|
||||
{% for cert in listener.certificates %}
|
||||
<member>
|
||||
<CertificateArn>{{ cert }}</CertificateArn>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</Certificates>
|
||||
{% endif %}
|
||||
<Port>{{ listener.port }}</Port>
|
||||
<SslPolicy>{{ listener.ssl_policy }}</SslPolicy>
|
||||
<ListenerArn>{{ listener.arn }}</ListenerArn>
|
||||
<DefaultActions>
|
||||
{% for action in listener.default_actions %}
|
||||
<member>
|
||||
<Type>{{ action.type }}</Type>
|
||||
<TargetGroupArn>{{ action.target_group_arn }}</TargetGroupArn>
|
||||
</member>
|
||||
{% endfor %}
|
||||
</DefaultActions>
|
||||
</member>
|
||||
</Listeners>
|
||||
</ModifyListenerResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ModifyListenerResponse>"""
|
||||
|
@ -1,6 +1,7 @@
|
||||
import os
|
||||
import re
|
||||
|
||||
from moto.core.exceptions import JsonRESTError
|
||||
from moto.core import BaseBackend, BaseModel
|
||||
|
||||
|
||||
@ -50,6 +51,8 @@ class Rule(BaseModel):
|
||||
|
||||
|
||||
class EventsBackend(BaseBackend):
|
||||
ACCOUNT_ID = re.compile(r'^(\d{1,12}|\*)$')
|
||||
STATEMENT_ID = re.compile(r'^[a-zA-Z0-9-_]{1,64}$')
|
||||
|
||||
def __init__(self):
|
||||
self.rules = {}
|
||||
@ -58,6 +61,8 @@ class EventsBackend(BaseBackend):
|
||||
self.rules_order = []
|
||||
self.next_tokens = {}
|
||||
|
||||
self.permissions = {}
|
||||
|
||||
def _get_rule_by_index(self, i):
|
||||
return self.rules.get(self.rules_order[i])
|
||||
|
||||
@ -181,6 +186,17 @@ class EventsBackend(BaseBackend):
|
||||
|
||||
return False
|
||||
|
||||
def put_events(self, events):
|
||||
num_events = len(events)
|
||||
|
||||
if num_events < 1:
|
||||
raise JsonRESTError('ValidationError', 'Need at least 1 event')
|
||||
elif num_events > 10:
|
||||
raise JsonRESTError('ValidationError', 'Can only submit 10 events at once')
|
||||
|
||||
# We don't really need to store the events yet
|
||||
return []
|
||||
|
||||
def remove_targets(self, name, ids):
|
||||
rule = self.rules.get(name)
|
||||
|
||||
@ -193,5 +209,40 @@ class EventsBackend(BaseBackend):
|
||||
def test_event_pattern(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def put_permission(self, action, principal, statement_id):
|
||||
if action is None or action != 'PutEvents':
|
||||
raise JsonRESTError('InvalidParameterValue', 'Action must be PutEvents')
|
||||
|
||||
if principal is None or self.ACCOUNT_ID.match(principal) is None:
|
||||
raise JsonRESTError('InvalidParameterValue', 'Principal must match ^(\d{1,12}|\*)$')
|
||||
|
||||
if statement_id is None or self.STATEMENT_ID.match(statement_id) is None:
|
||||
raise JsonRESTError('InvalidParameterValue', 'StatementId must match ^[a-zA-Z0-9-_]{1,64}$')
|
||||
|
||||
self.permissions[statement_id] = {'action': action, 'principal': principal}
|
||||
|
||||
def remove_permission(self, statement_id):
|
||||
try:
|
||||
del self.permissions[statement_id]
|
||||
except KeyError:
|
||||
raise JsonRESTError('ResourceNotFoundException', 'StatementId not found')
|
||||
|
||||
def describe_event_bus(self):
|
||||
arn = "arn:aws:events:us-east-1:000000000000:event-bus/default"
|
||||
statements = []
|
||||
for statement_id, data in self.permissions.items():
|
||||
statements.append({
|
||||
'Sid': statement_id,
|
||||
'Effect': 'Allow',
|
||||
'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])},
|
||||
'Action': 'events:{0}'.format(data['action']),
|
||||
'Resource': arn
|
||||
})
|
||||
return {
|
||||
'Policy': {'Version': '2012-10-17', 'Statement': statements},
|
||||
'Name': 'default',
|
||||
'Arn': arn
|
||||
}
|
||||
|
||||
|
||||
events_backend = EventsBackend()
|
||||
|
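For reference, a rough usage sketch of the permission endpoints added above, assuming the @mock_events decorator and a boto3 'events' client (the account ID and statement ID are made up; note the backend above validates Action against the bare string 'PutEvents', whereas the real service documents the 'events:'-prefixed form, and response shapes may differ slightly):

import boto3
from moto import mock_events


@mock_events
def example_event_bus_permissions():
    client = boto3.client('events', region_name='us-east-1')

    # Allow a hypothetical account to put events onto the default bus
    client.put_permission(Action='PutEvents',
                          Principal='111111111111',
                          StatementId='allow-account-one')

    # The default event bus is returned with the accumulated policy statements
    bus = client.describe_event_bus()
    assert bus['Name'] == 'default'

    # Revoke the grant again by StatementId
    client.remove_permission(StatementId='allow-account-one')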
@ -18,9 +18,17 @@ class EventsHandler(BaseResponse):
|
||||
'RoleArn': rule.role_arn
|
||||
}
|
||||
|
||||
def load_body(self):
|
||||
decoded_body = self.body
|
||||
return json.loads(decoded_body or '{}')
|
||||
@property
|
||||
def request_params(self):
|
||||
if not hasattr(self, '_json_body'):
|
||||
try:
|
||||
self._json_body = json.loads(self.body)
|
||||
except ValueError:
|
||||
self._json_body = {}
|
||||
return self._json_body
|
||||
|
||||
def _get_param(self, param, if_none=None):
|
||||
return self.request_params.get(param, if_none)
|
||||
|
||||
def error(self, type_, message='', status=400):
|
||||
headers = self.response_headers
|
||||
@ -28,8 +36,7 @@ class EventsHandler(BaseResponse):
|
||||
return json.dumps({'__type': type_, 'message': message}), headers,
|
||||
|
||||
def delete_rule(self):
|
||||
body = self.load_body()
|
||||
name = body.get('Name')
|
||||
name = self._get_param('Name')
|
||||
|
||||
if not name:
|
||||
return self.error('ValidationException', 'Parameter Name is required.')
|
||||
@ -38,8 +45,7 @@ class EventsHandler(BaseResponse):
|
||||
return '', self.response_headers
|
||||
|
||||
def describe_rule(self):
|
||||
body = self.load_body()
|
||||
name = body.get('Name')
|
||||
name = self._get_param('Name')
|
||||
|
||||
if not name:
|
||||
return self.error('ValidationException', 'Parameter Name is required.')
|
||||
@ -53,8 +59,7 @@ class EventsHandler(BaseResponse):
|
||||
return json.dumps(rule_dict), self.response_headers
|
||||
|
||||
def disable_rule(self):
|
||||
body = self.load_body()
|
||||
name = body.get('Name')
|
||||
name = self._get_param('Name')
|
||||
|
||||
if not name:
|
||||
return self.error('ValidationException', 'Parameter Name is required.')
|
||||
@ -65,8 +70,7 @@ class EventsHandler(BaseResponse):
|
||||
return '', self.response_headers
|
||||
|
||||
def enable_rule(self):
|
||||
body = self.load_body()
|
||||
name = body.get('Name')
|
||||
name = self._get_param('Name')
|
||||
|
||||
if not name:
|
||||
return self.error('ValidationException', 'Parameter Name is required.')
|
||||
@ -80,10 +84,9 @@ class EventsHandler(BaseResponse):
|
||||
pass
|
||||
|
||||
def list_rule_names_by_target(self):
|
||||
body = self.load_body()
|
||||
target_arn = body.get('TargetArn')
|
||||
next_token = body.get('NextToken')
|
||||
limit = body.get('Limit')
|
||||
target_arn = self._get_param('TargetArn')
|
||||
next_token = self._get_param('NextToken')
|
||||
limit = self._get_param('Limit')
|
||||
|
||||
if not target_arn:
|
||||
return self.error('ValidationException', 'Parameter TargetArn is required.')
|
||||
@ -94,10 +97,9 @@ class EventsHandler(BaseResponse):
|
||||
return json.dumps(rule_names), self.response_headers
|
||||
|
||||
def list_rules(self):
|
||||
body = self.load_body()
|
||||
prefix = body.get('NamePrefix')
|
||||
next_token = body.get('NextToken')
|
||||
limit = body.get('Limit')
|
||||
prefix = self._get_param('NamePrefix')
|
||||
next_token = self._get_param('NextToken')
|
||||
limit = self._get_param('Limit')
|
||||
|
||||
rules = events_backend.list_rules(prefix, next_token, limit)
|
||||
rules_obj = {'Rules': []}
|
||||
@ -111,10 +113,9 @@ class EventsHandler(BaseResponse):
|
||||
return json.dumps(rules_obj), self.response_headers
|
||||
|
||||
def list_targets_by_rule(self):
|
||||
body = self.load_body()
|
||||
rule_name = body.get('Rule')
|
||||
next_token = body.get('NextToken')
|
||||
limit = body.get('Limit')
|
||||
rule_name = self._get_param('Rule')
|
||||
next_token = self._get_param('NextToken')
|
||||
limit = self._get_param('Limit')
|
||||
|
||||
if not rule_name:
|
||||
return self.error('ValidationException', 'Parameter Rule is required.')
|
||||
@ -128,13 +129,25 @@ class EventsHandler(BaseResponse):
|
||||
return json.dumps(targets), self.response_headers
|
||||
|
||||
def put_events(self):
|
||||
events = self._get_param('Entries')
|
||||
|
||||
failed_entries = events_backend.put_events(events)
|
||||
|
||||
if failed_entries:
|
||||
return json.dumps({
|
||||
'FailedEntryCount': len(failed_entries),
|
||||
'Entries': failed_entries
|
||||
})
|
||||
|
||||
return '', self.response_headers
|
||||
|
||||
def put_rule(self):
|
||||
body = self.load_body()
|
||||
name = body.get('Name')
|
||||
event_pattern = body.get('EventPattern')
|
||||
sched_exp = body.get('ScheduleExpression')
|
||||
name = self._get_param('Name')
|
||||
event_pattern = self._get_param('EventPattern')
|
||||
sched_exp = self._get_param('ScheduleExpression')
|
||||
state = self._get_param('State')
|
||||
desc = self._get_param('Description')
|
||||
role_arn = self._get_param('RoleArn')
|
||||
|
||||
if not name:
|
||||
return self.error('ValidationException', 'Parameter Name is required.')
|
||||
@ -156,17 +169,16 @@ class EventsHandler(BaseResponse):
|
||||
name,
|
||||
ScheduleExpression=sched_exp,
|
||||
EventPattern=event_pattern,
|
||||
State=body.get('State'),
|
||||
Description=body.get('Description'),
|
||||
RoleArn=body.get('RoleArn')
|
||||
State=state,
|
||||
Description=desc,
|
||||
RoleArn=role_arn
|
||||
)
|
||||
|
||||
return json.dumps({'RuleArn': rule_arn}), self.response_headers
|
||||
|
||||
def put_targets(self):
|
||||
body = self.load_body()
|
||||
rule_name = body.get('Rule')
|
||||
targets = body.get('Targets')
|
||||
rule_name = self._get_param('Rule')
|
||||
targets = self._get_param('Targets')
|
||||
|
||||
if not rule_name:
|
||||
return self.error('ValidationException', 'Parameter Rule is required.')
|
||||
@ -180,9 +192,8 @@ class EventsHandler(BaseResponse):
|
||||
return '', self.response_headers
|
||||
|
||||
def remove_targets(self):
|
||||
body = self.load_body()
|
||||
rule_name = body.get('Rule')
|
||||
ids = body.get('Ids')
|
||||
rule_name = self._get_param('Rule')
|
||||
ids = self._get_param('Ids')
|
||||
|
||||
if not rule_name:
|
||||
return self.error('ValidationException', 'Parameter Rule is required.')
|
||||
@ -197,3 +208,22 @@ class EventsHandler(BaseResponse):
|
||||
|
||||
def test_event_pattern(self):
|
||||
pass
|
||||
|
||||
def put_permission(self):
|
||||
action = self._get_param('Action')
|
||||
principal = self._get_param('Principal')
|
||||
statement_id = self._get_param('StatementId')
|
||||
|
||||
events_backend.put_permission(action, principal, statement_id)
|
||||
|
||||
return ''
|
||||
|
||||
def remove_permission(self):
|
||||
statement_id = self._get_param('StatementId')
|
||||
|
||||
events_backend.remove_permission(statement_id)
|
||||
|
||||
return ''
|
||||
|
||||
def describe_event_bus(self):
|
||||
return json.dumps(events_backend.describe_event_bus())
|
||||
|
@ -704,7 +704,8 @@ class RDS2Backend(BaseBackend):
|
||||
if self.arn_regex.match(source_database_id):
|
||||
db_kwargs['region'] = self.region
|
||||
|
||||
replica = copy.deepcopy(primary)
|
||||
# Shouldn't really copy here as the instance is duplicated. RDS replicas have different instances.
|
||||
replica = copy.copy(primary)
|
||||
replica.update(db_kwargs)
|
||||
replica.set_as_replica()
|
||||
self.databases[database_id] = replica
|
||||
|
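As the comment above notes, the replica is produced by copying the primary; a minimal illustration of what the switch from copy.deepcopy to copy.copy changes, using a hypothetical stand-in object rather than the real Database model:

import copy


class FakeDatabase(object):
    # Hypothetical stand-in for the RDS database model
    def __init__(self, db_id):
        self.db_instance_identifier = db_id
        self.tags = [{'Key': 'env', 'Value': 'test'}]


primary = FakeDatabase('primary-db')

shallow = copy.copy(primary)   # new object, but nested attributes stay shared
deep = copy.deepcopy(primary)  # new object with fully independent attributes

assert shallow.tags is primary.tags
assert deep.tags is not primary.tags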
@ -764,7 +764,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
|
||||
return FakeTagging()
|
||||
|
||||
def _tagging_from_xml(self, xml):
|
||||
parsed_xml = xmltodict.parse(xml)
|
||||
parsed_xml = xmltodict.parse(xml, force_list={'Tag': True})
|
||||
|
||||
tags = []
|
||||
for tag in parsed_xml['Tagging']['TagSet']['Tag']:
|
||||
|
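The force_list argument matters because xmltodict returns a single <Tag> element as a plain dict rather than a one-element list, which would break the for-loop above; a small sketch of the difference:

import xmltodict

xml = ('<Tagging><TagSet>'
       '<Tag><Key>env</Key><Value>test</Value></Tag>'
       '</TagSet></Tagging>')

# Without force_list, one Tag parses to a single dict ...
single = xmltodict.parse(xml)['Tagging']['TagSet']['Tag']
assert single['Key'] == 'env'

# ... with force_list it is always a list, however many Tags there are
listed = xmltodict.parse(xml, force_list={'Tag': True})['Tagging']['TagSet']['Tag']
assert listed[0]['Key'] == 'env'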
@ -2,6 +2,7 @@ from __future__ import unicode_literals
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import json
|
||||
import re
|
||||
import six
|
||||
import struct
|
||||
@ -9,6 +10,7 @@ from xml.sax.saxutils import escape
|
||||
|
||||
import boto.sqs
|
||||
|
||||
from moto.core.exceptions import RESTError
|
||||
from moto.core import BaseBackend, BaseModel
|
||||
from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis
|
||||
from .utils import generate_receipt_handle
|
||||
@ -166,11 +168,14 @@ class Queue(BaseModel):
|
||||
'ReceiveMessageWaitTimeSeconds',
|
||||
'VisibilityTimeout',
|
||||
'WaitTimeSeconds']
|
||||
ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes',
|
||||
'GetQueueUrl', 'ReceiveMessage', 'SendMessage')
|
||||
|
||||
def __init__(self, name, region, **kwargs):
|
||||
self.name = name
|
||||
self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30))
|
||||
self.region = region
|
||||
self.tags = {}
|
||||
|
||||
self._messages = []
|
||||
|
||||
@ -189,14 +194,42 @@ class Queue(BaseModel):
|
||||
self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days
|
||||
self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name)
|
||||
self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0))
|
||||
self.permissions = {}
|
||||
|
||||
# a wait_time_seconds of 0 means messages are returned immediately
|
||||
self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0))
|
||||
|
||||
self.redrive_policy = {}
|
||||
self.dead_letter_queue = None
|
||||
|
||||
if 'RedrivePolicy' in kwargs:
|
||||
self._setup_dlq(kwargs['RedrivePolicy'])
|
||||
|
||||
# Check some conditions
|
||||
if self.fifo_queue and not self.name.endswith('.fifo'):
|
||||
raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues')
|
||||
|
||||
def _setup_dlq(self, policy_json):
|
||||
try:
|
||||
self.redrive_policy = json.loads(policy_json)
|
||||
except ValueError:
|
||||
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json')
|
||||
|
||||
if 'deadLetterTargetArn' not in self.redrive_policy:
|
||||
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn')
|
||||
if 'maxReceiveCount' not in self.redrive_policy:
|
||||
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain maxReceiveCount')
|
||||
|
||||
for queue in sqs_backends[self.region].queues.values():
|
||||
if queue.queue_arn == self.redrive_policy['deadLetterTargetArn']:
|
||||
self.dead_letter_queue = queue
|
||||
|
||||
if self.fifo_queue and not queue.fifo_queue:
|
||||
raise RESTError('InvalidParameterCombination', 'Fifo queues cannot use non fifo dead letter queues')
|
||||
break
|
||||
else:
|
||||
raise RESTError('AWS.SimpleQueueService.NonExistentQueue', 'Could not find DLQ for {0}'.format(self.redrive_policy['deadLetterTargetArn']))
|
||||
|
||||
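A rough sketch of exercising the redrive-policy handling above through boto3 (queue names are invented; assuming the @mock_sqs decorator). The dead-letter target ARN follows the arn:aws:sqs:<region>:123456789012:<name> format the backend builds for each queue:

import json

import boto3
from moto import mock_sqs


@mock_sqs
def example_dead_letter_queue():
    sqs = boto3.client('sqs', region_name='us-east-1')

    sqs.create_queue(QueueName='my-dlq')

    # RedrivePolicy must be valid JSON with deadLetterTargetArn and maxReceiveCount
    sqs.create_queue(
        QueueName='my-queue',
        Attributes={
            'RedrivePolicy': json.dumps({
                'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:my-dlq',
                'maxReceiveCount': 2,
            }),
        },
    )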
@classmethod
|
||||
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
|
||||
properties = cloudformation_json['Properties']
|
||||
@ -382,9 +415,14 @@ class SQSBackend(BaseBackend):
|
||||
time.sleep(0.001)
|
||||
continue
|
||||
|
||||
messages_to_dlq = []
|
||||
for message in queue.messages:
|
||||
if not message.visible:
|
||||
continue
|
||||
if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']:
|
||||
messages_to_dlq.append(message)
|
||||
continue
|
||||
|
||||
message.mark_received(
|
||||
visibility_timeout=visibility_timeout
|
||||
)
|
||||
@ -392,6 +430,10 @@ class SQSBackend(BaseBackend):
|
||||
if len(result) >= count:
|
||||
break
|
||||
|
||||
for message in messages_to_dlq:
|
||||
queue._messages.remove(message)
|
||||
queue.dead_letter_queue.add_message(message)
|
||||
|
||||
return result
|
||||
|
||||
def delete_message(self, queue_name, receipt_handle):
|
||||
@ -419,6 +461,49 @@ class SQSBackend(BaseBackend):
|
||||
queue = self.get_queue(queue_name)
|
||||
queue._messages = []
|
||||
|
||||
def list_dead_letter_source_queues(self, queue_name):
|
||||
dlq = self.get_queue(queue_name)
|
||||
|
||||
queues = []
|
||||
for queue in self.queues.values():
|
||||
if queue.dead_letter_queue is dlq:
|
||||
queues.append(queue)
|
||||
|
||||
return queues
|
||||
|
||||
def add_permission(self, queue_name, actions, account_ids, label):
|
||||
queue = self.get_queue(queue_name)
|
||||
|
||||
if actions is None or len(actions) == 0:
|
||||
raise RESTError('InvalidParameterValue', 'Need at least one Action')
|
||||
if account_ids is None or len(account_ids) == 0:
|
||||
raise RESTError('InvalidParameterValue', 'Need at least one Account ID')
|
||||
|
||||
if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]):
|
||||
raise RESTError('InvalidParameterValue', 'Invalid permissions')
|
||||
|
||||
queue.permissions[label] = (account_ids, actions)
|
||||
|
||||
def remove_permission(self, queue_name, label):
|
||||
queue = self.get_queue(queue_name)
|
||||
|
||||
if label not in queue.permissions:
|
||||
raise RESTError('InvalidParameterValue', 'Permission doesnt exist for the given label')
|
||||
|
||||
del queue.permissions[label]
|
||||
|
||||
def tag_queue(self, queue_name, tags):
|
||||
queue = self.get_queue(queue_name)
|
||||
queue.tags.update(tags)
|
||||
|
||||
def untag_queue(self, queue_name, tag_keys):
|
||||
queue = self.get_queue(queue_name)
|
||||
for key in tag_keys:
|
||||
try:
|
||||
del queue.tags[key]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
|
||||
sqs_backends = {}
|
||||
for region in boto.sqs.regions():
|
||||
|
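Similarly, a hedged sketch of the new permission and tagging backend methods as driven through boto3 (queue name, label and account ID are illustrative):

import boto3
from moto import mock_sqs


@mock_sqs
def example_permissions_and_tags():
    sqs = boto3.client('sqs', region_name='us-east-1')
    url = sqs.create_queue(QueueName='example-queue')['QueueUrl']

    # Actions must come from Queue.ALLOWED_PERMISSIONS above
    sqs.add_permission(QueueUrl=url,
                       Label='cross-account-send',
                       AWSAccountIds=['111111111111'],
                       Actions=['SendMessage'])
    sqs.remove_permission(QueueUrl=url, Label='cross-account-send')

    sqs.tag_queue(QueueUrl=url, Tags={'env': 'test'})
    assert sqs.list_queue_tags(QueueUrl=url)['Tags'] == {'env': 'test'}
    sqs.untag_queue(QueueUrl=url, TagKeys=['env'])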
@ -40,12 +40,15 @@ class SQSResponse(BaseResponse):
|
||||
queue_name = self.path.split("/")[-1]
|
||||
return queue_name
|
||||
|
||||
def _get_validated_visibility_timeout(self):
|
||||
def _get_validated_visibility_timeout(self, timeout=None):
|
||||
"""
|
||||
:raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
|
||||
:raises TypeError: If visibility timeout was not specified
|
||||
"""
|
||||
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
|
||||
if timeout is not None:
|
||||
visibility_timeout = int(timeout)
|
||||
else:
|
||||
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
|
||||
|
||||
if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT:
|
||||
raise ValueError
|
||||
@ -119,6 +122,49 @@ class SQSResponse(BaseResponse):
|
||||
template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE)
|
||||
return template.render()
|
||||
|
||||
def change_message_visibility_batch(self):
|
||||
queue_name = self._get_queue_name()
|
||||
entries = self._get_list_prefix('ChangeMessageVisibilityBatchRequestEntry')
|
||||
|
||||
success = []
|
||||
error = []
|
||||
for entry in entries:
|
||||
try:
|
||||
visibility_timeout = self._get_validated_visibility_timeout(entry['visibility_timeout'])
|
||||
except ValueError:
|
||||
error.append({
|
||||
'Id': entry['id'],
|
||||
'SenderFault': 'true',
|
||||
'Code': 'InvalidParameterValue',
|
||||
'Message': 'Visibility timeout invalid'
|
||||
})
|
||||
continue
|
||||
|
||||
try:
|
||||
self.sqs_backend.change_message_visibility(
|
||||
queue_name=queue_name,
|
||||
receipt_handle=entry['receipt_handle'],
|
||||
visibility_timeout=visibility_timeout
|
||||
)
|
||||
success.append(entry['id'])
|
||||
except ReceiptHandleIsInvalid as e:
|
||||
error.append({
|
||||
'Id': entry['id'],
|
||||
'SenderFault': 'true',
|
||||
'Code': 'ReceiptHandleIsInvalid',
|
||||
'Message': e.description
|
||||
})
|
||||
except MessageNotInflight as e:
|
||||
error.append({
|
||||
'Id': entry['id'],
|
||||
'SenderFault': 'false',
|
||||
'Code': 'AWS.SimpleQueueService.MessageNotInflight',
|
||||
'Message': e.description
|
||||
})
|
||||
|
||||
template = self.response_template(CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE)
|
||||
return template.render(success=success, errors=error)
|
||||
|
||||
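A rough sketch of calling the new batch endpoint (the receipt handles are whatever an earlier receive_message returned; names are illustrative):

import boto3
from moto import mock_sqs


@mock_sqs
def example_change_visibility_batch():
    sqs = boto3.client('sqs', region_name='us-east-1')
    url = sqs.create_queue(QueueName='batch-queue')['QueueUrl']
    sqs.send_message(QueueUrl=url, MessageBody='hello')

    messages = sqs.receive_message(QueueUrl=url)['Messages']
    entries = [{'Id': str(i),
                'ReceiptHandle': m['ReceiptHandle'],
                'VisibilityTimeout': 60}
               for i, m in enumerate(messages)]

    resp = sqs.change_message_visibility_batch(QueueUrl=url, Entries=entries)
    # Per-entry outcomes come back under 'Successful' and 'Failed'
    assert not resp.get('Failed')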
def get_queue_attributes(self):
|
||||
queue_name = self._get_queue_name()
|
||||
try:
|
||||
@ -288,8 +334,62 @@ class SQSResponse(BaseResponse):
|
||||
messages = self.sqs_backend.receive_messages(
|
||||
queue_name, message_count, wait_time, visibility_timeout)
|
||||
template = self.response_template(RECEIVE_MESSAGE_RESPONSE)
|
||||
output = template.render(messages=messages)
|
||||
return output
|
||||
return template.render(messages=messages)
|
||||
|
||||
def list_dead_letter_source_queues(self):
|
||||
request_url = urlparse(self.uri)
|
||||
queue_name = self._get_queue_name()
|
||||
|
||||
source_queue_urls = self.sqs_backend.list_dead_letter_source_queues(queue_name)
|
||||
|
||||
template = self.response_template(LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE)
|
||||
return template.render(queues=source_queue_urls, request_url=request_url)
|
||||
|
||||
def add_permission(self):
|
||||
queue_name = self._get_queue_name()
|
||||
actions = self._get_multi_param('ActionName')
|
||||
account_ids = self._get_multi_param('AWSAccountId')
|
||||
label = self._get_param('Label')
|
||||
|
||||
self.sqs_backend.add_permission(queue_name, actions, account_ids, label)
|
||||
|
||||
template = self.response_template(ADD_PERMISSION_RESPONSE)
|
||||
return template.render()
|
||||
|
||||
def remove_permission(self):
|
||||
queue_name = self._get_queue_name()
|
||||
label = self._get_param('Label')
|
||||
|
||||
self.sqs_backend.remove_permission(queue_name, label)
|
||||
|
||||
template = self.response_template(REMOVE_PERMISSION_RESPONSE)
|
||||
return template.render()
|
||||
|
||||
def tag_queue(self):
|
||||
queue_name = self._get_queue_name()
|
||||
tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value')
|
||||
|
||||
self.sqs_backend.tag_queue(queue_name, tags)
|
||||
|
||||
template = self.response_template(TAG_QUEUE_RESPONSE)
|
||||
return template.render()
|
||||
|
||||
def untag_queue(self):
|
||||
queue_name = self._get_queue_name()
|
||||
tag_keys = self._get_multi_param('TagKey')
|
||||
|
||||
self.sqs_backend.untag_queue(queue_name, tag_keys)
|
||||
|
||||
template = self.response_template(UNTAG_QUEUE_RESPONSE)
|
||||
return template.render()
|
||||
|
||||
def list_queue_tags(self):
|
||||
queue_name = self._get_queue_name()
|
||||
|
||||
queue = self.sqs_backend.get_queue(queue_name)
|
||||
|
||||
template = self.response_template(LIST_QUEUE_TAGS_RESPONSE)
|
||||
return template.render(tags=queue.tags)
|
||||
|
||||
|
||||
CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
|
||||
@ -307,7 +407,7 @@ GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
|
||||
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
|
||||
</GetQueueUrlResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>470a6f13-2ed9-4181-ad8a-2fdea142988e</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</GetQueueUrlResponse>"""
|
||||
|
||||
@ -318,13 +418,13 @@ LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
|
||||
{% endfor %}
|
||||
</ListQueuesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>725275ae-0b9b-4762-b238-436d7c65a1ac</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListQueuesResponse>"""
|
||||
|
||||
DELETE_QUEUE_RESPONSE = """<DeleteQueueResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>6fde8d1e-52cd-4581-8cd9-c512f4c64223</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteQueueResponse>"""
|
||||
|
||||
@ -338,13 +438,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """<GetQueueAttributesResponse>
|
||||
{% endfor %}
|
||||
</GetQueueAttributesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>1ea71be5-b5a2-4f9d-b85a-945d8d08cd0b</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</GetQueueAttributesResponse>"""
|
||||
|
||||
SET_QUEUE_ATTRIBUTE_RESPONSE = """<SetQueueAttributesResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>e5cca473-4fc0-4198-a451-8abb94d02c75</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SetQueueAttributesResponse>"""
|
||||
|
||||
@ -361,7 +461,7 @@ SEND_MESSAGE_RESPONSE = """<SendMessageResponse>
|
||||
</MessageId>
|
||||
</SendMessageResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>27daac76-34dd-47df-bd01-1f6e873584a0</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SendMessageResponse>"""
|
||||
|
||||
@ -409,7 +509,7 @@ RECEIVE_MESSAGE_RESPONSE = """<ReceiveMessageResponse>
|
||||
{% endfor %}
|
||||
</ReceiveMessageResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>b6633655-283d-45b4-aee4-4e84e0ae6afa</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ReceiveMessageResponse>"""
|
||||
|
||||
@ -427,13 +527,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """<SendMessageBatchResponse>
|
||||
{% endfor %}
|
||||
</SendMessageBatchResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>ca1ad5d0-8271-408b-8d0f-1351bf547e74</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</SendMessageBatchResponse>"""
|
||||
|
||||
DELETE_MESSAGE_RESPONSE = """<DeleteMessageResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>b5293cb5-d306-4a17-9048-b263635abe42</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteMessageResponse>"""
|
||||
|
||||
@ -446,22 +546,92 @@ DELETE_MESSAGE_BATCH_RESPONSE = """<DeleteMessageBatchResponse>
|
||||
{% endfor %}
|
||||
</DeleteMessageBatchResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>d6f86b7a-74d1-4439-b43f-196a1e29cd85</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</DeleteMessageBatchResponse>"""
|
||||
|
||||
CHANGE_MESSAGE_VISIBILITY_RESPONSE = """<ChangeMessageVisibilityResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>6a7a282a-d013-4a59-aba9-335b0fa48bed</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ChangeMessageVisibilityResponse>"""
|
||||
|
||||
CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """<ChangeMessageVisibilityBatchResponse>
|
||||
<ChangeMessageVisibilityBatchResult>
|
||||
{% for success_id in success %}
|
||||
<ChangeMessageVisibilityBatchResultEntry>
|
||||
<Id>{{ success_id }}</Id>
|
||||
</ChangeMessageVisibilityBatchResultEntry>
|
||||
{% endfor %}
|
||||
{% for error_dict in errors %}
|
||||
<BatchResultErrorEntry>
|
||||
<Id>{{ error_dict['Id'] }}</Id>
|
||||
<Code>{{ error_dict['Code'] }}</Code>
|
||||
<Message>{{ error_dict['Message'] }}</Message>
|
||||
<SenderFault>{{ error_dict['SenderFault'] }}</SenderFault>
|
||||
</BatchResultErrorEntry>
|
||||
{% endfor %}
|
||||
</ChangeMessageVisibilityBatchResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ChangeMessageVisibilityBatchResponse>"""
|
||||
|
||||
PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>6fde8d1e-52cd-4581-8cd9-c512f4c64223</RequestId>
|
||||
<RequestId>{{ requestid }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</PurgeQueueResponse>"""
|
||||
|
||||
LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE = """<ListDeadLetterSourceQueuesResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
|
||||
<ListDeadLetterSourceQueuesResult>
|
||||
{% for queue in queues %}
|
||||
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
|
||||
{% endfor %}
|
||||
</ListDeadLetterSourceQueuesResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>8ffb921f-b85e-53d9-abcf-d8d0057f38fc</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListDeadLetterSourceQueuesResponse>"""
|
||||
|
||||
ADD_PERMISSION_RESPONSE = """<AddPermissionResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</AddPermissionResponse>"""
|
||||
|
||||
REMOVE_PERMISSION_RESPONSE = """<RemovePermissionResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</RemovePermissionResponse>"""
|
||||
|
||||
TAG_QUEUE_RESPONSE = """<TagQueueResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</TagQueueResponse>"""
|
||||
|
||||
UNTAG_QUEUE_RESPONSE = """<UntagQueueResponse>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</UntagQueueResponse>"""
|
||||
|
||||
LIST_QUEUE_TAGS_RESPONSE = """<ListQueueTagsResponse>
|
||||
<ListQueueTagsResult>
|
||||
{% for key, value in tags.items() %}
|
||||
<Tag>
|
||||
<Key>{{ key }}</Key>
|
||||
<Value>{{ value }}</Value>
|
||||
</Tag>
|
||||
{% endfor %}
|
||||
</ListQueueTagsResult>
|
||||
<ResponseMetadata>
|
||||
<RequestId>{{ request_id }}</RequestId>
|
||||
</ResponseMetadata>
|
||||
</ListQueueTagsResponse>"""
|
||||
|
||||
ERROR_TOO_LONG_RESPONSE = """<ErrorResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
|
||||
<Error>
|
||||
<Type>Sender</Type>
|
||||
|
@ -56,14 +56,14 @@ def print_implementation_coverage():
|
||||
else:
|
||||
percentage_implemented = 0
|
||||
|
||||
print("-----------------------")
|
||||
print("{} - {}% implemented".format(service_name, percentage_implemented))
|
||||
print("-----------------------")
|
||||
print("")
|
||||
print("## {} - {}% implemented".format(service_name, percentage_implemented))
|
||||
for op in operations:
|
||||
if op in implemented:
|
||||
print("[X] {}".format(op))
|
||||
print("- [X] {}".format(op))
|
||||
else:
|
||||
print("[ ] {}".format(op))
|
||||
print("- [ ] {}".format(op))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
print_implementation_coverage()
|
||||
|
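With this change the script emits GitHub-flavoured Markdown (a heading per service plus a task list), which is what the generated IMPLEMENTATION_COVERAGE.md contains; an invented illustration of the output shape:

## sqs - 70% implemented
- [X] send_message
- [ ] tag_queue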
@ -81,12 +81,14 @@ def select_service_and_operation():
|
||||
raise click.Abort()
|
||||
return service_name, operation_name
|
||||
|
||||
def get_escaped_service(service):
|
||||
return service.replace('-', '')
|
||||
|
||||
def get_lib_dir(service):
|
||||
return os.path.join('moto', service)
|
||||
return os.path.join('moto', get_escaped_service(service))
|
||||
|
||||
def get_test_dir(service):
|
||||
return os.path.join('tests', 'test_{}'.format(service))
|
||||
return os.path.join('tests', 'test_{}'.format(get_escaped_service(service)))
|
||||
|
||||
|
||||
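The escaping exists because some AWS service names contain hyphens, which are not valid in Python package names; for a hypothetical input:

get_escaped_service('application-autoscaling')  # -> 'applicationautoscaling'
get_lib_dir('application-autoscaling')          # -> 'moto/applicationautoscaling'
get_test_dir('application-autoscaling')         # -> 'tests/test_applicationautoscaling'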
def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None):
|
||||
@ -117,7 +119,7 @@ def append_mock_to_init_py(service):
|
||||
filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)]
|
||||
last_import_line_index = lines.index(filtered_lines[-1])
|
||||
|
||||
new_line = 'from .{} import mock_{} # flake8: noqa'.format(service, service)
|
||||
new_line = 'from .{} import mock_{} # flake8: noqa'.format(get_escaped_service(service), get_escaped_service(service))
|
||||
lines.insert(last_import_line_index + 1, new_line)
|
||||
|
||||
body = '\n'.join(lines) + '\n'
|
||||
@ -135,7 +137,7 @@ def append_mock_import_to_backends_py(service):
|
||||
filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)]
|
||||
last_import_line_index = lines.index(filtered_lines[-1])
|
||||
|
||||
new_line = 'from moto.{} import {}_backends'.format(service, service)
|
||||
new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service))
|
||||
lines.insert(last_import_line_index + 1, new_line)
|
||||
|
||||
body = '\n'.join(lines) + '\n'
|
||||
@ -147,13 +149,12 @@ def append_mock_dict_to_backends_py(service):
|
||||
with open(path) as f:
|
||||
lines = [_.replace('\n', '') for _ in f.readlines()]
|
||||
|
||||
# 'xray': xray_backends
|
||||
if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)):
|
||||
return
|
||||
filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)]
|
||||
last_elem_line_index = lines.index(filtered_lines[-1])
|
||||
|
||||
new_line = " '{}': {}_backends,".format(service, service)
|
||||
new_line = " '{}': {}_backends,".format(service, get_escaped_service(service))
|
||||
prev_line = lines[last_elem_line_index]
|
||||
if not prev_line.endswith('{') and not prev_line.endswith(','):
|
||||
lines[last_elem_line_index] += ','
|
||||
@ -166,8 +167,8 @@ def append_mock_dict_to_backends_py(service):
|
||||
def initialize_service(service, operation, api_protocol):
|
||||
"""create lib and test dirs if not exist
|
||||
"""
|
||||
lib_dir = os.path.join('moto', service)
|
||||
test_dir = os.path.join('tests', 'test_{}'.format(service))
|
||||
lib_dir = get_lib_dir(service)
|
||||
test_dir = get_test_dir(service)
|
||||
|
||||
print_progress('Initializing service', service, 'green')
|
||||
|
||||
@ -178,7 +179,9 @@ def initialize_service(service, operation, api_protocol):
|
||||
tmpl_context = {
|
||||
'service': service,
|
||||
'service_class': service_class,
|
||||
'endpoint_prefix': endpoint_prefix
|
||||
'endpoint_prefix': endpoint_prefix,
|
||||
'api_protocol': api_protocol,
|
||||
'escaped_service': get_escaped_service(service)
|
||||
}
|
||||
|
||||
# initialize service directory
|
||||
@ -202,7 +205,7 @@ def initialize_service(service, operation, api_protocol):
|
||||
os.makedirs(test_dir)
|
||||
tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
|
||||
for tmpl_filename in os.listdir(tmpl_dir):
|
||||
alt_filename = 'test_{}.py'.format(service) if tmpl_filename == 'test_service.py.j2' else None
|
||||
alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
|
||||
render_template(
|
||||
tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
|
||||
)
|
||||
@ -212,9 +215,16 @@ def initialize_service(service, operation, api_protocol):
|
||||
append_mock_import_to_backends_py(service)
|
||||
append_mock_dict_to_backends_py(service)
|
||||
|
||||
|
||||
def to_upper_camel_case(s):
|
||||
return ''.join([_.title() for _ in s.split('_')])
|
||||
|
||||
|
||||
def to_lower_camel_case(s):
|
||||
words = s.split('_')
|
||||
return ''.join(words[:1] + [_.title() for _ in words[1:]])
|
||||
|
||||
|
||||
def to_snake_case(s):
|
||||
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
|
||||
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
|
||||
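Illustrative behaviour of the three naming helpers above (inputs are hypothetical):

to_upper_camel_case('describe_event_bus')  # -> 'DescribeEventBus'
to_lower_camel_case('describe_event_bus')  # -> 'describeEventBus'
to_snake_case('DescribeEventBus')          # -> 'describe_event_bus'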
@ -229,25 +239,28 @@ def get_function_in_responses(service, operation, protocol):
|
||||
|
||||
aws_operation_name = to_upper_camel_case(operation)
|
||||
op_model = client._service_model.operation_model(aws_operation_name)
|
||||
outputs = op_model.output_shape.members
|
||||
if not hasattr(op_model.output_shape, 'members'):
|
||||
outputs = {}
|
||||
else:
|
||||
outputs = op_model.output_shape.members
|
||||
inputs = op_model.input_shape.members
|
||||
input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
|
||||
output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
|
||||
body = 'def {}(self):\n'.format(operation)
|
||||
body = '\ndef {}(self):\n'.format(operation)
|
||||
|
||||
for input_name, input_type in inputs.items():
|
||||
type_name = input_type.type_name
|
||||
if type_name == 'integer':
|
||||
arg_line_tmpl = ' {} = _get_int_param("{}")\n'
|
||||
arg_line_tmpl = ' {} = self._get_int_param("{}")\n'
|
||||
elif type_name == 'list':
|
||||
arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n'
|
||||
else:
|
||||
arg_line_tmpl = ' {} = self._get_param("{}")\n'
|
||||
body += arg_line_tmpl.format(to_snake_case(input_name), input_name)
|
||||
if output_names:
|
||||
body += ' {} = self.{}_backend.{}(\n'.format(','.join(output_names), service, operation)
|
||||
body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation)
|
||||
else:
|
||||
body += ' self.{}_backend.{}(\n'.format(service, operation)
|
||||
body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation)
|
||||
for input_name in input_names:
|
||||
body += ' {}={},\n'.format(input_name, input_name)
|
||||
|
||||
@ -255,11 +268,11 @@ def get_function_in_responses(service, operation, protocol):
|
||||
if protocol == 'query':
|
||||
body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper())
|
||||
body += ' return template.render({})\n'.format(
|
||||
','.join(['{}={}'.format(_, _) for _ in output_names])
|
||||
', '.join(['{}={}'.format(_, _) for _ in output_names])
|
||||
)
|
||||
elif protocol == 'json':
|
||||
body += ' # TODO: adjust reponse\n'
|
||||
body += ' return json.dumps({})\n'.format(','.join(['{}={}'.format(_, _) for _ in output_names]))
|
||||
elif protocol in ['json', 'rest-json']:
|
||||
body += ' # TODO: adjust response\n'
|
||||
body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names]))
|
||||
return body
|
||||
|
||||
|
||||
@ -272,7 +285,10 @@ def get_function_in_models(service, operation):
|
||||
aws_operation_name = to_upper_camel_case(operation)
|
||||
op_model = client._service_model.operation_model(aws_operation_name)
|
||||
inputs = op_model.input_shape.members
|
||||
outputs = op_model.output_shape.members
|
||||
if not hasattr(op_model.output_shape, 'members'):
|
||||
outputs = {}
|
||||
else:
|
||||
outputs = op_model.output_shape.members
|
||||
input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
|
||||
output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
|
||||
if input_names:
|
||||
@ -280,7 +296,7 @@ def get_function_in_models(service, operation):
|
||||
else:
|
||||
body = 'def {}(self)\n'
|
||||
body += ' # implement here\n'
|
||||
body += ' return {}\n'.format(', '.join(output_names))
|
||||
body += ' return {}\n\n'.format(', '.join(output_names))
|
||||
|
||||
return body
|
||||
|
||||
@ -388,13 +404,13 @@ def insert_code_to_class(path, base_class, new_code):
|
||||
f.write(body)
|
||||
|
||||
|
||||
def insert_url(service, operation):
|
||||
def insert_url(service, operation, api_protocol):
|
||||
client = boto3.client(service)
|
||||
service_class = client.__class__.__name__
|
||||
aws_operation_name = to_upper_camel_case(operation)
|
||||
uri = client._service_model.operation_model(aws_operation_name).http['requestUri']
|
||||
|
||||
path = os.path.join(os.path.dirname(__file__), '..', 'moto', service, 'urls.py')
|
||||
path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py')
|
||||
with open(path) as f:
|
||||
lines = [_.replace('\n', '') for _ in f.readlines()]
|
||||
|
||||
@ -413,81 +429,55 @@ def insert_url(service, operation):
|
||||
if not prev_line.endswith('{') and not prev_line.endswith(','):
|
||||
lines[last_elem_line_index] += ','
|
||||
|
||||
new_line = " '{0}%s$': %sResponse.dispatch," % (
|
||||
uri, service_class
|
||||
)
|
||||
# generate url pattern
|
||||
if api_protocol == 'rest-json':
|
||||
new_line = " '{0}/.*$': response.dispatch,"
|
||||
else:
|
||||
new_line = " '{0}%s$': %sResponse.dispatch," % (
|
||||
uri, service_class
|
||||
)
|
||||
if new_line in lines:
|
||||
return
|
||||
lines.insert(last_elem_line_index + 1, new_line)
|
||||
|
||||
body = '\n'.join(lines) + '\n'
|
||||
with open(path, 'w') as f:
|
||||
f.write(body)
|
||||
|
||||
|
||||
def insert_query_codes(service, operation):
|
||||
func_in_responses = get_function_in_responses(service, operation, 'query')
|
||||
def insert_codes(service, operation, api_protocol):
|
||||
func_in_responses = get_function_in_responses(service, operation, api_protocol)
|
||||
func_in_models = get_function_in_models(service, operation)
|
||||
template = get_response_query_template(service, operation)
|
||||
|
||||
# edit responses.py
|
||||
responses_path = 'moto/{}/responses.py'.format(service)
|
||||
responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service))
|
||||
print_progress('inserting code', responses_path, 'green')
|
||||
insert_code_to_class(responses_path, BaseResponse, func_in_responses)
|
||||
|
||||
# insert template
|
||||
with open(responses_path) as f:
|
||||
lines = [_[:-1] for _ in f.readlines()]
|
||||
lines += template.splitlines()
|
||||
with open(responses_path, 'w') as f:
|
||||
f.write('\n'.join(lines))
|
||||
if api_protocol == 'query':
|
||||
template = get_response_query_template(service, operation)
|
||||
with open(responses_path) as f:
|
||||
lines = [_[:-1] for _ in f.readlines()]
|
||||
lines += template.splitlines()
|
||||
with open(responses_path, 'w') as f:
|
||||
f.write('\n'.join(lines))
|
||||
|
||||
# edit models.py
|
||||
models_path = 'moto/{}/models.py'.format(service)
|
||||
models_path = 'moto/{}/models.py'.format(get_escaped_service(service))
|
||||
print_progress('inserting code', models_path, 'green')
|
||||
insert_code_to_class(models_path, BaseBackend, func_in_models)
|
||||
|
||||
# edit urls.py
|
||||
insert_url(service, operation)
|
||||
insert_url(service, operation, api_protocol)
|
||||
|
||||
def insert_json_codes(service, operation):
|
||||
func_in_responses = get_function_in_responses(service, operation, 'json')
|
||||
func_in_models = get_function_in_models(service, operation)
|
||||
|
||||
# edit responses.py
|
||||
responses_path = 'moto/{}/responses.py'.format(service)
|
||||
print_progress('inserting code', responses_path, 'green')
|
||||
insert_code_to_class(responses_path, BaseResponse, func_in_responses)
|
||||
|
||||
# edit models.py
|
||||
models_path = 'moto/{}/models.py'.format(service)
|
||||
print_progress('inserting code', models_path, 'green')
|
||||
insert_code_to_class(models_path, BaseBackend, func_in_models)
|
||||
|
||||
# edit urls.py
|
||||
insert_url(service, operation)
|
||||
|
||||
def insert_restjson_codes(service, operation):
|
||||
func_in_models = get_function_in_models(service, operation)
|
||||
|
||||
print_progress('skipping inserting code to responses.py', "dont't know how to implement", 'yellow')
|
||||
# edit models.py
|
||||
models_path = 'moto/{}/models.py'.format(service)
|
||||
print_progress('inserting code', models_path, 'green')
|
||||
insert_code_to_class(models_path, BaseBackend, func_in_models)
|
||||
|
||||
# edit urls.py
|
||||
insert_url(service, operation)
|
||||
|
||||
@click.command()
|
||||
def main():
|
||||
service, operation = select_service_and_operation()
|
||||
api_protocol = boto3.client(service)._service_model.metadata['protocol']
|
||||
initialize_service(service, operation, api_protocol)
|
||||
if api_protocol == 'query':
|
||||
insert_query_codes(service, operation)
|
||||
elif api_protocol == 'json':
|
||||
insert_json_codes(service, operation)
|
||||
elif api_protocol == 'rest-json':
|
||||
insert_restjson_codes(service, operation)
|
||||
|
||||
if api_protocol in ['query', 'json', 'rest-json']:
|
||||
insert_codes(service, operation, api_protocol)
|
||||
else:
|
||||
print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow')
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
from .models import {{ service }}_backends
|
||||
from .models import {{ escaped_service }}_backends
|
||||
from ..core.models import base_decorator
|
||||
|
||||
{{ service }}_backend = {{ service }}_backends['us-east-1']
|
||||
mock_{{ service }} = base_decorator({{ service }}_backends)
|
||||
{{ escaped_service }}_backend = {{ escaped_service }}_backends['us-east-1']
|
||||
mock_{{ escaped_service }} = base_decorator({{ escaped_service }}_backends)
|
||||
|
||||
|
@ -17,4 +17,4 @@ class {{ service_class }}Backend(BaseBackend):
|
||||
|
||||
|
||||
available_regions = boto3.session.Session().get_available_regions("{{ service }}")
|
||||
{{ service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions}
|
||||
{{ escaped_service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions}
|
||||
|
@ -1,12 +1,14 @@
|
||||
from __future__ import unicode_literals
|
||||
from moto.core.responses import BaseResponse
|
||||
from .models import {{ service }}_backends
|
||||
from .models import {{ escaped_service }}_backends
|
||||
import json
|
||||
|
||||
|
||||
class {{ service_class }}Response(BaseResponse):
|
||||
SERVICE_NAME = '{{ service }}'
|
||||
@property
|
||||
def {{ service }}_backend(self):
|
||||
return {{ service }}_backends[self.region]
|
||||
def {{ escaped_service }}_backend(self):
|
||||
return {{ escaped_service }}_backends[self.region]
|
||||
|
||||
# add methods from here
|
||||
|
||||
|
@ -5,5 +5,9 @@ url_bases = [
|
||||
"https?://{{ endpoint_prefix }}.(.+).amazonaws.com",
|
||||
]
|
||||
|
||||
{% if api_protocol == 'rest-json' %}
|
||||
response = {{ service_class }}Response()
|
||||
{% endif %}
|
||||
|
||||
url_paths = {
|
||||
}
|
||||
|
@ -3,14 +3,14 @@ from __future__ import unicode_literals
|
||||
import sure # noqa
|
||||
|
||||
import moto.server as server
|
||||
from moto import mock_{{ service }}
|
||||
from moto import mock_{{ escaped_service }}
|
||||
|
||||
'''
|
||||
Test the different server responses
|
||||
'''
|
||||
|
||||
@mock_{{ service }}
|
||||
def test_{{ service }}_list():
|
||||
@mock_{{ escaped_service }}
|
||||
def test_{{ escaped_service }}_list():
|
||||
backend = server.create_backend_app("{{ service }}")
|
||||
test_client = backend.test_client()
|
||||
# do test
|
||||
|
@ -2,10 +2,10 @@ from __future__ import unicode_literals
|
||||
|
||||
import boto3
|
||||
import sure # noqa
|
||||
from moto import mock_{{ service }}
|
||||
from moto import mock_{{ escaped_service }}
|
||||
|
||||
|
||||
@mock_{{ service }}
|
||||
@mock_{{ escaped_service }}
|
||||
def test_list():
|
||||
# do test
|
||||
pass
|
||||
|
@ -4,6 +4,7 @@ import os
|
||||
import boto3
|
||||
from freezegun import freeze_time
|
||||
import sure # noqa
|
||||
import uuid
|
||||
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
@ -281,11 +282,23 @@ def test_resend_validation_email_invalid():
|
||||
def test_request_certificate():
|
||||
client = boto3.client('acm', region_name='eu-central-1')
|
||||
|
||||
token = str(uuid.uuid4())
|
||||
|
||||
resp = client.request_certificate(
|
||||
DomainName='google.com',
|
||||
IdempotencyToken=token,
|
||||
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
|
||||
)
|
||||
resp.should.contain('CertificateArn')
|
||||
arn = resp['CertificateArn']
|
||||
|
||||
resp = client.request_certificate(
|
||||
DomainName='google.com',
|
||||
IdempotencyToken=token,
|
||||
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
|
||||
)
|
||||
resp['CertificateArn'].should.equal(arn)
|
||||
|
||||
|
||||
@mock_acm
|
||||
def test_request_certificate_no_san():
|
||||
|
@ -488,6 +488,7 @@ def lambda_handler(event, context):
|
||||
assert 'FunctionError' in result
|
||||
assert result['FunctionError'] == 'Handled'
|
||||
|
||||
|
||||
@mock_lambda
|
||||
@mock_s3
|
||||
def test_tags():
|
||||
@ -554,6 +555,7 @@ def test_tags():
|
||||
TagKeys=['spam']
|
||||
)['ResponseMetadata']['HTTPStatusCode'].should.equal(204)
|
||||
|
||||
|
||||
@mock_lambda
|
||||
def test_tags_not_found():
|
||||
"""
|
||||
@ -574,6 +576,7 @@ def test_tags_not_found():
|
||||
TagKeys=['spam']
|
||||
).should.throw(botocore.client.ClientError)
|
||||
|
||||
|
||||
@mock_lambda
|
||||
def test_invoke_async_function():
|
||||
conn = boto3.client('lambda', 'us-west-2')
|
||||
@ -581,10 +584,8 @@ def test_invoke_async_function():
|
||||
FunctionName='testFunction',
|
||||
Runtime='python2.7',
|
||||
Role='test-iam-role',
|
||||
Handler='lambda_function.handler',
|
||||
Code={
|
||||
'ZipFile': get_test_zip_file1(),
|
||||
},
|
||||
Handler='lambda_function.lambda_handler',
|
||||
Code={'ZipFile': get_test_zip_file1()},
|
||||
Description='test lambda function',
|
||||
Timeout=3,
|
||||
MemorySize=128,
|
||||
@ -593,11 +594,12 @@ def test_invoke_async_function():
|
||||
|
||||
success_result = conn.invoke_async(
|
||||
FunctionName='testFunction',
|
||||
InvokeArgs=json.dumps({ 'test': 'event' })
|
||||
InvokeArgs=json.dumps({'test': 'event'})
|
||||
)
|
||||
|
||||
success_result['Status'].should.equal(202)
|
||||
|
||||
|
||||
@mock_lambda
|
||||
@freeze_time('2015-01-01 00:00:00')
|
||||
def test_get_function_created_with_zipfile():
|
||||
@ -646,6 +648,7 @@ def test_get_function_created_with_zipfile():
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@mock_lambda
|
||||
def add_function_permission():
|
||||
conn = boto3.client('lambda', 'us-west-2')
|
||||
|
@ -38,7 +38,7 @@ from moto import (
|
||||
mock_sns_deprecated,
|
||||
mock_sqs,
|
||||
mock_sqs_deprecated,
|
||||
)
|
||||
mock_elbv2)
|
||||
|
||||
from .fixtures import (
|
||||
ec2_classic_eip,
|
||||
@ -2111,3 +2111,156 @@ def test_stack_spot_fleet():
|
||||
launch_spec['SubnetId'].should.equal(subnet_id)
|
||||
launch_spec['SpotPrice'].should.equal("0.13")
|
||||
launch_spec['WeightedCapacity'].should.equal(2.0)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
@mock_elbv2
|
||||
@mock_cloudformation
|
||||
def test_stack_elbv2_resources_integration():
|
||||
alb_template = {
|
||||
"AWSTemplateFormatVersion": "2010-09-09",
|
||||
"Outputs": {
|
||||
"albdns": {
|
||||
"Description": "Load balanacer DNS",
|
||||
"Value": {"Fn::GetAtt": ["alb", "DNSName"]},
|
||||
},
|
||||
"albname": {
|
||||
"Description": "Load balancer name",
|
||||
"Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]},
|
||||
},
|
||||
},
|
||||
"Resources": {
|
||||
"alb": {
|
||||
"Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
|
||||
"Properties": {
|
||||
"Name": "myelbv2",
|
||||
"Scheme": "internet-facing",
|
||||
"Subnets": [{
|
||||
"Ref": "mysubnet",
|
||||
}],
|
||||
"SecurityGroups": [{
|
||||
"Ref": "mysg",
|
||||
}],
|
||||
"Type": "application",
|
||||
"IpAddressType": "ipv4",
|
||||
}
|
||||
},
|
||||
"mytargetgroup": {
|
||||
"Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
|
||||
"Properties": {
|
||||
"HealthCheckIntervalSeconds": 30,
|
||||
"HealthCheckPath": "/status",
|
||||
"HealthCheckPort": 80,
|
||||
"HealthCheckProtocol": "HTTP",
|
||||
"HealthCheckTimeoutSeconds": 5,
|
||||
"HealthyThresholdCount": 30,
|
||||
"UnhealthyThresholdCount": 5,
|
||||
"Matcher": {
|
||||
"HttpCode": "200,201"
|
||||
},
|
||||
"Name": "mytargetgroup",
|
||||
"Port": 80,
|
||||
"Protocol": "HTTP",
|
||||
"TargetType": "instance",
|
||||
"Targets": [{
|
||||
"Id": {
|
||||
"Ref": "ec2instance",
|
||||
"Port": 80,
|
||||
},
|
||||
}],
|
||||
"VpcId": {
|
||||
"Ref": "myvpc",
|
||||
}
|
||||
}
|
||||
},
|
||||
"listener": {
|
||||
"Type": "AWS::ElasticLoadBalancingV2::Listener",
|
||||
"Properties": {
|
||||
"DefaultActions": [{
|
||||
"Type": "forward",
|
||||
"TargetGroupArn": {"Ref": "mytargetgroup"}
|
||||
}],
|
||||
"LoadBalancerArn": {"Ref": "alb"},
|
||||
"Port": "80",
|
||||
"Protocol": "HTTP"
|
||||
}
|
||||
},
|
||||
"myvpc": {
|
||||
"Type": "AWS::EC2::VPC",
|
||||
"Properties": {
|
||||
"CidrBlock": "10.0.0.0/16",
|
||||
}
|
||||
},
|
||||
"mysubnet": {
|
||||
"Type": "AWS::EC2::Subnet",
|
||||
"Properties": {
|
||||
"CidrBlock": "10.0.0.0/27",
|
||||
"VpcId": {"Ref": "myvpc"},
|
||||
}
|
||||
},
|
||||
"mysg": {
|
||||
"Type": "AWS::EC2::SecurityGroup",
|
||||
"Properties": {
|
||||
"GroupName": "mysg",
|
||||
"GroupDescription": "test security group",
|
||||
"VpcId": {"Ref": "myvpc"}
|
||||
}
|
||||
},
|
||||
"ec2instance": {
|
||||
"Type": "AWS::EC2::Instance",
|
||||
"Properties": {
|
||||
"ImageId": "ami-1234abcd",
|
||||
"UserData": "some user data",
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
alb_template_json = json.dumps(alb_template)
|
||||
|
||||
cfn_conn = boto3.client("cloudformation", "us-west-1")
|
||||
cfn_conn.create_stack(
|
||||
StackName="elb_stack",
|
||||
TemplateBody=alb_template_json,
|
||||
)
|
||||
|
||||
elbv2_conn = boto3.client("elbv2", "us-west-1")
|
||||
|
||||
load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers']
|
||||
len(load_balancers).should.equal(1)
|
||||
load_balancers[0]['LoadBalancerName'].should.equal('myelbv2')
|
||||
load_balancers[0]['Scheme'].should.equal('internet-facing')
|
||||
load_balancers[0]['Type'].should.equal('application')
|
||||
load_balancers[0]['IpAddressType'].should.equal('ipv4')
|
||||
|
||||
target_groups = elbv2_conn.describe_target_groups()['TargetGroups']
|
||||
len(target_groups).should.equal(1)
|
||||
target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30)
|
||||
target_groups[0]['HealthCheckPath'].should.equal('/status')
|
||||
target_groups[0]['HealthCheckPort'].should.equal('80')
|
||||
target_groups[0]['HealthCheckProtocol'].should.equal('HTTP')
|
||||
target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5)
|
||||
target_groups[0]['HealthyThresholdCount'].should.equal(30)
|
||||
target_groups[0]['UnhealthyThresholdCount'].should.equal(5)
|
||||
target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'})
|
||||
target_groups[0]['TargetGroupName'].should.equal('mytargetgroup')
|
||||
target_groups[0]['Port'].should.equal(80)
|
||||
target_groups[0]['Protocol'].should.equal('HTTP')
|
||||
target_groups[0]['TargetType'].should.equal('instance')
|
||||
|
||||
listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners']
|
||||
len(listeners).should.equal(1)
|
||||
listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn'])
|
||||
listeners[0]['Port'].should.equal(80)
|
||||
listeners[0]['Protocol'].should.equal('HTTP')
|
||||
listeners[0]['DefaultActions'].should.equal([{
|
||||
"Type": "forward",
|
||||
"TargetGroupArn": target_groups[0]['TargetGroupArn']
|
||||
}])
|
||||
|
||||
# test outputs
|
||||
stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks']
|
||||
len(stacks).should.equal(1)
|
||||
stacks[0]['Outputs'].should.equal([
|
||||
{'OutputKey': 'albdns', 'OutputValue': load_balancers[0]['DNSName']},
|
||||
{'OutputKey': 'albname', 'OutputValue': load_balancers[0]['LoadBalancerName']},
|
||||
])
|
||||
|
@ -118,12 +118,3 @@ def test_describe_alarms():
|
||||
|
||||
alarms = conn.describe_alarms()
|
||||
alarms.should.have.length_of(0)
|
||||
|
||||
|
||||
@mock_cloudwatch_deprecated
|
||||
def test_describe_state_value_unimplemented():
|
||||
conn = boto.connect_cloudwatch()
|
||||
|
||||
conn.describe_alarms()
|
||||
conn.describe_alarms.when.called_with(
|
||||
state_value="foo").should.throw(NotImplementedError)
|
||||
|
@ -87,6 +87,54 @@ def test_get_dashboard_fail():
|
||||
raise RuntimeError('Should have raised error')
|
||||
|
||||
|
||||
@mock_cloudwatch
|
||||
def test_alarm_state():
|
||||
client = boto3.client('cloudwatch', region_name='eu-central-1')
|
||||
|
||||
client.put_metric_alarm(
|
||||
AlarmName='testalarm1',
|
||||
MetricName='cpu',
|
||||
Namespace='blah',
|
||||
Period=10,
|
||||
EvaluationPeriods=5,
|
||||
Statistic='Average',
|
||||
Threshold=2,
|
||||
ComparisonOperator='GreaterThanThreshold',
|
||||
)
|
||||
client.put_metric_alarm(
|
||||
AlarmName='testalarm2',
|
||||
MetricName='cpu',
|
||||
Namespace='blah',
|
||||
Period=10,
|
||||
EvaluationPeriods=5,
|
||||
Statistic='Average',
|
||||
Threshold=2,
|
||||
ComparisonOperator='GreaterThanThreshold',
|
||||
)
|
||||
|
||||
# This is tested implicitly: if it doesn't work, the rest will fail
|
||||
client.set_alarm_state(
|
||||
AlarmName='testalarm1',
|
||||
StateValue='ALARM',
|
||||
StateReason='testreason',
|
||||
StateReasonData='{"some": "json_data"}'
|
||||
)
|
||||
|
||||
resp = client.describe_alarms(
|
||||
StateValue='ALARM'
|
||||
)
|
||||
len(resp['MetricAlarms']).should.equal(1)
|
||||
resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1')
|
||||
|
||||
resp = client.describe_alarms(
|
||||
StateValue='OK'
|
||||
)
|
||||
len(resp['MetricAlarms']).should.equal(1)
|
||||
resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2')
|
||||
|
||||
# Just for sanity
|
||||
resp = client.describe_alarms()
|
||||
len(resp['MetricAlarms']).should.equal(2)
|
||||
|
||||
|
||||
|
||||
|
@ -28,13 +28,13 @@ except ImportError:
|
||||
@mock_dynamodb2_deprecated
|
||||
def test_list_tables():
|
||||
name = 'TestTable'
|
||||
#{'schema': }
|
||||
# Should make tables properly with boto
|
||||
dynamodb_backend2.create_table(name, schema=[
|
||||
{u'KeyType': u'HASH', u'AttributeName': u'forum_name'},
|
||||
{u'KeyType': u'RANGE', u'AttributeName': u'subject'}
|
||||
])
|
||||
conn = boto.dynamodb2.connect_to_region(
|
||||
'us-west-2',
|
||||
'us-east-1',
|
||||
aws_access_key_id="ak",
|
||||
aws_secret_access_key="sk")
|
||||
assert conn.list_tables()["TableNames"] == [name]
|
||||
@ -43,6 +43,7 @@ def test_list_tables():
|
||||
@requires_boto_gte("2.9")
|
||||
@mock_dynamodb2_deprecated
|
||||
def test_list_tables_layer_1():
|
||||
# Should make tables properly with boto
|
||||
dynamodb_backend2.create_table("test_1", schema=[
|
||||
{u'KeyType': u'HASH', u'AttributeName': u'name'}
|
||||
])
|
||||
@ -50,7 +51,7 @@ def test_list_tables_layer_1():
|
||||
{u'KeyType': u'HASH', u'AttributeName': u'name'}
|
||||
])
|
||||
conn = boto.dynamodb2.connect_to_region(
|
||||
'us-west-2',
|
||||
'us-east-1',
|
||||
aws_access_key_id="ak",
|
||||
aws_secret_access_key="sk")
|
||||
|
||||
@ -88,12 +89,22 @@ def test_list_table_tags():
|
||||
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5})
|
||||
table_description = conn.describe_table(TableName=name)
|
||||
arn = table_description['Table']['TableArn']
|
||||
tags = [{'Key':'TestTag', 'Value': 'TestValue'}]
|
||||
conn.tag_resource(ResourceArn=arn,
|
||||
Tags=tags)
|
||||
|
||||
# Tag table
|
||||
tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}]
|
||||
conn.tag_resource(ResourceArn=arn, Tags=tags)
|
||||
|
||||
# Check tags
|
||||
resp = conn.list_tags_of_resource(ResourceArn=arn)
|
||||
assert resp["Tags"] == tags
|
||||
|
||||
# Remove 1 tag
|
||||
conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag'])
|
||||
|
||||
# Check tags
|
||||
resp = conn.list_tags_of_resource(ResourceArn=arn)
|
||||
assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}]
|
||||
|
||||
|
||||
@requires_boto_gte("2.9")
|
||||
@mock_dynamodb2
|
||||
@ -868,3 +879,50 @@ def test_delete_item():
|
||||
|
||||
response = table.scan()
|
||||
assert response['Count'] == 0
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_describe_limits():
    client = boto3.client('dynamodb', region_name='eu-central-1')
    resp = client.describe_limits()

    resp['AccountMaxReadCapacityUnits'].should.equal(20000)
    resp['AccountMaxWriteCapacityUnits'].should.equal(20000)
    resp['TableMaxWriteCapacityUnits'].should.equal(10000)
    resp['TableMaxReadCapacityUnits'].should.equal(10000)


@mock_dynamodb2
def test_set_ttl():
    client = boto3.client('dynamodb', region_name='us-east-1')

    # Create the DynamoDB table.
    client.create_table(
        TableName='test1',
        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
    )

    client.update_time_to_live(
        TableName='test1',
        TimeToLiveSpecification={
            'Enabled': True,
            'AttributeName': 'expire'
        }
    )

    resp = client.describe_time_to_live(TableName='test1')
    resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED')
    resp['TimeToLiveDescription']['AttributeName'].should.equal('expire')

    client.update_time_to_live(
        TableName='test1',
        TimeToLiveSpecification={
            'Enabled': False,
            'AttributeName': 'expire'
        }
    )

    resp = client.describe_time_to_live(TableName='test1')
    resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED')
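
A minimal consumer-side sketch (assuming a hypothetical 'sessions' table and 'expires_at' attribute, neither of which is part of this change) of how code under test might rely on the TTL endpoints mocked above:

import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_consumer_enables_ttl():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='sessions',
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
    )
    # Enable TTL on the assumed 'expires_at' attribute, then read the status back.
    client.update_time_to_live(
        TableName='sessions',
        TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'expires_at'}
    )
    desc = client.describe_time_to_live(TableName='sessions')
    assert desc['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'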
@ -54,7 +54,7 @@ def test_create_table():
|
||||
}
|
||||
}
|
||||
conn = boto.dynamodb2.connect_to_region(
|
||||
'us-west-2',
|
||||
'us-east-1',
|
||||
aws_access_key_id="ak",
|
||||
aws_secret_access_key="sk"
|
||||
)
|
||||
@ -425,7 +425,7 @@ def test_get_special_item():
|
||||
|
||||
@mock_dynamodb2_deprecated
|
||||
def test_update_item_remove():
|
||||
conn = boto.dynamodb2.connect_to_region("us-west-2")
|
||||
conn = boto.dynamodb2.connect_to_region("us-east-1")
|
||||
table = Table.create('messages', schema=[
|
||||
HashKey('username')
|
||||
])
|
||||
@ -452,7 +452,7 @@ def test_update_item_remove():
|
||||
|
||||
@mock_dynamodb2_deprecated
|
||||
def test_update_item_set():
|
||||
conn = boto.dynamodb2.connect_to_region("us-west-2")
|
||||
conn = boto.dynamodb2.connect_to_region("us-east-1")
|
||||
table = Table.create('messages', schema=[
|
||||
HashKey('username')
|
||||
])
|
||||
|
@ -5,7 +5,9 @@ from nose.tools import assert_raises
|
||||
|
||||
import base64
|
||||
import datetime
|
||||
import ipaddress
|
||||
|
||||
import six
|
||||
import boto
|
||||
import boto3
|
||||
from boto.ec2.instance import Reservation, InstanceAttribute
|
||||
@ -413,6 +415,7 @@ def test_get_instances_filtering_by_image_id():
|
||||
'Values': [image_id]}])['Reservations']
|
||||
reservations[0]['Instances'].should.have.length_of(1)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_get_instances_filtering_by_private_dns():
|
||||
image_id = 'ami-1234abcd'
|
||||
@ -427,6 +430,7 @@ def test_get_instances_filtering_by_private_dns():
|
||||
])['Reservations']
|
||||
reservations[0]['Instances'].should.have.length_of(1)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_get_instances_filtering_by_ni_private_dns():
|
||||
image_id = 'ami-1234abcd'
|
||||
@ -441,6 +445,7 @@ def test_get_instances_filtering_by_ni_private_dns():
|
||||
])['Reservations']
|
||||
reservations[0]['Instances'].should.have.length_of(1)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_get_instances_filtering_by_instance_group_name():
|
||||
image_id = 'ami-1234abcd'
|
||||
@ -458,6 +463,7 @@ def test_get_instances_filtering_by_instance_group_name():
|
||||
])['Reservations']
|
||||
reservations[0]['Instances'].should.have.length_of(1)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_get_instances_filtering_by_instance_group_id():
|
||||
image_id = 'ami-1234abcd'
|
||||
@ -476,6 +482,7 @@ def test_get_instances_filtering_by_instance_group_id():
|
||||
])['Reservations']
|
||||
reservations[0]['Instances'].should.have.length_of(1)
|
||||
|
||||
|
||||
@mock_ec2_deprecated
|
||||
def test_get_instances_filtering_by_tag():
|
||||
conn = boto.connect_ec2()
|
||||
@ -830,18 +837,113 @@ def test_run_instance_with_placement():
|
||||
instance.placement.should.equal("us-east-1b")
|
||||
|
||||
|
||||
@mock_ec2_deprecated
|
||||
def test_run_instance_with_subnet():
|
||||
conn = boto.connect_vpc('the_key', 'the_secret')
|
||||
vpc = conn.create_vpc("10.0.0.0/16")
|
||||
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
|
||||
reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id)
|
||||
instance = reservation.instances[0]
|
||||
@mock_ec2
|
||||
def test_run_instance_with_subnet_boto3():
|
||||
client = boto3.client('ec2', region_name='eu-central-1')
|
||||
|
||||
instance.subnet_id.should.equal(subnet.id)
|
||||
ip_networks = [
|
||||
(ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')),
|
||||
(ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25'))
|
||||
]
|
||||
|
||||
all_enis = conn.get_all_network_interfaces()
|
||||
all_enis.should.have.length_of(1)
|
||||
# Test that instances are created with the correct IPs
|
||||
for vpc_cidr, subnet_cidr in ip_networks:
|
||||
resp = client.create_vpc(
|
||||
CidrBlock=str(vpc_cidr),
|
||||
AmazonProvidedIpv6CidrBlock=False,
|
||||
DryRun=False,
|
||||
InstanceTenancy='default'
|
||||
)
|
||||
vpc_id = resp['Vpc']['VpcId']
|
||||
|
||||
resp = client.create_subnet(
|
||||
CidrBlock=str(subnet_cidr),
|
||||
VpcId=vpc_id
|
||||
)
|
||||
subnet_id = resp['Subnet']['SubnetId']
|
||||
|
||||
resp = client.run_instances(
|
||||
ImageId='ami-1234abcd',
|
||||
MaxCount=1,
|
||||
MinCount=1,
|
||||
SubnetId=subnet_id
|
||||
)
|
||||
instance = resp['Instances'][0]
|
||||
instance['SubnetId'].should.equal(subnet_id)
|
||||
|
||||
priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress']))
|
||||
subnet_cidr.should.contain(priv_ipv4)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_run_instance_with_specified_private_ipv4():
|
||||
client = boto3.client('ec2', region_name='eu-central-1')
|
||||
|
||||
vpc_cidr = ipaddress.ip_network('192.168.42.0/24')
|
||||
subnet_cidr = ipaddress.ip_network('192.168.42.0/25')
|
||||
|
||||
resp = client.create_vpc(
|
||||
CidrBlock=str(vpc_cidr),
|
||||
AmazonProvidedIpv6CidrBlock=False,
|
||||
DryRun=False,
|
||||
InstanceTenancy='default'
|
||||
)
|
||||
vpc_id = resp['Vpc']['VpcId']
|
||||
|
||||
resp = client.create_subnet(
|
||||
CidrBlock=str(subnet_cidr),
|
||||
VpcId=vpc_id
|
||||
)
|
||||
subnet_id = resp['Subnet']['SubnetId']
|
||||
|
||||
resp = client.run_instances(
|
||||
ImageId='ami-1234abcd',
|
||||
MaxCount=1,
|
||||
MinCount=1,
|
||||
SubnetId=subnet_id,
|
||||
PrivateIpAddress='192.168.42.5'
|
||||
)
|
||||
instance = resp['Instances'][0]
|
||||
instance['SubnetId'].should.equal(subnet_id)
|
||||
instance['PrivateIpAddress'].should.equal('192.168.42.5')
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_run_instance_mapped_public_ipv4():
|
||||
client = boto3.client('ec2', region_name='eu-central-1')
|
||||
|
||||
vpc_cidr = ipaddress.ip_network('192.168.42.0/24')
|
||||
subnet_cidr = ipaddress.ip_network('192.168.42.0/25')
|
||||
|
||||
resp = client.create_vpc(
|
||||
CidrBlock=str(vpc_cidr),
|
||||
AmazonProvidedIpv6CidrBlock=False,
|
||||
DryRun=False,
|
||||
InstanceTenancy='default'
|
||||
)
|
||||
vpc_id = resp['Vpc']['VpcId']
|
||||
|
||||
resp = client.create_subnet(
|
||||
CidrBlock=str(subnet_cidr),
|
||||
VpcId=vpc_id
|
||||
)
|
||||
subnet_id = resp['Subnet']['SubnetId']
|
||||
client.modify_subnet_attribute(
|
||||
SubnetId=subnet_id,
|
||||
MapPublicIpOnLaunch={'Value': True}
|
||||
)
|
||||
|
||||
resp = client.run_instances(
|
||||
ImageId='ami-1234abcd',
|
||||
MaxCount=1,
|
||||
MinCount=1,
|
||||
SubnetId=subnet_id
|
||||
)
|
||||
instance = resp['Instances'][0]
|
||||
instance.should.contain('PublicDnsName')
|
||||
instance.should.contain('PublicIpAddress')
|
||||
len(instance['PublicDnsName']).should.be.greater_than(0)
|
||||
len(instance['PublicIpAddress']).should.be.greater_than(0)
|
||||
|
||||
|
||||
@mock_ec2_deprecated
|
||||
@ -853,7 +955,7 @@ def test_run_instance_with_nic_autocreated():
|
||||
'test security group #1', 'this is a test security group')
|
||||
security_group2 = conn.create_security_group(
|
||||
'test security group #2', 'this is a test security group')
|
||||
private_ip = "54.0.0.1"
|
||||
private_ip = "10.0.0.1"
|
||||
|
||||
reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id,
|
||||
security_groups=[security_group1.name],
|
||||
@ -880,6 +982,7 @@ def test_run_instance_with_nic_autocreated():
|
||||
eni.private_ip_addresses.should.have.length_of(1)
|
||||
eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip)
|
||||
|
||||
|
||||
@mock_ec2_deprecated
|
||||
def test_run_instance_with_nic_preexisting():
|
||||
conn = boto.connect_vpc('the_key', 'the_secret')
|
||||
@ -1012,6 +1115,7 @@ def test_ec2_classic_has_public_ip_address():
|
||||
instance.private_ip_address.should_not.equal(None)
|
||||
instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-'))
|
||||
|
||||
|
||||
@mock_ec2_deprecated
|
||||
def test_run_instance_with_keypair():
|
||||
conn = boto.connect_ec2('the_key', 'the_secret')
|
||||
|
@ -126,9 +126,9 @@ def test_route_tables_filters_associations():
|
||||
conn = boto.connect_vpc('the_key', 'the_secret')
|
||||
|
||||
vpc = conn.create_vpc("10.0.0.0/16")
|
||||
subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/18")
|
||||
subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/18")
|
||||
subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/18")
|
||||
subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24")
|
||||
subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24")
|
||||
subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24")
|
||||
route_table1 = conn.create_route_table(vpc.id)
|
||||
route_table2 = conn.create_route_table(vpc.id)
|
||||
|
||||
|
@ -1611,6 +1611,152 @@ def test_update_service_through_cloudformation_should_trigger_replacement():
|
||||
len(resp['serviceArns']).should.equal(1)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
@mock_ecs
|
||||
def test_attributes():
|
||||
# Combined put, list and delete attributes into the same test due to the amount of setup
|
||||
ecs_client = boto3.client('ecs', region_name='us-east-1')
|
||||
ec2 = boto3.resource('ec2', region_name='us-east-1')
|
||||
|
||||
test_cluster_name = 'test_ecs_cluster'
|
||||
|
||||
_ = ecs_client.create_cluster(
|
||||
clusterName=test_cluster_name
|
||||
)
|
||||
|
||||
test_instance = ec2.create_instances(
|
||||
ImageId="ami-1234abcd",
|
||||
MinCount=1,
|
||||
MaxCount=1,
|
||||
)[0]
|
||||
|
||||
instance_id_document = json.dumps(
|
||||
ec2_utils.generate_instance_identity_document(test_instance)
|
||||
)
|
||||
|
||||
response = ecs_client.register_container_instance(
|
||||
cluster=test_cluster_name,
|
||||
instanceIdentityDocument=instance_id_document
|
||||
)
|
||||
|
||||
response['containerInstance'][
|
||||
'ec2InstanceId'].should.equal(test_instance.id)
|
||||
full_arn1 = response['containerInstance']['containerInstanceArn']
|
||||
|
||||
test_instance = ec2.create_instances(
|
||||
ImageId="ami-1234abcd",
|
||||
MinCount=1,
|
||||
MaxCount=1,
|
||||
)[0]
|
||||
|
||||
instance_id_document = json.dumps(
|
||||
ec2_utils.generate_instance_identity_document(test_instance)
|
||||
)
|
||||
|
||||
response = ecs_client.register_container_instance(
|
||||
cluster=test_cluster_name,
|
||||
instanceIdentityDocument=instance_id_document
|
||||
)
|
||||
|
||||
response['containerInstance'][
|
||||
'ec2InstanceId'].should.equal(test_instance.id)
|
||||
full_arn2 = response['containerInstance']['containerInstanceArn']
|
||||
partial_arn2 = full_arn2.rsplit('/', 1)[-1]
|
||||
|
||||
full_arn2.should_not.equal(full_arn1)  # uuid1 isn't unique enough when the PC is fast ;-)
|
||||
|
||||
# Ok set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd.
|
||||
ecs_client.put_attributes(
|
||||
cluster=test_cluster_name,
|
||||
attributes=[
|
||||
{'name': 'env', 'value': 'prod'},
|
||||
{'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1},
|
||||
{'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}
|
||||
]
|
||||
)
|
||||
|
||||
resp = ecs_client.list_attributes(
|
||||
cluster=test_cluster_name,
|
||||
targetType='container-instance'
|
||||
)
|
||||
attrs = resp['attributes']
|
||||
len(attrs).should.equal(4)
|
||||
|
||||
# Tests that the attrs have been set properly
|
||||
len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2)
|
||||
len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1)
|
||||
|
||||
ecs_client.delete_attributes(
|
||||
cluster=test_cluster_name,
|
||||
attributes=[
|
||||
{'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}
|
||||
]
|
||||
)
|
||||
|
||||
resp = ecs_client.list_attributes(
|
||||
cluster=test_cluster_name,
|
||||
targetType='container-instance'
|
||||
)
|
||||
attrs = resp['attributes']
|
||||
len(attrs).should.equal(3)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_poll_endpoint():
|
||||
# No cluster setup required; just exercise the discover_poll_endpoint call
|
||||
ecs_client = boto3.client('ecs', region_name='us-east-1')
|
||||
|
||||
# Just a placeholder until someone actually wants useless data; just testing it doesn't raise an exception
|
||||
resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah')
|
||||
resp.should.contain('endpoint')
|
||||
resp.should.contain('telemetryEndpoint')
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_list_task_definition_families():
|
||||
client = boto3.client('ecs', region_name='us-east-1')
|
||||
client.register_task_definition(
|
||||
family='test_ecs_task',
|
||||
containerDefinitions=[
|
||||
{
|
||||
'name': 'hello_world',
|
||||
'image': 'docker/hello-world:latest',
|
||||
'cpu': 1024,
|
||||
'memory': 400,
|
||||
'essential': True,
|
||||
'environment': [{
|
||||
'name': 'AWS_ACCESS_KEY_ID',
|
||||
'value': 'SOME_ACCESS_KEY'
|
||||
}],
|
||||
'logConfiguration': {'logDriver': 'json-file'}
|
||||
}
|
||||
]
|
||||
)
|
||||
client.register_task_definition(
|
||||
family='alt_test_ecs_task',
|
||||
containerDefinitions=[
|
||||
{
|
||||
'name': 'hello_world',
|
||||
'image': 'docker/hello-world:latest',
|
||||
'cpu': 1024,
|
||||
'memory': 400,
|
||||
'essential': True,
|
||||
'environment': [{
|
||||
'name': 'AWS_ACCESS_KEY_ID',
|
||||
'value': 'SOME_ACCESS_KEY'
|
||||
}],
|
||||
'logConfiguration': {'logDriver': 'json-file'}
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
resp1 = client.list_task_definition_families()
|
||||
resp2 = client.list_task_definition_families(familyPrefix='alt')
|
||||
|
||||
len(resp1['families']).should.equal(2)
|
||||
len(resp2['families']).should.equal(1)
|
||||
|
||||
|
||||
def _fetch_container_instance_resources(container_instance_description):
|
||||
remaining_resources = {}
|
||||
registered_resources = {}
|
||||
|
@ -1,11 +1,13 @@
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
import boto3
|
||||
import botocore
|
||||
from botocore.exceptions import ClientError
|
||||
from nose.tools import assert_raises
|
||||
import sure # noqa
|
||||
|
||||
from moto import mock_elbv2, mock_ec2
|
||||
from moto import mock_elbv2, mock_ec2, mock_acm
|
||||
from moto.elbv2 import elbv2_backends
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@ -1045,3 +1047,373 @@ def test_describe_invalid_target_group():
|
||||
# Check error raises correctly
|
||||
with assert_raises(ClientError):
|
||||
conn.describe_target_groups(Names=['invalid'])
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
def test_describe_account_limits():
|
||||
client = boto3.client('elbv2', region_name='eu-central-1')
|
||||
|
||||
resp = client.describe_account_limits()
|
||||
resp['Limits'][0].should.contain('Name')
|
||||
resp['Limits'][0].should.contain('Max')
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
def test_describe_ssl_policies():
|
||||
client = boto3.client('elbv2', region_name='eu-central-1')
|
||||
|
||||
resp = client.describe_ssl_policies()
|
||||
len(resp['SslPolicies']).should.equal(5)
|
||||
|
||||
resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08'])
|
||||
len(resp['SslPolicies']).should.equal(2)
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@mock_ec2
|
||||
def test_set_ip_address_type():
|
||||
client = boto3.client('elbv2', region_name='us-east-1')
|
||||
ec2 = boto3.resource('ec2', region_name='us-east-1')
|
||||
|
||||
security_group = ec2.create_security_group(
|
||||
GroupName='a-security-group', Description='First One')
|
||||
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
|
||||
subnet1 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1a')
|
||||
subnet2 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1b')
|
||||
|
||||
response = client.create_load_balancer(
|
||||
Name='my-lb',
|
||||
Subnets=[subnet1.id, subnet2.id],
|
||||
SecurityGroups=[security_group.id],
|
||||
Scheme='internal',
|
||||
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
|
||||
arn = response['LoadBalancers'][0]['LoadBalancerArn']
|
||||
|
||||
# Internal LBs can't be dualstack yet
|
||||
with assert_raises(ClientError):
|
||||
client.set_ip_address_type(
|
||||
LoadBalancerArn=arn,
|
||||
IpAddressType='dualstack'
|
||||
)
|
||||
|
||||
# Create internet facing one
|
||||
response = client.create_load_balancer(
|
||||
Name='my-lb2',
|
||||
Subnets=[subnet1.id, subnet2.id],
|
||||
SecurityGroups=[security_group.id],
|
||||
Scheme='internet-facing',
|
||||
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
|
||||
arn = response['LoadBalancers'][0]['LoadBalancerArn']
|
||||
|
||||
client.set_ip_address_type(
|
||||
LoadBalancerArn=arn,
|
||||
IpAddressType='dualstack'
|
||||
)
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@mock_ec2
|
||||
def test_set_security_groups():
|
||||
client = boto3.client('elbv2', region_name='us-east-1')
|
||||
ec2 = boto3.resource('ec2', region_name='us-east-1')
|
||||
|
||||
security_group = ec2.create_security_group(
|
||||
GroupName='a-security-group', Description='First One')
|
||||
security_group2 = ec2.create_security_group(
|
||||
GroupName='b-security-group', Description='Second One')
|
||||
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
|
||||
subnet1 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1a')
|
||||
subnet2 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1b')
|
||||
|
||||
response = client.create_load_balancer(
|
||||
Name='my-lb',
|
||||
Subnets=[subnet1.id, subnet2.id],
|
||||
SecurityGroups=[security_group.id],
|
||||
Scheme='internal',
|
||||
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
|
||||
arn = response['LoadBalancers'][0]['LoadBalancerArn']
|
||||
|
||||
client.set_security_groups(
|
||||
LoadBalancerArn=arn,
|
||||
SecurityGroups=[security_group.id, security_group2.id]
|
||||
)
|
||||
|
||||
resp = client.describe_load_balancers(LoadBalancerArns=[arn])
|
||||
len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
|
||||
|
||||
with assert_raises(ClientError):
|
||||
client.set_security_groups(
|
||||
LoadBalancerArn=arn,
|
||||
SecurityGroups=['non_existent']
|
||||
)
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@mock_ec2
|
||||
def test_set_subnets():
|
||||
client = boto3.client('elbv2', region_name='us-east-1')
|
||||
ec2 = boto3.resource('ec2', region_name='us-east-1')
|
||||
|
||||
security_group = ec2.create_security_group(
|
||||
GroupName='a-security-group', Description='First One')
|
||||
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
|
||||
subnet1 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1a')
|
||||
subnet2 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1b')
|
||||
subnet3 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1c')
|
||||
|
||||
response = client.create_load_balancer(
|
||||
Name='my-lb',
|
||||
Subnets=[subnet1.id, subnet2.id],
|
||||
SecurityGroups=[security_group.id],
|
||||
Scheme='internal',
|
||||
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
|
||||
arn = response['LoadBalancers'][0]['LoadBalancerArn']
|
||||
|
||||
client.set_subnets(
|
||||
LoadBalancerArn=arn,
|
||||
Subnets=[subnet1.id, subnet2.id, subnet3.id]
|
||||
)
|
||||
|
||||
resp = client.describe_load_balancers(LoadBalancerArns=[arn])
|
||||
len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
|
||||
|
||||
# Only 1 AZ
|
||||
with assert_raises(ClientError):
|
||||
client.set_subnets(
|
||||
LoadBalancerArn=arn,
|
||||
Subnets=[subnet1.id]
|
||||
)
|
||||
|
||||
# Multiple subnets in same AZ
|
||||
with assert_raises(ClientError):
|
||||
client.set_subnets(
|
||||
LoadBalancerArn=arn,
|
||||
Subnets=[subnet1.id, subnet2.id, subnet2.id]
|
||||
)
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@mock_ec2
|
||||
def test_modify_load_balancer_attributes_idle_timeout():
|
||||
client = boto3.client('elbv2', region_name='us-east-1')
|
||||
ec2 = boto3.resource('ec2', region_name='us-east-1')
|
||||
|
||||
security_group = ec2.create_security_group(
|
||||
GroupName='a-security-group', Description='First One')
|
||||
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
|
||||
subnet1 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1a')
|
||||
subnet2 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='us-east-1b')
|
||||
|
||||
response = client.create_load_balancer(
|
||||
Name='my-lb',
|
||||
Subnets=[subnet1.id, subnet2.id],
|
||||
SecurityGroups=[security_group.id],
|
||||
Scheme='internal',
|
||||
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
|
||||
arn = response['LoadBalancers'][0]['LoadBalancerArn']
|
||||
|
||||
client.modify_load_balancer_attributes(
|
||||
LoadBalancerArn=arn,
|
||||
Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
|
||||
)
|
||||
|
||||
# Check it's 600, not 60
|
||||
response = client.describe_load_balancer_attributes(
|
||||
LoadBalancerArn=arn
|
||||
)
|
||||
idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
|
||||
idle_timeout['Value'].should.equal('600')
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@mock_ec2
|
||||
def test_modify_target_group():
|
||||
client = boto3.client('elbv2', region_name='us-east-1')
|
||||
ec2 = boto3.resource('ec2', region_name='us-east-1')
|
||||
|
||||
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
|
||||
|
||||
response = client.create_target_group(
|
||||
Name='a-target',
|
||||
Protocol='HTTP',
|
||||
Port=8080,
|
||||
VpcId=vpc.id,
|
||||
HealthCheckProtocol='HTTP',
|
||||
HealthCheckPort='8080',
|
||||
HealthCheckPath='/',
|
||||
HealthCheckIntervalSeconds=5,
|
||||
HealthCheckTimeoutSeconds=5,
|
||||
HealthyThresholdCount=5,
|
||||
UnhealthyThresholdCount=2,
|
||||
Matcher={'HttpCode': '200'})
|
||||
arn = response.get('TargetGroups')[0]['TargetGroupArn']
|
||||
|
||||
client.modify_target_group(
|
||||
TargetGroupArn=arn,
|
||||
HealthCheckProtocol='HTTPS',
|
||||
HealthCheckPort='8081',
|
||||
HealthCheckPath='/status',
|
||||
HealthCheckIntervalSeconds=10,
|
||||
HealthCheckTimeoutSeconds=10,
|
||||
HealthyThresholdCount=10,
|
||||
UnhealthyThresholdCount=4,
|
||||
Matcher={'HttpCode': '200-399'}
|
||||
)
|
||||
|
||||
response = client.describe_target_groups(
|
||||
TargetGroupArns=[arn]
|
||||
)
|
||||
response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
|
||||
response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
|
||||
response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
|
||||
response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
|
||||
response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
|
||||
response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
|
||||
response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
|
||||
response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
|
||||
|
||||
|
||||
@mock_elbv2
|
||||
@mock_ec2
|
||||
@mock_acm
|
||||
def test_modify_listener_http_to_https():
|
||||
client = boto3.client('elbv2', region_name='eu-central-1')
|
||||
acm = boto3.client('acm', region_name='eu-central-1')
|
||||
ec2 = boto3.resource('ec2', region_name='eu-central-1')
|
||||
|
||||
security_group = ec2.create_security_group(
|
||||
GroupName='a-security-group', Description='First One')
|
||||
vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
|
||||
subnet1 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='eu-central-1a')
|
||||
subnet2 = ec2.create_subnet(
|
||||
VpcId=vpc.id,
|
||||
CidrBlock='172.28.7.192/26',
|
||||
AvailabilityZone='eu-central-1b')
|
||||
|
||||
response = client.create_load_balancer(
|
||||
Name='my-lb',
|
||||
Subnets=[subnet1.id, subnet2.id],
|
||||
SecurityGroups=[security_group.id],
|
||||
Scheme='internal',
|
||||
Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
|
||||
|
||||
load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
|
||||
|
||||
response = client.create_target_group(
|
||||
Name='a-target',
|
||||
Protocol='HTTP',
|
||||
Port=8080,
|
||||
VpcId=vpc.id,
|
||||
HealthCheckProtocol='HTTP',
|
||||
HealthCheckPort='8080',
|
||||
HealthCheckPath='/',
|
||||
HealthCheckIntervalSeconds=5,
|
||||
HealthCheckTimeoutSeconds=5,
|
||||
HealthyThresholdCount=5,
|
||||
UnhealthyThresholdCount=2,
|
||||
Matcher={'HttpCode': '200'})
|
||||
target_group = response.get('TargetGroups')[0]
|
||||
target_group_arn = target_group['TargetGroupArn']
|
||||
|
||||
# Plain HTTP listener
|
||||
response = client.create_listener(
|
||||
LoadBalancerArn=load_balancer_arn,
|
||||
Protocol='HTTP',
|
||||
Port=80,
|
||||
DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}]
|
||||
)
|
||||
listener_arn = response['Listeners'][0]['ListenerArn']
|
||||
|
||||
response = acm.request_certificate(
|
||||
DomainName='google.com',
|
||||
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
|
||||
)
|
||||
google_arn = response['CertificateArn']
|
||||
response = acm.request_certificate(
|
||||
DomainName='yahoo.com',
|
||||
SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'],
|
||||
)
|
||||
yahoo_arn = response['CertificateArn']
|
||||
|
||||
response = client.modify_listener(
|
||||
ListenerArn=listener_arn,
|
||||
Port=443,
|
||||
Protocol='HTTPS',
|
||||
SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
|
||||
Certificates=[
|
||||
{'CertificateArn': google_arn, 'IsDefault': False},
|
||||
{'CertificateArn': yahoo_arn, 'IsDefault': True}
|
||||
],
|
||||
DefaultActions=[
|
||||
{'Type': 'forward', 'TargetGroupArn': target_group_arn}
|
||||
]
|
||||
)
|
||||
response['Listeners'][0]['Port'].should.equal(443)
|
||||
response['Listeners'][0]['Protocol'].should.equal('HTTPS')
|
||||
response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01')
|
||||
len(response['Listeners'][0]['Certificates']).should.equal(2)
|
||||
|
||||
# Check default cert, can't do this in server mode
|
||||
if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false':
|
||||
listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn]
|
||||
listener.certificate.should.equal(yahoo_arn)
|
||||
|
||||
# No default cert
|
||||
with assert_raises(ClientError):
|
||||
client.modify_listener(
|
||||
ListenerArn=listener_arn,
|
||||
Port=443,
|
||||
Protocol='HTTPS',
|
||||
SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
|
||||
Certificates=[
|
||||
{'CertificateArn': google_arn, 'IsDefault': False}
|
||||
],
|
||||
DefaultActions=[
|
||||
{'Type': 'forward', 'TargetGroupArn': target_group_arn}
|
||||
]
|
||||
)
|
||||
|
||||
# Bad cert
|
||||
with assert_raises(ClientError):
|
||||
client.modify_listener(
|
||||
ListenerArn=listener_arn,
|
||||
Port=443,
|
||||
Protocol='HTTPS',
|
||||
SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
|
||||
Certificates=[
|
||||
{'CertificateArn': 'lalala', 'IsDefault': True}
|
||||
],
|
||||
DefaultActions=[
|
||||
{'Type': 'forward', 'TargetGroupArn': target_group_arn}
|
||||
]
|
||||
)
|
||||
|
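
A possible follow-up assertion for the listener test above, sketched under the assumption that describe_listeners is also backed by the mock; it simply re-reads the listener and confirms the HTTPS settings stuck:

    resp = client.describe_listeners(ListenerArns=[listener_arn])
    listener = resp['Listeners'][0]
    listener['Port'].should.equal(443)
    listener['Protocol'].should.equal('HTTPS')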
@ -3,6 +3,8 @@ import random
|
||||
import boto3
|
||||
|
||||
from moto.events import mock_events
|
||||
from botocore.exceptions import ClientError
|
||||
from nose.tools import assert_raises
|
||||
|
||||
|
||||
RULES = [
|
||||
@ -171,11 +173,36 @@ def test_remove_targets():
|
||||
    assert(targets_before - 1 == targets_after)


if __name__ == '__main__':
    test_list_rules()
    test_describe_rule()
    test_enable_disable_rule()
    test_list_rule_names_by_target()
    test_list_rules()
    test_list_targets_by_rule()
    test_remove_targets()
@mock_events
def test_permissions():
    client = boto3.client('events', 'eu-central-1')

    client.put_permission(Action='PutEvents', Principal='111111111111', StatementId='Account1')
    client.put_permission(Action='PutEvents', Principal='222222222222', StatementId='Account2')

    resp = client.describe_event_bus()
    assert len(resp['Policy']['Statement']) == 2

    client.remove_permission(StatementId='Account2')

    resp = client.describe_event_bus()
    assert len(resp['Policy']['Statement']) == 1
    assert resp['Policy']['Statement'][0]['Sid'] == 'Account1'


@mock_events
def test_put_events():
    client = boto3.client('events', 'eu-central-1')

    event = {
        "Source": "com.mycompany.myapp",
        "Detail": '{"key1": "value3", "key2": "value4"}',
        "Resources": ["resource1", "resource2"],
        "DetailType": "myDetailType"
    }

    client.put_events(Entries=[event])
    # Boto3 would error if it didn't return 200 OK

    with assert_raises(ClientError):
        client.put_events(Entries=[event]*20)
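
The assert_raises above relies on PutEvents rejecting more than 10 entries per call. A rough sketch of client-side batching around that limit (the helper name is illustrative and not part of moto or this change):

def put_events_in_batches(events_client, entries, batch_size=10):
    # Send entries in chunks of at most `batch_size` (the PutEvents per-call limit)
    # and add up how many entries the service reported as failed.
    failed = 0
    for i in range(0, len(entries), batch_size):
        resp = events_client.put_events(Entries=entries[i:i + batch_size])
        failed += resp.get('FailedEntryCount', 0)
    return failed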
@ -1775,6 +1775,30 @@ def test_boto3_put_object_tagging():
|
||||
resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
|
||||
|
||||
|
||||
@mock_s3
def test_boto3_put_object_tagging_with_single_tag():
    s3 = boto3.client('s3', region_name='us-east-1')
    bucket_name = 'mybucket'
    key = 'key-with-tags'
    s3.create_bucket(Bucket=bucket_name)

    s3.put_object(
        Bucket=bucket_name,
        Key=key,
        Body='test'
    )

    resp = s3.put_object_tagging(
        Bucket=bucket_name,
        Key=key,
        Tagging={'TagSet': [
            {'Key': 'item1', 'Value': 'foo'}
        ]}
    )

    resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200)


@mock_s3
|
||||
def test_boto3_get_object_tagging():
|
||||
s3 = boto3.client('s3', region_name='us-east-1')
|
||||
|
@ -1,5 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
import os
|
||||
|
||||
import boto
|
||||
import boto3
|
||||
@ -8,14 +9,18 @@ from botocore.exceptions import ClientError
|
||||
from boto.exception import SQSError
|
||||
from boto.sqs.message import RawMessage, Message
|
||||
|
||||
from freezegun import freeze_time
|
||||
import base64
|
||||
import json
|
||||
import sure # noqa
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from moto import settings, mock_sqs, mock_sqs_deprecated
|
||||
from tests.helpers import requires_boto_gte
|
||||
import tests.backport_assert_raises # noqa
|
||||
from nose.tools import assert_raises
|
||||
from nose import SkipTest
|
||||
|
||||
|
||||
@mock_sqs
|
||||
@ -93,8 +98,6 @@ def test_message_send_without_attributes():
|
||||
msg.get('MD5OfMessageBody').should.equal(
|
||||
'58fd9edd83341c29f1aebba81c31e257')
|
||||
msg.shouldnt.have.key('MD5OfMessageAttributes')
|
||||
msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
|
||||
'27daac76-34dd-47df-bd01-1f6e873584a0')
|
||||
msg.get('MessageId').should_not.contain(' \n')
|
||||
|
||||
messages = queue.receive_messages()
|
||||
@ -118,8 +121,6 @@ def test_message_send_with_attributes():
|
||||
'58fd9edd83341c29f1aebba81c31e257')
|
||||
msg.get('MD5OfMessageAttributes').should.equal(
|
||||
'235c5c510d26fb653d073faed50ae77c')
|
||||
msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
|
||||
'27daac76-34dd-47df-bd01-1f6e873584a0')
|
||||
msg.get('MessageId').should_not.contain(' \n')
|
||||
|
||||
messages = queue.receive_messages()
|
||||
@ -143,8 +144,6 @@ def test_message_with_complex_attributes():
|
||||
'58fd9edd83341c29f1aebba81c31e257')
|
||||
msg.get('MD5OfMessageAttributes').should.equal(
|
||||
'8ae21a7957029ef04146b42aeaa18a22')
|
||||
msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
|
||||
'27daac76-34dd-47df-bd01-1f6e873584a0')
|
||||
msg.get('MessageId').should_not.contain(' \n')
|
||||
|
||||
messages = queue.receive_messages()
|
||||
@ -755,3 +754,181 @@ def test_delete_message_after_visibility_timeout():
|
||||
m1_retrieved.delete()
|
||||
|
||||
assert new_queue.count() == 0
|
||||
|
||||
|
||||
@mock_sqs
|
||||
def test_batch_change_message_visibility():
|
||||
if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
|
||||
raise SkipTest("Can't manipulate time in server mode")
|
||||
|
||||
with freeze_time("2015-01-01 12:00:00"):
|
||||
sqs = boto3.client('sqs', region_name='us-east-1')
|
||||
resp = sqs.create_queue(
|
||||
QueueName='test-dlr-queue.fifo',
|
||||
Attributes={'FifoQueue': 'true'}
|
||||
)
|
||||
queue_url = resp['QueueUrl']
|
||||
|
||||
sqs.send_message(QueueUrl=queue_url, MessageBody='msg1')
|
||||
sqs.send_message(QueueUrl=queue_url, MessageBody='msg2')
|
||||
sqs.send_message(QueueUrl=queue_url, MessageBody='msg3')
|
||||
|
||||
with freeze_time("2015-01-01 12:01:00"):
|
||||
receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2)
|
||||
len(receive_resp['Messages']).should.equal(2)
|
||||
|
||||
handles = [item['ReceiptHandle'] for item in receive_resp['Messages']]
|
||||
entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles]
|
||||
|
||||
resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries)
|
||||
len(resp['Successful']).should.equal(2)
|
||||
|
||||
with freeze_time("2015-01-01 14:00:00"):
|
||||
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
|
||||
len(resp['Messages']).should.equal(1)
|
||||
|
||||
with freeze_time("2015-01-01 16:00:00"):
|
||||
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
|
||||
len(resp['Messages']).should.equal(1)
|
||||
|
||||
with freeze_time("2015-01-02 12:00:00"):
|
||||
resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
|
||||
len(resp['Messages']).should.equal(3)
|
||||
|
||||
|
||||
@mock_sqs
|
||||
def test_permissions():
|
||||
client = boto3.client('sqs', region_name='us-east-1')
|
||||
|
||||
resp = client.create_queue(
|
||||
QueueName='test-dlr-queue.fifo',
|
||||
Attributes={'FifoQueue': 'true'}
|
||||
)
|
||||
queue_url = resp['QueueUrl']
|
||||
|
||||
client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*'])
|
||||
client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage'])
|
||||
|
||||
with assert_raises(ClientError):
|
||||
client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish'])
|
||||
|
||||
client.remove_permission(QueueUrl=queue_url, Label='account2')
|
||||
|
||||
with assert_raises(ClientError):
|
||||
client.remove_permission(QueueUrl=queue_url, Label='non_existant')
|
||||
|
||||
|
||||
@mock_sqs
def test_tags():
    client = boto3.client('sqs', region_name='us-east-1')

    resp = client.create_queue(
        QueueName='test-dlr-queue.fifo',
        Attributes={'FifoQueue': 'true'}
    )
    queue_url = resp['QueueUrl']

    client.tag_queue(
        QueueUrl=queue_url,
        Tags={
            'test1': 'value1',
            'test2': 'value2',
        }
    )

    resp = client.list_queue_tags(QueueUrl=queue_url)
    resp['Tags'].should.contain('test1')
    resp['Tags'].should.contain('test2')

    client.untag_queue(
        QueueUrl=queue_url,
        TagKeys=['test2']
    )

    resp = client.list_queue_tags(QueueUrl=queue_url)
    resp['Tags'].should.contain('test1')
    resp['Tags'].should_not.contain('test2')


@mock_sqs
|
||||
def test_create_fifo_queue_with_dlq():
|
||||
sqs = boto3.client('sqs', region_name='us-east-1')
|
||||
resp = sqs.create_queue(
|
||||
QueueName='test-dlr-queue.fifo',
|
||||
Attributes={'FifoQueue': 'true'}
|
||||
)
|
||||
queue_url1 = resp['QueueUrl']
|
||||
queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
|
||||
|
||||
resp = sqs.create_queue(
|
||||
QueueName='test-dlr-queue',
|
||||
Attributes={'FifoQueue': 'false'}
|
||||
)
|
||||
queue_url2 = resp['QueueUrl']
|
||||
queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn']
|
||||
|
||||
sqs.create_queue(
|
||||
QueueName='test-queue.fifo',
|
||||
Attributes={
|
||||
'FifoQueue': 'true',
|
||||
'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
|
||||
}
|
||||
)
|
||||
|
||||
# Can't have a FIFO queue with a non-FIFO DLQ
|
||||
with assert_raises(ClientError):
|
||||
sqs.create_queue(
|
||||
QueueName='test-queue2.fifo',
|
||||
Attributes={
|
||||
'FifoQueue': 'true',
|
||||
'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2})
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@mock_sqs
|
||||
def test_queue_with_dlq():
|
||||
if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
|
||||
raise SkipTest("Can't manipulate time in server mode")
|
||||
|
||||
sqs = boto3.client('sqs', region_name='us-east-1')
|
||||
|
||||
with freeze_time("2015-01-01 12:00:00"):
|
||||
resp = sqs.create_queue(
|
||||
QueueName='test-dlr-queue.fifo',
|
||||
Attributes={'FifoQueue': 'true'}
|
||||
)
|
||||
queue_url1 = resp['QueueUrl']
|
||||
queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
|
||||
|
||||
resp = sqs.create_queue(
|
||||
QueueName='test-queue.fifo',
|
||||
Attributes={
|
||||
'FifoQueue': 'true',
|
||||
'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
|
||||
}
|
||||
)
|
||||
queue_url2 = resp['QueueUrl']
|
||||
|
||||
sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1')
|
||||
sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2')
|
||||
|
||||
with freeze_time("2015-01-01 13:00:00"):
|
||||
resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
|
||||
resp['Messages'][0]['Body'].should.equal('msg1')
|
||||
|
||||
with freeze_time("2015-01-01 13:01:00"):
|
||||
resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
|
||||
resp['Messages'][0]['Body'].should.equal('msg1')
|
||||
|
||||
with freeze_time("2015-01-01 13:02:00"):
|
||||
resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
|
||||
len(resp['Messages']).should.equal(1)
|
||||
|
||||
resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0)
|
||||
resp['Messages'][0]['Body'].should.equal('msg1')
|
||||
|
||||
# Might as well test list source queues
|
||||
|
||||
resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1)
|
||||
resp['queueUrls'][0].should.equal(queue_url2)
|
||||
|
@ -24,7 +24,7 @@ while True:
|
||||
        break
    except EXCEPTIONS:
        elapsed_s = time.time() - start_ts
        if elapsed_s > 30:
        if elapsed_s > 60:
            raise

        print('.')
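
The wait script hunk above is a plain retry-until-timeout loop, now with a 60 second budget instead of 30. The same pattern as a reusable helper, sketched with illustrative names and defaults:

import time


def wait_until(check, timeout_s=60, interval_s=0.5):
    # Poll `check` until it returns truthy, or give up once `timeout_s` elapses.
    start_ts = time.time()
    while True:
        if check():
            return
        if time.time() - start_ts > timeout_s:
            raise RuntimeError('condition not met within %s seconds' % timeout_s)
        time.sleep(interval_s)
        print('.')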