diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index f97963a7b..813f5bf36 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -203,26 +203,26 @@
- [ ] set_load_balancer_policies_for_backend_server
- [X] set_load_balancer_policies_of_listener
-## dynamodb - 36% implemented
-- [ ] batch_get_item
-- [ ] batch_write_item
+## dynamodb - 100% implemented (is dynamodbv2)
+- [X] batch_get_item
+- [X] batch_write_item
- [X] create_table
- [X] delete_item
- [X] delete_table
-- [ ] describe_limits
-- [ ] describe_table
-- [ ] describe_time_to_live
+- [X] describe_limits
+- [X] describe_table
+- [X] describe_time_to_live
- [X] get_item
-- [ ] list_tables
-- [ ] list_tags_of_resource
+- [X] list_tables
+- [X] list_tags_of_resource
- [X] put_item
- [X] query
- [X] scan
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_item
-- [ ] update_table
-- [ ] update_time_to_live
+- [X] tag_resource
+- [X] untag_resource
+- [X] update_item
+- [X] update_table
+- [X] update_time_to_live
## cloudhsmv2 - 0% implemented
- [ ] create_cluster
@@ -917,13 +917,13 @@
- [ ] unpeer_vpc
- [ ] update_domain_entry
-## xray - 0% implemented
-- [ ] batch_get_traces
-- [ ] get_service_graph
-- [ ] get_trace_graph
-- [ ] get_trace_summaries
-- [ ] put_telemetry_records
-- [ ] put_trace_segments
+## xray - 100% implemented
+- [X] batch_get_traces
+- [X] get_service_graph
+- [X] get_trace_graph
+- [X] get_trace_summaries
+- [X] put_telemetry_records
+- [X] put_trace_segments
## ec2 - 41% implemented
- [ ] accept_reserved_instances_exchange_quote
@@ -1483,10 +1483,10 @@
- [ ] post_content
- [ ] post_text
-## ecs - 74% implemented
+## ecs - 90% implemented
- [X] create_cluster
- [X] create_service
-- [ ] delete_attributes
+- [X] delete_attributes
- [X] delete_cluster
- [X] delete_service
- [X] deregister_container_instance
@@ -1496,15 +1496,15 @@
- [X] describe_services
- [X] describe_task_definition
- [X] describe_tasks
-- [ ] discover_poll_endpoint
-- [ ] list_attributes
+- [X] discover_poll_endpoint
+- [X] list_attributes
- [X] list_clusters
- [X] list_container_instances
- [X] list_services
-- [ ] list_task_definition_families
+- [X] list_task_definition_families
- [X] list_task_definitions
- [X] list_tasks
-- [ ] put_attributes
+- [X] put_attributes
- [X] register_container_instance
- [X] register_task_definition
- [X] run_task
@@ -1699,17 +1699,17 @@
- [ ] start_db_instance
- [ ] stop_db_instance
-## acm - 50% implemented
+## acm - 100% implemented
- [X] add_tags_to_certificate
- [X] delete_certificate
-- [ ] describe_certificate
+- [X] describe_certificate
- [X] get_certificate
-- [ ] import_certificate
-- [ ] list_certificates
-- [ ] list_tags_for_certificate
+- [X] import_certificate
+- [X] list_certificates
+- [X] list_tags_for_certificate
- [X] remove_tags_from_certificate
- [X] request_certificate
-- [ ] resend_validation_email
+- [X] resend_validation_email
## elasticache - 0% implemented
- [ ] add_tags_to_resource
@@ -1853,31 +1853,31 @@
- [ ] refresh_trusted_advisor_check
- [ ] resolve_case
-## lambda - 0% implemented
+## lambda - 32% implemented
- [ ] add_permission
- [ ] create_alias
- [ ] create_event_source_mapping
-- [ ] create_function
+- [X] create_function
- [ ] delete_alias
- [ ] delete_event_source_mapping
-- [ ] delete_function
+- [X] delete_function
- [ ] get_account_settings
- [ ] get_alias
- [ ] get_event_source_mapping
-- [ ] get_function
+- [X] get_function
- [ ] get_function_configuration
-- [ ] get_policy
-- [ ] invoke
+- [X] get_policy
+- [X] invoke
- [ ] invoke_async
- [ ] list_aliases
- [ ] list_event_source_mappings
-- [ ] list_functions
-- [ ] list_tags
+- [X] list_functions
+- [X] list_tags
- [ ] list_versions_by_function
- [ ] publish_version
- [ ] remove_permission
-- [ ] tag_resource
-- [ ] untag_resource
+- [X] tag_resource
+- [X] untag_resource
- [ ] update_alias
- [ ] update_event_source_mapping
- [ ] update_function_code
@@ -2456,8 +2456,8 @@
- [ ] update_distribution
- [ ] update_streaming_distribution
-## elbv2 - 54% implemented
-- [ ] add_tags
+## elbv2 - 100% implemented
+- [X] add_tags
- [X] create_listener
- [X] create_load_balancer
- [X] create_rule
@@ -2467,27 +2467,27 @@
- [X] delete_rule
- [X] delete_target_group
- [X] deregister_targets
-- [ ] describe_account_limits
+- [X] describe_account_limits
- [X] describe_listeners
-- [ ] describe_load_balancer_attributes
+- [X] describe_load_balancer_attributes
- [X] describe_load_balancers
- [X] describe_rules
-- [ ] describe_ssl_policies
-- [ ] describe_tags
-- [ ] describe_target_group_attributes
+- [X] describe_ssl_policies
+- [X] describe_tags
+- [X] describe_target_group_attributes
- [X] describe_target_groups
- [X] describe_target_health
-- [ ] modify_listener
-- [ ] modify_load_balancer_attributes
+- [X] modify_listener
+- [X] modify_load_balancer_attributes
- [X] modify_rule
-- [ ] modify_target_group
-- [ ] modify_target_group_attributes
+- [X] modify_target_group
+- [X] modify_target_group_attributes
- [X] register_targets
-- [ ] remove_tags
-- [ ] set_ip_address_type
+- [X] remove_tags
+- [X] set_ip_address_type
- [X] set_rule_priorities
-- [ ] set_security_groups
-- [ ] set_subnets
+- [X] set_security_groups
+- [X] set_subnets
## sdb - 0% implemented
- [ ] batch_delete_attributes
@@ -3089,20 +3089,20 @@
- [ ] start_configuration_recorder
- [ ] stop_configuration_recorder
-## events - 73% implemented
+## events - 100% implemented
- [X] delete_rule
-- [ ] describe_event_bus
+- [X] describe_event_bus
- [X] describe_rule
- [X] disable_rule
- [X] enable_rule
- [X] list_rule_names_by_target
- [X] list_rules
- [X] list_targets_by_rule
-- [ ] put_events
-- [ ] put_permission
+- [X] put_events
+- [X] put_permission
- [X] put_rule
- [X] put_targets
-- [ ] remove_permission
+- [X] remove_permission
- [X] remove_targets
- [X] test_event_pattern
@@ -3149,24 +3149,27 @@
- [ ] put_scaling_policy
- [ ] register_scalable_target
-## sqs - 41% implemented
-- [ ] add_permission
+## sqs - 100% implemented
+- [X] add_permission
- [X] change_message_visibility
-- [ ] change_message_visibility_batch
+- [X] change_message_visibility_batch
- [X] create_queue
- [X] delete_message
-- [ ] delete_message_batch
+- [X] delete_message_batch
- [X] delete_queue
-- [ ] get_queue_attributes
-- [ ] get_queue_url
-- [ ] list_dead_letter_source_queues
+- [X] get_queue_attributes
+- [X] get_queue_url
+- [X] list_dead_letter_source_queues
- [X] list_queues
- [X] purge_queue
-- [ ] receive_message
-- [ ] remove_permission
+- [X] receive_message
+- [X] remove_permission
- [X] send_message
-- [ ] send_message_batch
-- [ ] set_queue_attributes
+- [X] send_message_batch
+- [X] set_queue_attributes
+- [X] tag_queue
+- [X] untag_queue
+- [X] list_queue_tags
## iot - 0% implemented
- [ ] accept_certificate_transfer
diff --git a/README.md b/README.md
index 7ced7b895..9a20bbe15 100644
--- a/README.md
+++ b/README.md
@@ -68,10 +68,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L
|------------------------------------------------------------------------------|
| Cloudwatch | @mock_cloudwatch | basic endpoints done |
|------------------------------------------------------------------------------|
+| CloudwatchEvents | @mock_events | all endpoints done |
+|------------------------------------------------------------------------------|
| Data Pipeline | @mock_datapipeline| basic endpoints done |
|------------------------------------------------------------------------------|
| DynamoDB | @mock_dynamodb | core endpoints done |
-| DynamoDB2 | @mock_dynamodb2 | core endpoints + partial indexes |
+| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes |
|------------------------------------------------------------------------------|
| EC2 | @mock_ec2 | core endpoints done |
| - AMI | | core endpoints done |
@@ -86,7 +88,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L
|------------------------------------------------------------------------------|
| ELB | @mock_elb | core endpoints done |
|------------------------------------------------------------------------------|
-| ELBv2 | @mock_elbv2 | core endpoints done |
+| ELBv2 | @mock_elbv2 | all endpoints done |
|------------------------------------------------------------------------------|
| EMR | @mock_emr | core endpoints done |
|------------------------------------------------------------------------------|
@@ -115,7 +117,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L
|------------------------------------------------------------------------------|
| S3 | @mock_s3 | core endpoints done |
|------------------------------------------------------------------------------|
-| SES | @mock_ses | core endpoints done |
+| SES | @mock_ses | all endpoints done |
|------------------------------------------------------------------------------|
| SNS | @mock_sns | all endpoints done |
|------------------------------------------------------------------------------|
@@ -127,7 +129,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L
|------------------------------------------------------------------------------|
| SWF | @mock_swf | basic endpoints done |
|------------------------------------------------------------------------------|
-| X-Ray | @mock_xray | core endpoints done |
+| X-Ray | @mock_xray | all endpoints done |
|------------------------------------------------------------------------------|
```
diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py
index 5474707d6..40202f7bd 100644
--- a/moto/core/exceptions.py
+++ b/moto/core/exceptions.py
@@ -34,6 +34,8 @@ ERROR_JSON_RESPONSE = u"""{
class RESTError(HTTPException):
+ code = 400
+
templates = {
'single_error': SINGLE_ERROR_RESPONSE,
'error': ERROR_RESPONSE,
@@ -54,7 +56,6 @@ class DryRunClientError(RESTError):
class JsonRESTError(RESTError):
-
def __init__(self, error_type, message, template='error_json', **kwargs):
super(JsonRESTError, self).__init__(
error_type, message, template, **kwargs)
diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py
index ad3f042d2..a56a83b35 100644
--- a/moto/dynamodb2/__init__.py
+++ b/moto/dynamodb2/__init__.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
-from .models import dynamodb_backend2
+from .models import dynamodb_backends as dynamodb_backends2
+from ..core.models import base_decorator, deprecated_base_decorator
-dynamodb_backends2 = {"global": dynamodb_backend2}
-mock_dynamodb2 = dynamodb_backend2.decorator
-mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator
+dynamodb_backend2 = dynamodb_backends2['us-east-1']
+mock_dynamodb2 = base_decorator(dynamodb_backends2)
+mock_dynamodb2_deprecated = deprecated_base_decorator(dynamodb_backends2)
diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py
index bec72d327..855728ec1 100644
--- a/moto/dynamodb2/models.py
+++ b/moto/dynamodb2/models.py
@@ -5,9 +5,11 @@ import decimal
import json
import re
+import boto3
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
+from moto.core.exceptions import JsonRESTError
from .comparisons import get_comparison_func, get_filter_expression, Op
@@ -271,6 +273,10 @@ class Table(BaseModel):
self.items = defaultdict(dict)
self.table_arn = self._generate_arn(table_name)
self.tags = []
+ self.ttl = {
+ 'TimeToLiveStatus': 'DISABLED' # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED',
+ # 'AttributeName': 'string' # Can contain this
+ }
def _generate_arn(self, name):
return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name
@@ -577,9 +583,16 @@ class Table(BaseModel):
class DynamoDBBackend(BaseBackend):
- def __init__(self):
+ def __init__(self, region_name=None):
+ self.region_name = region_name
self.tables = OrderedDict()
+ def reset(self):
+ region_name = self.region_name
+
+ self.__dict__ = {}
+ self.__init__(region_name)
+
def create_table(self, name, **params):
if name in self.tables:
return None
@@ -595,6 +608,11 @@ class DynamoDBBackend(BaseBackend):
if self.tables[table].table_arn == table_arn:
self.tables[table].tags.extend(tags)
+ def untag_resource(self, table_arn, tag_keys):
+ for table in self.tables:
+ if self.tables[table].table_arn == table_arn:
+ self.tables[table].tags = [tag for tag in self.tables[table].tags if tag['Key'] not in tag_keys]
+
def list_tags_of_resource(self, table_arn):
required_table = None
for table in self.tables:
@@ -796,5 +814,28 @@ class DynamoDBBackend(BaseBackend):
hash_key, range_key = self.get_keys_value(table, keys)
return table.delete_item(hash_key, range_key)
+ def update_ttl(self, table_name, ttl_spec):
+ table = self.tables.get(table_name)
+ if table is None:
+ raise JsonRESTError('ResourceNotFound', 'Table not found')
-dynamodb_backend2 = DynamoDBBackend()
+ if 'Enabled' not in ttl_spec or 'AttributeName' not in ttl_spec:
+ raise JsonRESTError('InvalidParameterValue',
+ 'TimeToLiveSpecification does not contain Enabled and AttributeName')
+
+ if ttl_spec['Enabled']:
+ table.ttl['TimeToLiveStatus'] = 'ENABLED'
+ else:
+ table.ttl['TimeToLiveStatus'] = 'DISABLED'
+ table.ttl['AttributeName'] = ttl_spec['AttributeName']
+
+ def describe_ttl(self, table_name):
+ table = self.tables.get(table_name)
+ if table is None:
+ raise JsonRESTError('ResourceNotFound', 'Table not found')
+
+ return table.ttl
+
+
+available_regions = boto3.session.Session().get_available_regions("dynamodb")
+dynamodb_backends = {region: DynamoDBBackend(region_name=region) for region in available_regions}
diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py
index 218cfc21d..b9154b6e1 100644
--- a/moto/dynamodb2/responses.py
+++ b/moto/dynamodb2/responses.py
@@ -5,7 +5,7 @@ import re
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
-from .models import dynamodb_backend2, dynamo_json_dump
+from .models import dynamodb_backends, dynamo_json_dump
class DynamoHandler(BaseResponse):
@@ -24,6 +24,14 @@ class DynamoHandler(BaseResponse):
def error(self, type_, message, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message})
+ @property
+ def dynamodb_backend(self):
+ """
+ :return: DynamoDB2 Backend
+ :rtype: moto.dynamodb2.models.DynamoDBBackend
+ """
+ return dynamodb_backends[self.region]
+
@amzn_request_id
def call_action(self):
self.body = json.loads(self.body or '{}')
@@ -46,10 +54,10 @@ class DynamoHandler(BaseResponse):
limit = body.get('Limit', 100)
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
- start = list(dynamodb_backend2.tables.keys()).index(last) + 1
+ start = list(self.dynamodb_backend.tables.keys()).index(last) + 1
else:
start = 0
- all_tables = list(dynamodb_backend2.tables.keys())
+ all_tables = list(self.dynamodb_backend.tables.keys())
if limit:
tables = all_tables[start:start + limit]
else:
@@ -74,12 +82,12 @@ class DynamoHandler(BaseResponse):
global_indexes = body.get("GlobalSecondaryIndexes", [])
local_secondary_indexes = body.get("LocalSecondaryIndexes", [])
- table = dynamodb_backend2.create_table(table_name,
- schema=key_schema,
- throughput=throughput,
- attr=attr,
- global_indexes=global_indexes,
- indexes=local_secondary_indexes)
+ table = self.dynamodb_backend.create_table(table_name,
+ schema=key_schema,
+ throughput=throughput,
+ attr=attr,
+ global_indexes=global_indexes,
+ indexes=local_secondary_indexes)
if table is not None:
return dynamo_json_dump(table.describe())
else:
@@ -88,7 +96,7 @@ class DynamoHandler(BaseResponse):
def delete_table(self):
name = self.body['TableName']
- table = dynamodb_backend2.delete_table(name)
+ table = self.dynamodb_backend.delete_table(name)
if table is not None:
return dynamo_json_dump(table.describe())
else:
@@ -96,15 +104,21 @@ class DynamoHandler(BaseResponse):
return self.error(er, 'Requested resource not found')
def tag_resource(self):
- tags = self.body['Tags']
table_arn = self.body['ResourceArn']
- dynamodb_backend2.tag_resource(table_arn, tags)
- return json.dumps({})
+ tags = self.body['Tags']
+ self.dynamodb_backend.tag_resource(table_arn, tags)
+ return ''
+
+ def untag_resource(self):
+ table_arn = self.body['ResourceArn']
+ tags = self.body['TagKeys']
+ self.dynamodb_backend.untag_resource(table_arn, tags)
+ return ''
def list_tags_of_resource(self):
try:
table_arn = self.body['ResourceArn']
- all_tags = dynamodb_backend2.list_tags_of_resource(table_arn)
+ all_tags = self.dynamodb_backend.list_tags_of_resource(table_arn)
all_tag_keys = [tag['Key'] for tag in all_tags]
marker = self.body.get('NextToken')
if marker:
@@ -127,17 +141,17 @@ class DynamoHandler(BaseResponse):
def update_table(self):
name = self.body['TableName']
if 'GlobalSecondaryIndexUpdates' in self.body:
- table = dynamodb_backend2.update_table_global_indexes(
+ table = self.dynamodb_backend.update_table_global_indexes(
name, self.body['GlobalSecondaryIndexUpdates'])
if 'ProvisionedThroughput' in self.body:
throughput = self.body["ProvisionedThroughput"]
- table = dynamodb_backend2.update_table_throughput(name, throughput)
+ table = self.dynamodb_backend.update_table_throughput(name, throughput)
return dynamo_json_dump(table.describe())
def describe_table(self):
name = self.body['TableName']
try:
- table = dynamodb_backend2.tables[name]
+ table = self.dynamodb_backend.tables[name]
except KeyError:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, 'Requested resource not found')
@@ -188,8 +202,7 @@ class DynamoHandler(BaseResponse):
expected[not_exists_m.group(1)] = {'Exists': False}
try:
- result = dynamodb_backend2.put_item(
- name, item, expected, overwrite)
+ result = self.dynamodb_backend.put_item(name, item, expected, overwrite)
except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')
@@ -214,10 +227,10 @@ class DynamoHandler(BaseResponse):
request = list(table_request.values())[0]
if request_type == 'PutRequest':
item = request['Item']
- dynamodb_backend2.put_item(table_name, item)
+ self.dynamodb_backend.put_item(table_name, item)
elif request_type == 'DeleteRequest':
keys = request['Key']
- item = dynamodb_backend2.delete_item(table_name, keys)
+ item = self.dynamodb_backend.delete_item(table_name, keys)
response = {
"ConsumedCapacity": [
@@ -237,7 +250,7 @@ class DynamoHandler(BaseResponse):
name = self.body['TableName']
key = self.body['Key']
try:
- item = dynamodb_backend2.get_item(name, key)
+ item = self.dynamodb_backend.get_item(name, key)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, 'Validation Exception')
@@ -268,7 +281,7 @@ class DynamoHandler(BaseResponse):
attributes_to_get = table_request.get('AttributesToGet')
results["Responses"][table_name] = []
for key in keys:
- item = dynamodb_backend2.get_item(table_name, key)
+ item = self.dynamodb_backend.get_item(table_name, key)
if item:
item_describe = item.describe_attrs(attributes_to_get)
results["Responses"][table_name].append(
@@ -297,7 +310,7 @@ class DynamoHandler(BaseResponse):
if key_condition_expression:
value_alias_map = self.body['ExpressionAttributeValues']
- table = dynamodb_backend2.get_table(name)
+ table = self.dynamodb_backend.get_table(name)
# If table does not exist
if table is None:
@@ -365,7 +378,7 @@ class DynamoHandler(BaseResponse):
key_conditions = self.body.get('KeyConditions')
query_filters = self.body.get("QueryFilter")
if key_conditions:
- hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(
+ hash_key_name, range_key_name = self.dynamodb_backend.get_table_keys_name(
name, key_conditions.keys())
for key, value in key_conditions.items():
if key not in (hash_key_name, range_key_name):
@@ -398,9 +411,10 @@ class DynamoHandler(BaseResponse):
exclusive_start_key = self.body.get('ExclusiveStartKey')
limit = self.body.get("Limit")
scan_index_forward = self.body.get("ScanIndexForward")
- items, scanned_count, last_evaluated_key = dynamodb_backend2.query(
+ items, scanned_count, last_evaluated_key = self.dynamodb_backend.query(
name, hash_key, range_comparison, range_values, limit,
- exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs)
+ exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs
+ )
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, 'Requested resource not found')
@@ -442,12 +456,12 @@ class DynamoHandler(BaseResponse):
limit = self.body.get("Limit")
try:
- items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters,
- limit,
- exclusive_start_key,
- filter_expression,
- expression_attribute_names,
- expression_attribute_values)
+ items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters,
+ limit,
+ exclusive_start_key,
+ filter_expression,
+ expression_attribute_names,
+ expression_attribute_values)
except ValueError as err:
er = 'com.amazonaws.dynamodb.v20111205#ValidationError'
return self.error(er, 'Bad Filter Expression: {0}'.format(err))
@@ -478,12 +492,12 @@ class DynamoHandler(BaseResponse):
name = self.body['TableName']
keys = self.body['Key']
return_values = self.body.get('ReturnValues', '')
- table = dynamodb_backend2.get_table(name)
+ table = self.dynamodb_backend.get_table(name)
if not table:
er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')
- item = dynamodb_backend2.delete_item(name, keys)
+ item = self.dynamodb_backend.delete_item(name, keys)
if item and return_values == 'ALL_OLD':
item_dict = item.to_json()
else:
@@ -500,7 +514,7 @@ class DynamoHandler(BaseResponse):
'ExpressionAttributeNames', {})
expression_attribute_values = self.body.get(
'ExpressionAttributeValues', {})
- existing_item = dynamodb_backend2.get_item(name, key)
+ existing_item = self.dynamodb_backend.get_item(name, key)
if 'Expected' in self.body:
expected = self.body['Expected']
@@ -536,9 +550,10 @@ class DynamoHandler(BaseResponse):
'\s*([=\+-])\s*', '\\1', update_expression)
try:
- item = dynamodb_backend2.update_item(
- name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values,
- expected)
+ item = self.dynamodb_backend.update_item(
+ name, key, update_expression, attribute_updates, expression_attribute_names,
+ expression_attribute_values, expected
+ )
except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')
@@ -555,3 +570,26 @@ class DynamoHandler(BaseResponse):
item_dict['Attributes'] = {}
return dynamo_json_dump(item_dict)
+
+ def describe_limits(self):
+ return json.dumps({
+ 'AccountMaxReadCapacityUnits': 20000,
+ 'TableMaxWriteCapacityUnits': 10000,
+ 'AccountMaxWriteCapacityUnits': 20000,
+ 'TableMaxReadCapacityUnits': 10000
+ })
+
+ def update_time_to_live(self):
+ name = self.body['TableName']
+ ttl_spec = self.body['TimeToLiveSpecification']
+
+ self.dynamodb_backend.update_ttl(name, ttl_spec)
+
+ return json.dumps({'TimeToLiveSpecification': ttl_spec})
+
+ def describe_time_to_live(self):
+ name = self.body['TableName']
+
+ ttl_spec = self.dynamodb_backend.describe_ttl(name)
+
+ return json.dumps({'TimeToLiveDescription': ttl_spec})
diff --git a/moto/ecs/models.py b/moto/ecs/models.py
index f5a928791..b44184033 100644
--- a/moto/ecs/models.py
+++ b/moto/ecs/models.py
@@ -4,6 +4,7 @@ from datetime import datetime
from random import random, randint
import pytz
+from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
from copy import copy
@@ -148,7 +149,7 @@ class Task(BaseObject):
resource_requirements, overrides={}, started_by=''):
self.cluster_arn = cluster.arn
self.task_arn = 'arn:aws:ecs:us-east-1:012345678910:task/{0}'.format(
- str(uuid.uuid1()))
+ str(uuid.uuid4()))
self.container_instance_arn = container_instance_arn
self.last_status = 'RUNNING'
self.desired_status = 'RUNNING'
@@ -288,7 +289,7 @@ class ContainerInstance(BaseObject):
'stringSetValue': [],
'type': 'STRINGSET'}]
self.container_instance_arn = "arn:aws:ecs:us-east-1:012345678910:container-instance/{0}".format(
- str(uuid.uuid1()))
+ str(uuid.uuid4()))
self.pending_tasks_count = 0
self.remaining_resources = [
{'doubleValue': 0.0,
@@ -321,6 +322,8 @@ class ContainerInstance(BaseObject):
'dockerVersion': 'DockerVersion: 1.5.0'
}
+ self.attributes = {}
+
@property
def response_object(self):
response_object = self.gen_response_object()
@@ -766,6 +769,102 @@ class EC2ContainerServiceBackend(BaseBackend):
raise Exception("{0} is not a cluster".format(cluster_name))
pass
+ def put_attributes(self, cluster_name, attributes=None):
+ if cluster_name is None or cluster_name not in self.clusters:
+ raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400)
+
+ if attributes is None:
+ raise JsonRESTError('InvalidParameterException', 'attributes value is required')
+
+ for attr in attributes:
+ self._put_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType'))
+
+ def _put_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None):
+ if target_id is None and target_type is None:
+ for instance in self.container_instances[cluster_name].values():
+ instance.attributes[name] = value
+ elif target_type is None:
+ # targetId is full container instance arn
+ try:
+ arn = target_id.rsplit('/', 1)[-1]
+ self.container_instances[cluster_name][arn].attributes[name] = value
+ except KeyError:
+ raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
+ else:
+ # targetId is container uuid, targetType must be container-instance
+ try:
+ if target_type != 'container-instance':
+ raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
+
+ self.container_instances[cluster_name][target_id].attributes[name] = value
+ except KeyError:
+ raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
+
+ def list_attributes(self, target_type, cluster_name=None, attr_name=None, attr_value=None, max_results=None, next_token=None):
+ if target_type != 'container-instance':
+ raise JsonRESTError('InvalidParameterException', 'targetType must be container-instance')
+
+ filters = [lambda x: True]
+
+ # item will be {0 cluster_name, 1 arn, 2 name, 3 value}
+ if cluster_name is not None:
+ filters.append(lambda item: item[0] == cluster_name)
+ if attr_name:
+ filters.append(lambda item: item[2] == attr_name)
+        if attr_value:
+ filters.append(lambda item: item[3] == attr_value)
+
+        all_attrs = []
+        for cluster, cobj in self.container_instances.items():
+            for container_instance in cobj.values():
+                for key, value in container_instance.attributes.items():
+                    all_attrs.append((cluster, container_instance.container_instance_arn, key, value))
+
+ return filter(lambda x: all(f(x) for f in filters), all_attrs)
+
+ def delete_attributes(self, cluster_name, attributes=None):
+ if cluster_name is None or cluster_name not in self.clusters:
+ raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400)
+
+ if attributes is None:
+ raise JsonRESTError('InvalidParameterException', 'attributes value is required')
+
+ for attr in attributes:
+ self._delete_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType'))
+
+ def _delete_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None):
+ if target_id is None and target_type is None:
+ for instance in self.container_instances[cluster_name].values():
+ if name in instance.attributes and instance.attributes[name] == value:
+ del instance.attributes[name]
+ elif target_type is None:
+ # targetId is full container instance arn
+ try:
+ arn = target_id.rsplit('/', 1)[-1]
+ instance = self.container_instances[cluster_name][arn]
+ if name in instance.attributes and instance.attributes[name] == value:
+ del instance.attributes[name]
+ except KeyError:
+ raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
+ else:
+ # targetId is container uuid, targetType must be container-instance
+ try:
+ if target_type != 'container-instance':
+ raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
+
+ instance = self.container_instances[cluster_name][target_id]
+ if name in instance.attributes and instance.attributes[name] == value:
+ del instance.attributes[name]
+ except KeyError:
+ raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id))
+
+ def list_task_definition_families(self, family_prefix=None, status=None, max_results=None, next_token=None):
+ for task_fam in self.task_definitions:
+ if family_prefix is not None and not task_fam.startswith(family_prefix):
+ continue
+
+ yield task_fam
+
ecs_backends = {}
for region, ec2_backend in ec2_backends.items():
diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py
index 8f6fe850f..e81e04145 100644
--- a/moto/ecs/responses.py
+++ b/moto/ecs/responses.py
@@ -9,6 +9,12 @@ class EC2ContainerServiceResponse(BaseResponse):
@property
def ecs_backend(self):
+ """
+ ECS Backend
+
+ :return: ECS Backend object
+ :rtype: moto.ecs.models.EC2ContainerServiceBackend
+ """
return ecs_backends[self.region]
@property
@@ -34,7 +40,7 @@ class EC2ContainerServiceResponse(BaseResponse):
cluster_arns = self.ecs_backend.list_clusters()
return json.dumps({
'clusterArns': cluster_arns
- # 'nextToken': str(uuid.uuid1())
+ # 'nextToken': str(uuid.uuid4())
})
def describe_clusters(self):
@@ -66,7 +72,7 @@ class EC2ContainerServiceResponse(BaseResponse):
task_definition_arns = self.ecs_backend.list_task_definitions()
return json.dumps({
'taskDefinitionArns': task_definition_arns
- # 'nextToken': str(uuid.uuid1())
+ # 'nextToken': str(uuid.uuid4())
})
def describe_task_definition(self):
@@ -159,7 +165,7 @@ class EC2ContainerServiceResponse(BaseResponse):
return json.dumps({
'serviceArns': service_arns
# ,
- # 'nextToken': str(uuid.uuid1())
+ # 'nextToken': str(uuid.uuid4())
})
def describe_services(self):
@@ -245,3 +251,62 @@ class EC2ContainerServiceResponse(BaseResponse):
'failures': [ci.response_object for ci in failures],
'containerInstances': [ci.response_object for ci in container_instances]
})
+
+ def put_attributes(self):
+ cluster_name = self._get_param('cluster')
+ attributes = self._get_param('attributes')
+
+ self.ecs_backend.put_attributes(cluster_name, attributes)
+
+ return json.dumps({'attributes': attributes})
+
+ def list_attributes(self):
+ cluster_name = self._get_param('cluster')
+ attr_name = self._get_param('attributeName')
+ attr_value = self._get_param('attributeValue')
+ target_type = self._get_param('targetType')
+ max_results = self._get_param('maxResults')
+ next_token = self._get_param('nextToken')
+
+ results = self.ecs_backend.list_attributes(target_type, cluster_name, attr_name, attr_value, max_results, next_token)
+ # Result will be [item will be {0 cluster_name, 1 arn, 2 name, 3 value}]
+
+ formatted_results = []
+ for _, arn, name, value in results:
+ tmp_result = {
+ 'name': name,
+ 'targetId': arn
+ }
+ if value is not None:
+ tmp_result['value'] = value
+ formatted_results.append(tmp_result)
+
+ return json.dumps({'attributes': formatted_results})
+
+ def delete_attributes(self):
+ cluster_name = self._get_param('cluster')
+ attributes = self._get_param('attributes')
+
+ self.ecs_backend.delete_attributes(cluster_name, attributes)
+
+ return json.dumps({'attributes': attributes})
+
+ def discover_poll_endpoint(self):
+ # Here are the arguments, this api is used by the ecs client so obviously no decent
+ # documentation. Hence I've responded with valid but useless data
+ # cluster_name = self._get_param('cluster')
+ # instance = self._get_param('containerInstance')
+ return json.dumps({
+ 'endpoint': 'http://localhost',
+ 'telemetryEndpoint': 'http://localhost'
+ })
+
+ def list_task_definition_families(self):
+ family_prefix = self._get_param('familyPrefix')
+ status = self._get_param('status')
+ max_results = self._get_param('maxResults')
+ next_token = self._get_param('nextToken')
+
+ results = self.ecs_backend.list_task_definition_families(family_prefix, status, max_results, next_token)
+
+ return json.dumps({'families': list(results)})
diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py
index c565aa062..7b8cd3571 100644
--- a/moto/elbv2/models.py
+++ b/moto/elbv2/models.py
@@ -3,8 +3,10 @@ from __future__ import unicode_literals
import datetime
import re
from moto.compat import OrderedDict
+from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel
from moto.ec2.models import ec2_backends
+from moto.acm.models import acm_backends
from .exceptions import (
DuplicateLoadBalancerName,
DuplicateListenerError,
@@ -40,6 +42,8 @@ class FakeHealthStatus(BaseModel):
class FakeTargetGroup(BaseModel):
+ HTTP_CODE_REGEX = re.compile(r'(?:(?:\d+-\d+|\d+),?)+')
+
def __init__(self,
name,
arn,
@@ -157,6 +161,7 @@ class FakeListener(BaseModel):
self.port = port
self.ssl_policy = ssl_policy
self.certificate = certificate
+ self.certificates = [certificate] if certificate is not None else []
self.default_actions = default_actions
self._non_default_rules = []
self._default_rule = FakeRule(
@@ -227,6 +232,8 @@ class FakeBackend(BaseModel):
class FakeLoadBalancer(BaseModel):
+ VALID_ATTRS = {'access_logs.s3.enabled', 'access_logs.s3.bucket', 'access_logs.s3.prefix',
+ 'deletion_protection.enabled', 'idle_timeout.timeout_seconds'}
def __init__(self, name, security_groups, subnets, vpc_id, arn, dns_name, scheme='internet-facing'):
self.name = name
@@ -240,6 +247,15 @@ class FakeLoadBalancer(BaseModel):
self.arn = arn
self.dns_name = dns_name
+ self.stack = 'ipv4'
+ self.attrs = {
+ 'access_logs.s3.enabled': 'false',
+ 'access_logs.s3.bucket': None,
+ 'access_logs.s3.prefix': None,
+ 'deletion_protection.enabled': 'false',
+ 'idle_timeout.timeout_seconds': '60'
+ }
+
@property
def physical_resource_id(self):
return self.arn
@@ -289,6 +305,26 @@ class ELBv2Backend(BaseBackend):
self.target_groups = OrderedDict()
self.load_balancers = OrderedDict()
+ @property
+ def ec2_backend(self):
+ """
+ EC2 backend
+
+ :return: EC2 Backend
+ :rtype: moto.ec2.models.EC2Backend
+ """
+ return ec2_backends[self.region_name]
+
+ @property
+ def acm_backend(self):
+ """
+ ACM backend
+
+ :return: ACM Backend
+ :rtype: moto.acm.models.AWSCertificateManagerBackend
+ """
+ return acm_backends[self.region_name]
+
def reset(self):
region_name = self.region_name
self.__dict__ = {}
@@ -296,12 +332,11 @@ class ELBv2Backend(BaseBackend):
def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'):
vpc_id = None
- ec2_backend = ec2_backends[self.region_name]
subnets = []
if not subnet_ids:
raise SubnetNotFoundError()
for subnet_id in subnet_ids:
- subnet = ec2_backend.get_subnet(subnet_id)
+ subnet = self.ec2_backend.get_subnet(subnet_id)
if subnet is None:
raise SubnetNotFoundError()
subnets.append(subnet)
@@ -395,6 +430,9 @@ class ELBv2Backend(BaseBackend):
if target_group.name == name:
raise DuplicateTargetGroupName()
+ if FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None:
+ raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')
+
arn = "arn:aws:elasticloadbalancing:%s:1:targetgroup/%s/50dc6c495c0c9188" % (self.region_name, name)
target_group = FakeTargetGroup(name, arn, **kwargs)
self.target_groups[target_group.arn] = target_group
@@ -642,6 +680,166 @@ class ELBv2Backend(BaseBackend):
modified_rules.append(given_rule)
return modified_rules
+ def set_ip_address_type(self, arn, ip_type):
+ if ip_type not in ('internal', 'dualstack'):
+ raise RESTError('InvalidParameterValue', 'IpAddressType must be either internal | dualstack')
+
+ balancer = self.load_balancers.get(arn)
+ if balancer is None:
+ raise LoadBalancerNotFoundError()
+
+ if ip_type == 'dualstack' and balancer.scheme == 'internal':
+ raise RESTError('InvalidConfigurationRequest', 'Internal load balancers cannot be dualstack')
+
+ balancer.stack = ip_type
+
+ def set_security_groups(self, arn, sec_groups):
+ balancer = self.load_balancers.get(arn)
+ if balancer is None:
+ raise LoadBalancerNotFoundError()
+
+ # Check all security groups exist
+ for sec_group_id in sec_groups:
+ if self.ec2_backend.get_security_group_from_id(sec_group_id) is None:
+ raise RESTError('InvalidSecurityGroup', 'Security group {0} does not exist'.format(sec_group_id))
+
+ balancer.security_groups = sec_groups
+
+ def set_subnets(self, arn, subnets):
+ balancer = self.load_balancers.get(arn)
+ if balancer is None:
+ raise LoadBalancerNotFoundError()
+
+ subnet_objects = []
+ sub_zone_list = {}
+ for subnet in subnets:
+ try:
+ subnet = self.ec2_backend.get_subnet(subnet)
+
+ if subnet.availability_zone in sub_zone_list:
+ raise RESTError('InvalidConfigurationRequest', 'More than 1 subnet cannot be specified for 1 availability zone')
+
+ sub_zone_list[subnet.availability_zone] = subnet.id
+ subnet_objects.append(subnet)
+ except Exception:
+ raise SubnetNotFoundError()
+
+ if len(sub_zone_list) < 2:
+ raise RESTError('InvalidConfigurationRequest', 'More than 1 availability zone must be specified')
+
+ balancer.subnets = subnet_objects
+
+ return sub_zone_list.items()
+
+ def modify_load_balancer_attributes(self, arn, attrs):
+ balancer = self.load_balancers.get(arn)
+ if balancer is None:
+ raise LoadBalancerNotFoundError()
+
+ for key in attrs:
+ if key not in FakeLoadBalancer.VALID_ATTRS:
+ raise RESTError('InvalidConfigurationRequest', 'Key {0} not valid'.format(key))
+
+ balancer.attrs.update(attrs)
+ return balancer.attrs
+
+ def describe_load_balancer_attributes(self, arn):
+ balancer = self.load_balancers.get(arn)
+ if balancer is None:
+ raise LoadBalancerNotFoundError()
+
+ return balancer.attrs
+
+ def modify_target_group(self, arn, health_check_proto=None, health_check_port=None, health_check_path=None, health_check_interval=None,
+ health_check_timeout=None, healthy_threshold_count=None, unhealthy_threshold_count=None, http_codes=None):
+ target_group = self.target_groups.get(arn)
+ if target_group is None:
+ raise TargetGroupNotFoundError()
+
+ if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(http_codes) is None:
+ raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...')
+
+ if http_codes is not None:
+ target_group.matcher['HttpCode'] = http_codes
+ if health_check_interval is not None:
+ target_group.healthcheck_interval_seconds = health_check_interval
+ if health_check_path is not None:
+ target_group.healthcheck_path = health_check_path
+ if health_check_port is not None:
+ target_group.healthcheck_port = health_check_port
+ if health_check_proto is not None:
+ target_group.healthcheck_protocol = health_check_proto
+ if health_check_timeout is not None:
+ target_group.healthcheck_timeout_seconds = health_check_timeout
+ if healthy_threshold_count is not None:
+ target_group.healthy_threshold_count = healthy_threshold_count
+ if unhealthy_threshold_count is not None:
+ target_group.unhealthy_threshold_count = unhealthy_threshold_count
+
+ return target_group
+
+ def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None, certificates=None, default_actions=None):
+ for load_balancer in self.load_balancers.values():
+ if arn in load_balancer.listeners:
+ break
+ else:
+ raise ListenerNotFoundError()
+
+ listener = load_balancer.listeners[arn]
+
+ if port is not None:
+ for listener_arn, current_listener in load_balancer.listeners.items():
+ if listener_arn == arn:
+ continue
+ if listener.port == port:
+ raise DuplicateListenerError()
+
+ listener.port = port
+
+ if protocol is not None:
+ if protocol not in ('HTTP', 'HTTPS', 'TCP'):
+ raise RESTError('UnsupportedProtocol', 'Protocol {0} is not supported'.format(protocol))
+
+ # HTTPS checks
+ if protocol == 'HTTPS':
+ # HTTPS
+
+ # Might already be HTTPS so may not provide certs
+ if certificates is None and listener.protocol != 'HTTPS':
+ raise RESTError('InvalidConfigurationRequest', 'Certificates must be provided for HTTPS')
+
+ # Check certificates exist
+ if certificates is not None:
+ default_cert = None
+ all_certs = set() # for SNI
+ for cert in certificates:
+ if cert['is_default'] == 'true':
+ default_cert = cert['certificate_arn']
+ try:
+ self.acm_backend.get_certificate(cert['certificate_arn'])
+ except Exception:
+ raise RESTError('CertificateNotFound', 'Certificate {0} not found'.format(cert['certificate_arn']))
+
+ all_certs.add(cert['certificate_arn'])
+
+ if default_cert is None:
+ raise RESTError('InvalidConfigurationRequest', 'No default certificate')
+
+ listener.certificate = default_cert
+ listener.certificates = list(all_certs)
+
+ listener.protocol = protocol
+
+ if ssl_policy is not None:
+ # It's already validated in responses.py
+ listener.ssl_policy = ssl_policy
+
+ if default_actions is not None:
+ # Is currently not validated
+ listener.default_actions = default_actions
+
+ return listener
+
def _any_listener_using(self, target_group_arn):
for load_balancer in self.load_balancers.values():
for listener in load_balancer.listeners.values():
diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py
index e8bc5bc23..aa855b430 100644
--- a/moto/elbv2/responses.py
+++ b/moto/elbv2/responses.py
@@ -1,4 +1,6 @@
from __future__ import unicode_literals
+from moto.core.exceptions import RESTError
+from moto.core.utils import amzn_request_id
from moto.core.responses import BaseResponse
from .models import elbv2_backends
from .exceptions import DuplicateTagKeysError
@@ -6,12 +8,131 @@ from .exceptions import LoadBalancerNotFoundError
from .exceptions import TargetGroupNotFoundError
-class ELBV2Response(BaseResponse):
+SSL_POLICIES = [
+ {
+ 'name': 'ELBSecurityPolicy-2016-08',
+ 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
+ 'ciphers': [
+ {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
+ {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
+ {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
+ {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
+ {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
+ {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
+ {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
+ {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
+ {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
+ {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
+ {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
+ {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
+ {'name': 'AES128-GCM-SHA256', 'priority': 13},
+ {'name': 'AES128-SHA256', 'priority': 14},
+ {'name': 'AES128-SHA', 'priority': 15},
+ {'name': 'AES256-GCM-SHA384', 'priority': 16},
+ {'name': 'AES256-SHA256', 'priority': 17},
+ {'name': 'AES256-SHA', 'priority': 18}
+ ],
+ },
+ {
+ 'name': 'ELBSecurityPolicy-TLS-1-2-2017-01',
+ 'ssl_protocols': ['TLSv1.2'],
+ 'ciphers': [
+ {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
+ {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
+ {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
+ {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
+ {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 5},
+ {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 6},
+ {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 7},
+ {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 8},
+ {'name': 'AES128-GCM-SHA256', 'priority': 9},
+ {'name': 'AES128-SHA256', 'priority': 10},
+ {'name': 'AES256-GCM-SHA384', 'priority': 11},
+ {'name': 'AES256-SHA256', 'priority': 12}
+ ]
+ },
+ {
+ 'name': 'ELBSecurityPolicy-TLS-1-1-2017-01',
+ 'ssl_protocols': ['TLSv1.1', 'TLSv1.2'],
+ 'ciphers': [
+ {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
+ {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
+ {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
+ {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
+ {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
+ {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
+ {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
+ {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
+ {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
+ {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
+ {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
+ {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
+ {'name': 'AES128-GCM-SHA256', 'priority': 13},
+ {'name': 'AES128-SHA256', 'priority': 14},
+ {'name': 'AES128-SHA', 'priority': 15},
+ {'name': 'AES256-GCM-SHA384', 'priority': 16},
+ {'name': 'AES256-SHA256', 'priority': 17},
+ {'name': 'AES256-SHA', 'priority': 18}
+ ]
+ },
+ {
+ 'name': 'ELBSecurityPolicy-2015-05',
+ 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
+ 'ciphers': [
+ {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
+ {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
+ {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
+ {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
+ {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
+ {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
+ {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
+ {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
+ {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
+ {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
+ {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
+ {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
+ {'name': 'AES128-GCM-SHA256', 'priority': 13},
+ {'name': 'AES128-SHA256', 'priority': 14},
+ {'name': 'AES128-SHA', 'priority': 15},
+ {'name': 'AES256-GCM-SHA384', 'priority': 16},
+ {'name': 'AES256-SHA256', 'priority': 17},
+ {'name': 'AES256-SHA', 'priority': 18}
+ ]
+ },
+ {
+ 'name': 'ELBSecurityPolicy-TLS-1-0-2015-04',
+ 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'],
+ 'ciphers': [
+ {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1},
+ {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2},
+ {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3},
+ {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4},
+ {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5},
+ {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6},
+ {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7},
+ {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8},
+ {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9},
+ {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10},
+ {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11},
+ {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12},
+ {'name': 'AES128-GCM-SHA256', 'priority': 13},
+ {'name': 'AES128-SHA256', 'priority': 14},
+ {'name': 'AES128-SHA', 'priority': 15},
+ {'name': 'AES256-GCM-SHA384', 'priority': 16},
+ {'name': 'AES256-SHA256', 'priority': 17},
+ {'name': 'AES256-SHA', 'priority': 18},
+ {'name': 'DES-CBC3-SHA', 'priority': 19}
+ ]
+ }
+]
+
+class ELBV2Response(BaseResponse):
@property
def elbv2_backend(self):
return elbv2_backends[self.region]
+ @amzn_request_id
def create_load_balancer(self):
load_balancer_name = self._get_param('Name')
subnet_ids = self._get_multi_param("Subnets.member")
@@ -28,6 +149,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE)
return template.render(load_balancer=load_balancer)
+ @amzn_request_id
def create_rule(self):
lister_arn = self._get_param('ListenerArn')
_conditions = self._get_list_prefix('Conditions.member')
@@ -52,6 +174,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(CREATE_RULE_TEMPLATE)
return template.render(rules=rules)
+ @amzn_request_id
def create_target_group(self):
name = self._get_param('Name')
vpc_id = self._get_param('VpcId')
@@ -64,6 +187,7 @@ class ELBV2Response(BaseResponse):
healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5')
healthy_threshold_count = self._get_param('HealthyThresholdCount', '5')
unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2')
+ http_codes = self._get_param('Matcher.HttpCode', '200')
target_group = self.elbv2_backend.create_target_group(
name,
@@ -77,11 +201,13 @@ class ELBV2Response(BaseResponse):
healthcheck_timeout_seconds=healthcheck_timeout_seconds,
healthy_threshold_count=healthy_threshold_count,
unhealthy_threshold_count=unhealthy_threshold_count,
+ matcher={'HttpCode': http_codes}
)
template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE)
return template.render(target_group=target_group)
+ @amzn_request_id
def create_listener(self):
load_balancer_arn = self._get_param('LoadBalancerArn')
protocol = self._get_param('Protocol')
@@ -105,6 +231,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(CREATE_LISTENER_TEMPLATE)
return template.render(listener=listener)
+ @amzn_request_id
def describe_load_balancers(self):
arns = self._get_multi_param("LoadBalancerArns.member")
names = self._get_multi_param("Names.member")
@@ -124,6 +251,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE)
return template.render(load_balancers=load_balancers_resp, marker=next_marker)
+ @amzn_request_id
def describe_rules(self):
listener_arn = self._get_param('ListenerArn')
rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None
@@ -144,6 +272,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_RULES_TEMPLATE)
return template.render(rules=rules_resp, marker=next_marker)
+ @amzn_request_id
def describe_target_groups(self):
load_balancer_arn = self._get_param('LoadBalancerArn')
target_group_arns = self._get_multi_param('TargetGroupArns.member')
@@ -153,6 +282,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE)
return template.render(target_groups=target_groups)
+ @amzn_request_id
def describe_target_group_attributes(self):
target_group_arn = self._get_param('TargetGroupArn')
target_group = self.elbv2_backend.target_groups.get(target_group_arn)
@@ -161,6 +291,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
return template.render(attributes=target_group.attributes)
+ @amzn_request_id
def describe_listeners(self):
load_balancer_arn = self._get_param('LoadBalancerArn')
listener_arns = self._get_multi_param('ListenerArns.member')
@@ -171,30 +302,35 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE)
return template.render(listeners=listeners)
+ @amzn_request_id
def delete_load_balancer(self):
arn = self._get_param('LoadBalancerArn')
self.elbv2_backend.delete_load_balancer(arn)
template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE)
return template.render()
+ @amzn_request_id
def delete_rule(self):
arn = self._get_param('RuleArn')
self.elbv2_backend.delete_rule(arn)
template = self.response_template(DELETE_RULE_TEMPLATE)
return template.render()
+ @amzn_request_id
def delete_target_group(self):
arn = self._get_param('TargetGroupArn')
self.elbv2_backend.delete_target_group(arn)
template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE)
return template.render()
+ @amzn_request_id
def delete_listener(self):
arn = self._get_param('ListenerArn')
self.elbv2_backend.delete_listener(arn)
template = self.response_template(DELETE_LISTENER_TEMPLATE)
return template.render()
+ @amzn_request_id
def modify_rule(self):
rule_arn = self._get_param('RuleArn')
_conditions = self._get_list_prefix('Conditions.member')
@@ -217,6 +353,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(MODIFY_RULE_TEMPLATE)
return template.render(rules=rules)
+ @amzn_request_id
def modify_target_group_attributes(self):
target_group_arn = self._get_param('TargetGroupArn')
target_group = self.elbv2_backend.target_groups.get(target_group_arn)
@@ -230,6 +367,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
return template.render(attributes=attributes)
+ @amzn_request_id
def register_targets(self):
target_group_arn = self._get_param('TargetGroupArn')
targets = self._get_list_prefix('Targets.member')
@@ -238,6 +376,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(REGISTER_TARGETS_TEMPLATE)
return template.render()
+ @amzn_request_id
def deregister_targets(self):
target_group_arn = self._get_param('TargetGroupArn')
targets = self._get_list_prefix('Targets.member')
@@ -246,6 +385,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DEREGISTER_TARGETS_TEMPLATE)
return template.render()
+ @amzn_request_id
def describe_target_health(self):
target_group_arn = self._get_param('TargetGroupArn')
targets = self._get_list_prefix('Targets.member')
@@ -254,6 +394,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE)
return template.render(target_health_descriptions=target_health_descriptions)
+ @amzn_request_id
def set_rule_priorities(self):
rule_priorities = self._get_list_prefix('RulePriorities.member')
for rule_priority in rule_priorities:
@@ -262,6 +403,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE)
return template.render(rules=rules)
+ @amzn_request_id
def add_tags(self):
resource_arns = self._get_multi_param('ResourceArns.member')
@@ -281,6 +423,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(ADD_TAGS_TEMPLATE)
return template.render()
+ @amzn_request_id
def remove_tags(self):
resource_arns = self._get_multi_param('ResourceArns.member')
tag_keys = self._get_multi_param('TagKeys.member')
@@ -301,6 +444,7 @@ class ELBV2Response(BaseResponse):
template = self.response_template(REMOVE_TAGS_TEMPLATE)
return template.render()
+ @amzn_request_id
def describe_tags(self):
resource_arns = self._get_multi_param('ResourceArns.member')
resources = []
@@ -320,6 +464,125 @@ class ELBV2Response(BaseResponse):
template = self.response_template(DESCRIBE_TAGS_TEMPLATE)
return template.render(resources=resources)
+ @amzn_request_id
+ def describe_account_limits(self):
+ # Supports paging but not worth implementing yet
+ # marker = self._get_param('Marker')
+ # page_size = self._get_param('PageSize')
+
+ limits = {
+ 'application-load-balancers': 20,
+ 'target-groups': 3000,
+ 'targets-per-application-load-balancer': 30,
+ 'listeners-per-application-load-balancer': 50,
+ 'rules-per-application-load-balancer': 100,
+ 'network-load-balancers': 20,
+ 'targets-per-network-load-balancer': 200,
+ 'listeners-per-network-load-balancer': 50
+ }
+
+ template = self.response_template(DESCRIBE_LIMITS_TEMPLATE)
+ return template.render(limits=limits)
+
+ @amzn_request_id
+ def describe_ssl_policies(self):
+ names = self._get_multi_param('Names.member.')
+ # Supports paging but not worth implementing yet
+ # marker = self._get_param('Marker')
+ # page_size = self._get_param('PageSize')
+
+ policies = SSL_POLICIES
+ if names:
+ policies = filter(lambda policy: policy['name'] in names, policies)
+
+ template = self.response_template(DESCRIBE_SSL_POLICIES_TEMPLATE)
+ return template.render(policies=policies)
+
+ @amzn_request_id
+ def set_ip_address_type(self):
+ arn = self._get_param('LoadBalancerArn')
+ ip_type = self._get_param('IpAddressType')
+
+ self.elbv2_backend.set_ip_address_type(arn, ip_type)
+
+ template = self.response_template(SET_IP_ADDRESS_TYPE_TEMPLATE)
+ return template.render(ip_type=ip_type)
+
+ @amzn_request_id
+ def set_security_groups(self):
+ arn = self._get_param('LoadBalancerArn')
+ sec_groups = self._get_multi_param('SecurityGroups.member.')
+
+ self.elbv2_backend.set_security_groups(arn, sec_groups)
+
+ template = self.response_template(SET_SECURITY_GROUPS_TEMPLATE)
+ return template.render(sec_groups=sec_groups)
+
+ @amzn_request_id
+ def set_subnets(self):
+ arn = self._get_param('LoadBalancerArn')
+ subnets = self._get_multi_param('Subnets.member.')
+
+ subnet_zone_list = self.elbv2_backend.set_subnets(arn, subnets)
+
+ template = self.response_template(SET_SUBNETS_TEMPLATE)
+ return template.render(subnets=subnet_zone_list)
+
+ @amzn_request_id
+ def modify_load_balancer_attributes(self):
+ arn = self._get_param('LoadBalancerArn')
+ attrs = self._get_map_prefix('Attributes.member', key_end='Key', value_end='Value')
+
+ all_attrs = self.elbv2_backend.modify_load_balancer_attributes(arn, attrs)
+
+ template = self.response_template(MODIFY_LOADBALANCER_ATTRS_TEMPLATE)
+ return template.render(attrs=all_attrs)
+
+ @amzn_request_id
+ def describe_load_balancer_attributes(self):
+ arn = self._get_param('LoadBalancerArn')
+ attrs = self.elbv2_backend.describe_load_balancer_attributes(arn)
+
+ template = self.response_template(DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE)
+ return template.render(attrs=attrs)
+
+ @amzn_request_id
+ def modify_target_group(self):
+ arn = self._get_param('TargetGroupArn')
+
+ health_check_proto = self._get_param('HealthCheckProtocol') # 'HTTP' | 'HTTPS' | 'TCP',
+ health_check_port = self._get_param('HealthCheckPort')
+ health_check_path = self._get_param('HealthCheckPath')
+ health_check_interval = self._get_param('HealthCheckIntervalSeconds')
+ health_check_timeout = self._get_param('HealthCheckTimeoutSeconds')
+ healthy_threshold_count = self._get_param('HealthyThresholdCount')
+ unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount')
+ http_codes = self._get_param('Matcher.HttpCode')
+
+ target_group = self.elbv2_backend.modify_target_group(arn, health_check_proto, health_check_port, health_check_path, health_check_interval,
+ health_check_timeout, healthy_threshold_count, unhealthy_threshold_count, http_codes)
+
+ template = self.response_template(MODIFY_TARGET_GROUP_TEMPLATE)
+ return template.render(target_group=target_group)
+
+ @amzn_request_id
+ def modify_listener(self):
+ arn = self._get_param('ListenerArn')
+ port = self._get_param('Port')
+ protocol = self._get_param('Protocol')
+ ssl_policy = self._get_param('SslPolicy')
+ certificates = self._get_list_prefix('Certificates.member')
+ default_actions = self._get_list_prefix('DefaultActions.member')
+
+ # Should really move SSL Policies to models
+ if ssl_policy is not None and ssl_policy not in [item['name'] for item in SSL_POLICIES]:
+ raise RESTError('SSLPolicyNotFound', 'Policy {0} not found'.format(ssl_policy))
+
+ listener = self.elbv2_backend.modify_listener(arn, port, protocol, ssl_policy, certificates, default_actions)
+
+ template = self.response_template(MODIFY_LISTENER_TEMPLATE)
+ return template.render(listener=listener)
+
def _add_tags(self, resource):
tag_values = []
tag_keys = []
@@ -348,14 +611,14 @@ class ELBV2Response(BaseResponse):
ADD_TAGS_TEMPLATE = """
- 360e81f7-1100-11e4-b6ed-0f30EXAMPLE
+ {{ request_id }}
"""
REMOVE_TAGS_TEMPLATE = """
- 360e81f7-1100-11e4-b6ed-0f30EXAMPLE
+ {{ request_id }}
"""
@@ -378,11 +641,10 @@ DESCRIBE_TAGS_TEMPLATE = """
@@ -415,7 +677,7 @@ CREATE_LOAD_BALANCER_TEMPLATE = """
- 1549581b-12b7-11e3-895e-1334aEXAMPLE
+ {{ request_id }}
"""
DELETE_RULE_TEMPLATE = """
- 1549581b-12b7-11e3-895e-1334aEXAMPLE
+ {{ request_id }}
"""
DELETE_TARGET_GROUP_TEMPLATE = """
- 1549581b-12b7-11e3-895e-1334aEXAMPLE
+ {{ request_id }}
"""
DELETE_LISTENER_TEMPLATE = """
- 1549581b-12b7-11e3-895e-1334aEXAMPLE
+ {{ request_id }}
"""
@@ -586,7 +850,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """
@@ -682,11 +945,10 @@ DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """
- 70092c0e-f3a9-11e5-ae48-cff02092876b
+ {{ request_id }}
"""
-
DESCRIBE_LISTENERS_TEMPLATE = """
@@ -717,7 +979,7 @@ DESCRIBE_LISTENERS_TEMPLATE = """
- 70092c0e-f3a9-11e5-ae48-cff02092876b
+ {{ request_id }}
"""
@@ -793,7 +1055,7 @@ REGISTER_TARGETS_TEMPLATE = """
- 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE
+ {{ request_id }}
"""
-
DELETE_LOAD_BALANCER_LISTENERS = """
- 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE
+ {{ request_id }}
"""
@@ -848,7 +1109,7 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """
- 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE
+ {{ request_id }}
"""
@@ -882,7 +1143,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """
- 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE
+ {{ request_id }}
"""
@@ -898,7 +1159,7 @@ CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """
- 07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE
+ {{ request_id }}
"""
@@ -906,7 +1167,7 @@ SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """
- 0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE
+ {{ request_id }}
"""
@@ -929,7 +1190,7 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """
+
+
+ {% for key, value in limits.items() %}
+
+ {{ key }}
+ {{ value }}
+
+ {% endfor %}
+
+
+
+ {{ request_id }}
+
+"""
+
+DESCRIBE_SSL_POLICIES_TEMPLATE = """
+
+
+ {% for policy in policies %}
+
+ {{ policy['name'] }}
+
+ {% for cipher in policy['ciphers'] %}
+
+ {{ cipher['name'] }}
+ {{ cipher['priority'] }}
+
+ {% endfor %}
+
+
+ {% for proto in policy['ssl_protocols'] %}
+ {{ proto }}
+ {% endfor %}
+
+
+ {% endfor %}
+
+
+
+ {{ request_id }}
+
+"""
+
+SET_IP_ADDRESS_TYPE_TEMPLATE = """
+
+ {{ ip_type }}
+
+
+ {{ request_id }}
+
+"""
+
+SET_SECURITY_GROUPS_TEMPLATE = """
+
+
+ {% for group in sec_groups %}
+ {{ group }}
+ {% endfor %}
+
+
+
+ {{ request_id }}
+
+"""
+
+SET_SUBNETS_TEMPLATE = """
+
+
+ {% for zone_id, subnet_id in subnets %}
+
+ {{ subnet_id }}
+ {{ zone_id }}
+
+ {% endfor %}
+
+
+
+ {{ request_id }}
+
+"""
+
+MODIFY_LOADBALANCER_ATTRS_TEMPLATE = """
+
+
+ {% for key, value in attrs.items() %}
+
+ {% if value == None %}{% else %}{{ value }}{% endif %}
+ {{ key }}
+
+ {% endfor %}
+
+
+
+ {{ request_id }}
+
+"""
+
+DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE = """
+
+
+ {% for key, value in attrs.items() %}
+
+ {% if value == None %}{% else %}{{ value }}{% endif %}
+ {{ key }}
+
+ {% endfor %}
+
+
+
+ {{ request_id }}
+
+"""
+
+MODIFY_TARGET_GROUP_TEMPLATE = """
+
+
+
+ {{ target_group.arn }}
+ {{ target_group.name }}
+ {{ target_group.protocol }}
+ {{ target_group.port }}
+ {{ target_group.vpc_id }}
+ {{ target_group.healthcheck_protocol }}
+ {{ target_group.healthcheck_port }}
+ {{ target_group.healthcheck_path }}
+ {{ target_group.healthcheck_interval_seconds }}
+ {{ target_group.healthcheck_timeout_seconds }}
+ {{ target_group.healthy_threshold_count }}
+ {{ target_group.unhealthy_threshold_count }}
+
+ {{ target_group.matcher['HttpCode'] }}
+
+
+ {% for load_balancer_arn in target_group.load_balancer_arns %}
+ {{ load_balancer_arn }}
+ {% endfor %}
+
+
+
+
+
+ {{ request_id }}
+
+"""
+
+MODIFY_LISTENER_TEMPLATE = """
+
+
+
+ {{ listener.load_balancer_arn }}
+ {{ listener.protocol }}
+ {% if listener.certificates %}
+
+ {% for cert in listener.certificates %}
+
+ {{ cert }}
+
+ {% endfor %}
+
+ {% endif %}
+ {{ listener.port }}
+ {{ listener.ssl_policy }}
+ {{ listener.arn }}
+
+ {% for action in listener.default_actions %}
+
+ {{ action.type }}
+ {{ action.target_group_arn }}
+
+ {% endfor %}
+
+
+
+
+
+ {{ request_id }}
+
+"""
diff --git a/moto/events/models.py b/moto/events/models.py
index faec7b434..5c1d507ca 100644
--- a/moto/events/models.py
+++ b/moto/events/models.py
@@ -1,6 +1,7 @@
import os
import re
+from moto.core.exceptions import JsonRESTError
from moto.core import BaseBackend, BaseModel
@@ -50,6 +51,8 @@ class Rule(BaseModel):
class EventsBackend(BaseBackend):
+ ACCOUNT_ID = re.compile(r'^(\d{1,12}|\*)$')
+ STATEMENT_ID = re.compile(r'^[a-zA-Z0-9-_]{1,64}$')
def __init__(self):
self.rules = {}
@@ -58,6 +61,8 @@ class EventsBackend(BaseBackend):
self.rules_order = []
self.next_tokens = {}
+ self.permissions = {}
+
def _get_rule_by_index(self, i):
return self.rules.get(self.rules_order[i])
@@ -181,6 +186,17 @@ class EventsBackend(BaseBackend):
return False
+ def put_events(self, events):
+ num_events = len(events)
+
+ if num_events < 1:
+ raise JsonRESTError('ValidationError', 'Need at least 1 event')
+ elif num_events > 10:
+ raise JsonRESTError('ValidationError', 'Can only submit 10 events at once')
+
+ # We don't really need to store the events yet
+ return []
+
def remove_targets(self, name, ids):
rule = self.rules.get(name)
@@ -193,5 +209,40 @@ class EventsBackend(BaseBackend):
def test_event_pattern(self):
raise NotImplementedError()
+ def put_permission(self, action, principal, statement_id):
+ if action is None or action != 'PutEvents':
+ raise JsonRESTError('InvalidParameterValue', 'Action must be PutEvents')
+
+ if principal is None or self.ACCOUNT_ID.match(principal) is None:
+ raise JsonRESTError('InvalidParameterValue', 'Principal must match ^(\d{1,12}|\*)$')
+
+ if statement_id is None or self.STATEMENT_ID.match(statement_id) is None:
+ raise JsonRESTError('InvalidParameterValue', 'StatementId must match ^[a-zA-Z0-9-_]{1,64}$')
+
+ self.permissions[statement_id] = {'action': action, 'principal': principal}
+
+ def remove_permission(self, statement_id):
+ try:
+ del self.permissions[statement_id]
+ except KeyError:
+ raise JsonRESTError('ResourceNotFoundException', 'StatementId not found')
+
+ def describe_event_bus(self):
+ arn = "arn:aws:events:us-east-1:000000000000:event-bus/default"
+ statements = []
+ for statement_id, data in self.permissions.items():
+ statements.append({
+ 'Sid': statement_id,
+ 'Effect': 'Allow',
+ 'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])},
+ 'Action': 'events:{0}'.format(data['action']),
+ 'Resource': arn
+ })
+ return {
+ 'Policy': {'Version': '2012-10-17', 'Statement': statements},
+ 'Name': 'default',
+ 'Arn': arn
+ }
+
events_backend = EventsBackend()
diff --git a/moto/events/responses.py b/moto/events/responses.py
index 8f433844a..f9cb9b5b5 100644
--- a/moto/events/responses.py
+++ b/moto/events/responses.py
@@ -18,9 +18,17 @@ class EventsHandler(BaseResponse):
'RoleArn': rule.role_arn
}
- def load_body(self):
- decoded_body = self.body
- return json.loads(decoded_body or '{}')
+ @property
+ def request_params(self):
+ if not hasattr(self, '_json_body'):
+ try:
+ self._json_body = json.loads(self.body)
+ except ValueError:
+ self._json_body = {}
+ return self._json_body
+
+ def _get_param(self, param, if_none=None):
+ return self.request_params.get(param, if_none)
def error(self, type_, message='', status=400):
headers = self.response_headers
@@ -28,8 +36,7 @@ class EventsHandler(BaseResponse):
return json.dumps({'__type': type_, 'message': message}), headers,
def delete_rule(self):
- body = self.load_body()
- name = body.get('Name')
+ name = self._get_param('Name')
if not name:
return self.error('ValidationException', 'Parameter Name is required.')
@@ -38,8 +45,7 @@ class EventsHandler(BaseResponse):
return '', self.response_headers
def describe_rule(self):
- body = self.load_body()
- name = body.get('Name')
+ name = self._get_param('Name')
if not name:
return self.error('ValidationException', 'Parameter Name is required.')
@@ -53,8 +59,7 @@ class EventsHandler(BaseResponse):
return json.dumps(rule_dict), self.response_headers
def disable_rule(self):
- body = self.load_body()
- name = body.get('Name')
+ name = self._get_param('Name')
if not name:
return self.error('ValidationException', 'Parameter Name is required.')
@@ -65,8 +70,7 @@ class EventsHandler(BaseResponse):
return '', self.response_headers
def enable_rule(self):
- body = self.load_body()
- name = body.get('Name')
+ name = self._get_param('Name')
if not name:
return self.error('ValidationException', 'Parameter Name is required.')
@@ -80,10 +84,9 @@ class EventsHandler(BaseResponse):
pass
def list_rule_names_by_target(self):
- body = self.load_body()
- target_arn = body.get('TargetArn')
- next_token = body.get('NextToken')
- limit = body.get('Limit')
+ target_arn = self._get_param('TargetArn')
+ next_token = self._get_param('NextToken')
+ limit = self._get_param('Limit')
if not target_arn:
return self.error('ValidationException', 'Parameter TargetArn is required.')
@@ -94,10 +97,9 @@ class EventsHandler(BaseResponse):
return json.dumps(rule_names), self.response_headers
def list_rules(self):
- body = self.load_body()
- prefix = body.get('NamePrefix')
- next_token = body.get('NextToken')
- limit = body.get('Limit')
+ prefix = self._get_param('NamePrefix')
+ next_token = self._get_param('NextToken')
+ limit = self._get_param('Limit')
rules = events_backend.list_rules(prefix, next_token, limit)
rules_obj = {'Rules': []}
@@ -111,10 +113,9 @@ class EventsHandler(BaseResponse):
return json.dumps(rules_obj), self.response_headers
def list_targets_by_rule(self):
- body = self.load_body()
- rule_name = body.get('Rule')
- next_token = body.get('NextToken')
- limit = body.get('Limit')
+ rule_name = self._get_param('Rule')
+ next_token = self._get_param('NextToken')
+ limit = self._get_param('Limit')
if not rule_name:
return self.error('ValidationException', 'Parameter Rule is required.')
@@ -128,13 +129,25 @@ class EventsHandler(BaseResponse):
return json.dumps(targets), self.response_headers
def put_events(self):
+ events = self._get_param('Entries')
+
+ failed_entries = events_backend.put_events(events)
+
+ if failed_entries:
+ return json.dumps({
+ 'FailedEntryCount': len(failed_entries),
+ 'Entries': failed_entries
+ })
+
return '', self.response_headers
def put_rule(self):
- body = self.load_body()
- name = body.get('Name')
- event_pattern = body.get('EventPattern')
- sched_exp = body.get('ScheduleExpression')
+ name = self._get_param('Name')
+ event_pattern = self._get_param('EventPattern')
+ sched_exp = self._get_param('ScheduleExpression')
+ state = self._get_param('State')
+ desc = self._get_param('Description')
+ role_arn = self._get_param('RoleArn')
if not name:
return self.error('ValidationException', 'Parameter Name is required.')
@@ -156,17 +169,16 @@ class EventsHandler(BaseResponse):
name,
ScheduleExpression=sched_exp,
EventPattern=event_pattern,
- State=body.get('State'),
- Description=body.get('Description'),
- RoleArn=body.get('RoleArn')
+ State=state,
+ Description=desc,
+ RoleArn=role_arn
)
return json.dumps({'RuleArn': rule_arn}), self.response_headers
def put_targets(self):
- body = self.load_body()
- rule_name = body.get('Rule')
- targets = body.get('Targets')
+ rule_name = self._get_param('Rule')
+ targets = self._get_param('Targets')
if not rule_name:
return self.error('ValidationException', 'Parameter Rule is required.')
@@ -180,9 +192,8 @@ class EventsHandler(BaseResponse):
return '', self.response_headers
def remove_targets(self):
- body = self.load_body()
- rule_name = body.get('Rule')
- ids = body.get('Ids')
+ rule_name = self._get_param('Rule')
+ ids = self._get_param('Ids')
if not rule_name:
return self.error('ValidationException', 'Parameter Rule is required.')
@@ -197,3 +208,22 @@ class EventsHandler(BaseResponse):
def test_event_pattern(self):
pass
+
+ def put_permission(self):
+ action = self._get_param('Action')
+ principal = self._get_param('Principal')
+ statement_id = self._get_param('StatementId')
+
+ events_backend.put_permission(action, principal, statement_id)
+
+ return ''
+
+ def remove_permission(self):
+ statement_id = self._get_param('StatementId')
+
+ events_backend.remove_permission(statement_id)
+
+ return ''
+
+ def describe_event_bus(self):
+ return json.dumps(events_backend.describe_event_bus())
diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index 22f310228..85b69ab0e 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -2,6 +2,7 @@ from __future__ import unicode_literals
import base64
import hashlib
+import json
import re
import six
import struct
@@ -9,6 +10,7 @@ from xml.sax.saxutils import escape
import boto.sqs
+from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel
from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis
from .utils import generate_receipt_handle
@@ -166,11 +168,14 @@ class Queue(BaseModel):
'ReceiveMessageWaitTimeSeconds',
'VisibilityTimeout',
'WaitTimeSeconds']
+ ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes',
+ 'GetQueueUrl', 'ReceiveMessage', 'SendMessage')
def __init__(self, name, region, **kwargs):
self.name = name
self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30))
self.region = region
+ self.tags = {}
self._messages = []
@@ -189,14 +194,42 @@ class Queue(BaseModel):
self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days
self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name)
self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0))
+ self.permissions = {}
# wait_time_seconds will be set to immediate return messages
self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0))
+ self.redrive_policy = {}
+ self.dead_letter_queue = None
+
+ if 'RedrivePolicy' in kwargs:
+ self._setup_dlq(kwargs['RedrivePolicy'])
+
# Check some conditions
if self.fifo_queue and not self.name.endswith('.fifo'):
raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues')
+ def _setup_dlq(self, policy_json):
+ try:
+ self.redrive_policy = json.loads(policy_json)
+ except ValueError:
+ raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json')
+
+ if 'deadLetterTargetArn' not in self.redrive_policy:
+ raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn')
+ if 'maxReceiveCount' not in self.redrive_policy:
+ raise RESTError('InvalidParameterValue', 'Redrive policy does not contain maxReceiveCount')
+
+ for queue in sqs_backends[self.region].queues.values():
+ if queue.queue_arn == self.redrive_policy['deadLetterTargetArn']:
+ self.dead_letter_queue = queue
+
+ if self.fifo_queue and not queue.fifo_queue:
+ raise RESTError('InvalidParameterCombination', 'Fifo queues cannot use non fifo dead letter queues')
+ break
+ else:
+ raise RESTError('AWS.SimpleQueueService.NonExistentQueue', 'Could not find DLQ for {0}'.format(self.redrive_policy['deadLetterTargetArn']))
+
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
@@ -382,9 +415,14 @@ class SQSBackend(BaseBackend):
time.sleep(0.001)
continue
+ messages_to_dlq = []
for message in queue.messages:
if not message.visible:
continue
+ if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']:
+ messages_to_dlq.append(message)
+ continue
+
message.mark_received(
visibility_timeout=visibility_timeout
)
@@ -392,6 +430,10 @@ class SQSBackend(BaseBackend):
if len(result) >= count:
break
+ for message in messages_to_dlq:
+ queue._messages.remove(message)
+ queue.dead_letter_queue.add_message(message)
+
return result
def delete_message(self, queue_name, receipt_handle):
@@ -419,6 +461,49 @@ class SQSBackend(BaseBackend):
queue = self.get_queue(queue_name)
queue._messages = []
+ def list_dead_letter_source_queues(self, queue_name):
+ dlq = self.get_queue(queue_name)
+
+ queues = []
+ for queue in self.queues.values():
+ if queue.dead_letter_queue is dlq:
+ queues.append(queue)
+
+ return queues
+
+ def add_permission(self, queue_name, actions, account_ids, label):
+ queue = self.get_queue(queue_name)
+
+ if actions is None or len(actions) == 0:
+ raise RESTError('InvalidParameterValue', 'Need at least one Action')
+ if account_ids is None or len(account_ids) == 0:
+ raise RESTError('InvalidParameterValue', 'Need at least one Account ID')
+
+ if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]):
+ raise RESTError('InvalidParameterValue', 'Invalid permissions')
+
+ queue.permissions[label] = (account_ids, actions)
+
+ def remove_permission(self, queue_name, label):
+ queue = self.get_queue(queue_name)
+
+ if label not in queue.permissions:
+ raise RESTError('InvalidParameterValue', 'Permission doesnt exist for the given label')
+
+ del queue.permissions[label]
+
+ def tag_queue(self, queue_name, tags):
+ queue = self.get_queue(queue_name)
+ queue.tags.update(tags)
+
+ def untag_queue(self, queue_name, tag_keys):
+ queue = self.get_queue(queue_name)
+ for key in tag_keys:
+ try:
+ del queue.tags[key]
+ except KeyError:
+ pass
+
sqs_backends = {}
for region in boto.sqs.regions():
diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py
index 63a5036d6..bb21c1e2a 100644
--- a/moto/sqs/responses.py
+++ b/moto/sqs/responses.py
@@ -40,12 +40,15 @@ class SQSResponse(BaseResponse):
queue_name = self.path.split("/")[-1]
return queue_name
- def _get_validated_visibility_timeout(self):
+ def _get_validated_visibility_timeout(self, timeout=None):
"""
:raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
:raises TypeError: If visibility timeout was not specified
"""
- visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
+ if timeout is not None:
+ visibility_timeout = int(timeout)
+ else:
+ visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT:
raise ValueError
@@ -119,6 +122,49 @@ class SQSResponse(BaseResponse):
template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE)
return template.render()
+ def change_message_visibility_batch(self):
+ queue_name = self._get_queue_name()
+ entries = self._get_list_prefix('ChangeMessageVisibilityBatchRequestEntry')
+
+ success = []
+ error = []
+ for entry in entries:
+ try:
+ visibility_timeout = self._get_validated_visibility_timeout(entry['visibility_timeout'])
+ except ValueError:
+ error.append({
+ 'Id': entry['id'],
+ 'SenderFault': 'true',
+ 'Code': 'InvalidParameterValue',
+ 'Message': 'Visibility timeout invalid'
+ })
+ continue
+
+ try:
+ self.sqs_backend.change_message_visibility(
+ queue_name=queue_name,
+ receipt_handle=entry['receipt_handle'],
+ visibility_timeout=visibility_timeout
+ )
+ success.append(entry['id'])
+ except ReceiptHandleIsInvalid as e:
+ error.append({
+ 'Id': entry['id'],
+ 'SenderFault': 'true',
+ 'Code': 'ReceiptHandleIsInvalid',
+ 'Message': e.description
+ })
+ except MessageNotInflight as e:
+ error.append({
+ 'Id': entry['id'],
+ 'SenderFault': 'false',
+ 'Code': 'AWS.SimpleQueueService.MessageNotInflight',
+ 'Message': e.description
+ })
+
+ template = self.response_template(CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE)
+ return template.render(success=success, errors=error)
+
def get_queue_attributes(self):
queue_name = self._get_queue_name()
try:
@@ -288,8 +334,62 @@ class SQSResponse(BaseResponse):
messages = self.sqs_backend.receive_messages(
queue_name, message_count, wait_time, visibility_timeout)
template = self.response_template(RECEIVE_MESSAGE_RESPONSE)
- output = template.render(messages=messages)
- return output
+ return template.render(messages=messages)
+
+ def list_dead_letter_source_queues(self):
+ request_url = urlparse(self.uri)
+ queue_name = self._get_queue_name()
+
+ source_queue_urls = self.sqs_backend.list_dead_letter_source_queues(queue_name)
+
+ template = self.response_template(LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE)
+ return template.render(queues=source_queue_urls, request_url=request_url)
+
+ def add_permission(self):
+ queue_name = self._get_queue_name()
+ actions = self._get_multi_param('ActionName')
+ account_ids = self._get_multi_param('AWSAccountId')
+ label = self._get_param('Label')
+
+ self.sqs_backend.add_permission(queue_name, actions, account_ids, label)
+
+ template = self.response_template(ADD_PERMISSION_RESPONSE)
+ return template.render()
+
+ def remove_permission(self):
+ queue_name = self._get_queue_name()
+ label = self._get_param('Label')
+
+ self.sqs_backend.remove_permission(queue_name, label)
+
+ template = self.response_template(REMOVE_PERMISSION_RESPONSE)
+ return template.render()
+
+ def tag_queue(self):
+ queue_name = self._get_queue_name()
+ tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value')
+
+ self.sqs_backend.tag_queue(queue_name, tags)
+
+ template = self.response_template(TAG_QUEUE_RESPONSE)
+ return template.render()
+
+ def untag_queue(self):
+ queue_name = self._get_queue_name()
+ tag_keys = self._get_multi_param('TagKey')
+
+ self.sqs_backend.untag_queue(queue_name, tag_keys)
+
+ template = self.response_template(UNTAG_QUEUE_RESPONSE)
+ return template.render()
+
+ def list_queue_tags(self):
+ queue_name = self._get_queue_name()
+
+ queue = self.sqs_backend.get_queue(queue_name)
+
+ template = self.response_template(LIST_QUEUE_TAGS_RESPONSE)
+ return template.render(tags=queue.tags)
CREATE_QUEUE_RESPONSE = """
@@ -307,7 +407,7 @@ GET_QUEUE_URL_RESPONSE = """
{{ queue.url(request_url) }}
- 470a6f13-2ed9-4181-ad8a-2fdea142988e
+ {{ requestid }}
"""
@@ -318,13 +418,13 @@ LIST_QUEUES_RESPONSE = """
{% endfor %}
- 725275ae-0b9b-4762-b238-436d7c65a1ac
+ {{ requestid }}
"""
DELETE_QUEUE_RESPONSE = """
- 6fde8d1e-52cd-4581-8cd9-c512f4c64223
+ {{ requestid }}
"""
@@ -338,13 +438,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """
{% endfor %}
- 1ea71be5-b5a2-4f9d-b85a-945d8d08cd0b
+ {{ requestid }}
"""
SET_QUEUE_ATTRIBUTE_RESPONSE = """
- e5cca473-4fc0-4198-a451-8abb94d02c75
+ {{ requestid }}
"""
@@ -361,7 +461,7 @@ SEND_MESSAGE_RESPONSE = """
- 27daac76-34dd-47df-bd01-1f6e873584a0
+ {{ requestid }}
"""
@@ -409,7 +509,7 @@ RECEIVE_MESSAGE_RESPONSE = """
{% endfor %}
- b6633655-283d-45b4-aee4-4e84e0ae6afa
+ {{ requestid }}
"""
@@ -427,13 +527,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """
{% endfor %}
- ca1ad5d0-8271-408b-8d0f-1351bf547e74
+ {{ requestid }}
"""
DELETE_MESSAGE_RESPONSE = """
- b5293cb5-d306-4a17-9048-b263635abe42
+ {{ requestid }}
"""
@@ -446,22 +546,92 @@ DELETE_MESSAGE_BATCH_RESPONSE = """
{% endfor %}
- d6f86b7a-74d1-4439-b43f-196a1e29cd85
+ {{ requestid }}
"""
CHANGE_MESSAGE_VISIBILITY_RESPONSE = """
- 6a7a282a-d013-4a59-aba9-335b0fa48bed
+ {{ requestid }}
"""
+CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """
+
+ {% for success_id in success %}
+
+ {{ success_id }}
+
+ {% endfor %}
+ {% for error_dict in errors %}
+
+ {{ error_dict['Id'] }}
+ {{ error_dict['Code'] }}
+ {{ error_dict['Message'] }}
+ {{ error_dict['SenderFault'] }}
+
+ {% endfor %}
+
+
+ {{ request_id }}
+
+"""
+
PURGE_QUEUE_RESPONSE = """
- 6fde8d1e-52cd-4581-8cd9-c512f4c64223
+ {{ requestid }}
"""
+LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE = """
+
+ {% for queue in queues %}
+ {{ queue.url(request_url) }}
+ {% endfor %}
+
+
+ 8ffb921f-b85e-53d9-abcf-d8d0057f38fc
+
+"""
+
+ADD_PERMISSION_RESPONSE = """
+
+ {{ request_id }}
+
+"""
+
+REMOVE_PERMISSION_RESPONSE = """
+
+ {{ request_id }}
+
+"""
+
+TAG_QUEUE_RESPONSE = """
+
+ {{ request_id }}
+
+"""
+
+UNTAG_QUEUE_RESPONSE = """
+
+ {{ request_id }}
+
+"""
+
+LIST_QUEUE_TAGS_RESPONSE = """
+
+ {% for key, value in tags.items() %}
+
+ {{ key }}
+ {{ value }}
+
+ {% endfor %}
+
+
+ {{ request_id }}
+
+"""
+
ERROR_TOO_LONG_RESPONSE = """
Sender
diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py
index db1969645..ccac48181 100644
--- a/tests/test_acm/test_acm.py
+++ b/tests/test_acm/test_acm.py
@@ -4,6 +4,7 @@ import os
import boto3
from freezegun import freeze_time
import sure # noqa
+import uuid
from botocore.exceptions import ClientError
@@ -281,11 +282,23 @@ def test_resend_validation_email_invalid():
def test_request_certificate():
client = boto3.client('acm', region_name='eu-central-1')
+ token = str(uuid.uuid4())
+
resp = client.request_certificate(
DomainName='google.com',
+ IdempotencyToken=token,
SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
)
resp.should.contain('CertificateArn')
+ arn = resp['CertificateArn']
+
+ resp = client.request_certificate(
+ DomainName='google.com',
+ IdempotencyToken=token,
+ SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
+ )
+ resp['CertificateArn'].should.equal(arn)
+
@mock_acm
def test_request_certificate_no_san():
diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py
index 5df03f8d8..c645a0c4e 100644
--- a/tests/test_dynamodb2/test_dynamodb.py
+++ b/tests/test_dynamodb2/test_dynamodb.py
@@ -28,13 +28,13 @@ except ImportError:
@mock_dynamodb2_deprecated
def test_list_tables():
name = 'TestTable'
- #{'schema': }
+ # Should make tables properly with boto
dynamodb_backend2.create_table(name, schema=[
{u'KeyType': u'HASH', u'AttributeName': u'forum_name'},
{u'KeyType': u'RANGE', u'AttributeName': u'subject'}
])
conn = boto.dynamodb2.connect_to_region(
- 'us-west-2',
+ 'us-east-1',
aws_access_key_id="ak",
aws_secret_access_key="sk")
assert conn.list_tables()["TableNames"] == [name]
@@ -43,6 +43,7 @@ def test_list_tables():
@requires_boto_gte("2.9")
@mock_dynamodb2_deprecated
def test_list_tables_layer_1():
+ # Should make tables properly with boto
dynamodb_backend2.create_table("test_1", schema=[
{u'KeyType': u'HASH', u'AttributeName': u'name'}
])
@@ -50,7 +51,7 @@ def test_list_tables_layer_1():
{u'KeyType': u'HASH', u'AttributeName': u'name'}
])
conn = boto.dynamodb2.connect_to_region(
- 'us-west-2',
+ 'us-east-1',
aws_access_key_id="ak",
aws_secret_access_key="sk")
@@ -88,12 +89,22 @@ def test_list_table_tags():
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5})
table_description = conn.describe_table(TableName=name)
arn = table_description['Table']['TableArn']
- tags = [{'Key':'TestTag', 'Value': 'TestValue'}]
- conn.tag_resource(ResourceArn=arn,
- Tags=tags)
+
+ # Tag table
+ tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}]
+ conn.tag_resource(ResourceArn=arn, Tags=tags)
+
+ # Check tags
resp = conn.list_tags_of_resource(ResourceArn=arn)
assert resp["Tags"] == tags
+ # Remove 1 tag
+ conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag'])
+
+ # Check tags
+ resp = conn.list_tags_of_resource(ResourceArn=arn)
+ assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}]
+
@requires_boto_gte("2.9")
@mock_dynamodb2
@@ -868,3 +879,50 @@ def test_delete_item():
response = table.scan()
assert response['Count'] == 0
+
+
+@mock_dynamodb2
+def test_describe_limits():
+ client = boto3.client('dynamodb', region_name='eu-central-1')
+ resp = client.describe_limits()
+
+ resp['AccountMaxReadCapacityUnits'].should.equal(20000)
+ resp['AccountMaxWriteCapacityUnits'].should.equal(20000)
+ resp['TableMaxWriteCapacityUnits'].should.equal(10000)
+ resp['TableMaxReadCapacityUnits'].should.equal(10000)
+
+
+@mock_dynamodb2
+def test_set_ttl():
+ client = boto3.client('dynamodb', region_name='us-east-1')
+
+ # Create the DynamoDB table.
+ client.create_table(
+ TableName='test1',
+ AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+ KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+ ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+ )
+
+ client.update_time_to_live(
+ TableName='test1',
+ TimeToLiveSpecification={
+ 'Enabled': True,
+ 'AttributeName': 'expire'
+ }
+ )
+
+ resp = client.describe_time_to_live(TableName='test1')
+ resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED')
+ resp['TimeToLiveDescription']['AttributeName'].should.equal('expire')
+
+ client.update_time_to_live(
+ TableName='test1',
+ TimeToLiveSpecification={
+ 'Enabled': False,
+ 'AttributeName': 'expire'
+ }
+ )
+
+ resp = client.describe_time_to_live(TableName='test1')
+ resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED')
diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py
index 0e1099559..5e635d5ef 100644
--- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py
+++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py
@@ -54,7 +54,7 @@ def test_create_table():
}
}
conn = boto.dynamodb2.connect_to_region(
- 'us-west-2',
+ 'us-east-1',
aws_access_key_id="ak",
aws_secret_access_key="sk"
)
@@ -425,7 +425,7 @@ def test_get_special_item():
@mock_dynamodb2_deprecated
def test_update_item_remove():
- conn = boto.dynamodb2.connect_to_region("us-west-2")
+ conn = boto.dynamodb2.connect_to_region("us-east-1")
table = Table.create('messages', schema=[
HashKey('username')
])
@@ -452,7 +452,7 @@ def test_update_item_remove():
@mock_dynamodb2_deprecated
def test_update_item_set():
- conn = boto.dynamodb2.connect_to_region("us-west-2")
+ conn = boto.dynamodb2.connect_to_region("us-east-1")
table = Table.create('messages', schema=[
HashKey('username')
])
diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py
index 9b6e99b57..9e5e4ff08 100644
--- a/tests/test_ecs/test_ecs_boto3.py
+++ b/tests/test_ecs/test_ecs_boto3.py
@@ -1611,6 +1611,152 @@ def test_update_service_through_cloudformation_should_trigger_replacement():
len(resp['serviceArns']).should.equal(1)
+@mock_ec2
+@mock_ecs
+def test_attributes():
+ # Combined put, list, and delete attributes into the same test due to the amount of setup
+ ecs_client = boto3.client('ecs', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ test_cluster_name = 'test_ecs_cluster'
+
+ _ = ecs_client.create_cluster(
+ clusterName=test_cluster_name
+ )
+
+ test_instance = ec2.create_instances(
+ ImageId="ami-1234abcd",
+ MinCount=1,
+ MaxCount=1,
+ )[0]
+
+ instance_id_document = json.dumps(
+ ec2_utils.generate_instance_identity_document(test_instance)
+ )
+
+ response = ecs_client.register_container_instance(
+ cluster=test_cluster_name,
+ instanceIdentityDocument=instance_id_document
+ )
+
+ response['containerInstance'][
+ 'ec2InstanceId'].should.equal(test_instance.id)
+ full_arn1 = response['containerInstance']['containerInstanceArn']
+
+ test_instance = ec2.create_instances(
+ ImageId="ami-1234abcd",
+ MinCount=1,
+ MaxCount=1,
+ )[0]
+
+ instance_id_document = json.dumps(
+ ec2_utils.generate_instance_identity_document(test_instance)
+ )
+
+ response = ecs_client.register_container_instance(
+ cluster=test_cluster_name,
+ instanceIdentityDocument=instance_id_document
+ )
+
+ response['containerInstance'][
+ 'ec2InstanceId'].should.equal(test_instance.id)
+ full_arn2 = response['containerInstance']['containerInstanceArn']
+ partial_arn2 = full_arn2.rsplit('/', 1)[-1]
+
+ full_arn2.should_not.equal(full_arn1) # uuid1 isn't unique enough when the PC is fast ;-)
+
+ # Ok set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd.
+ ecs_client.put_attributes(
+ cluster=test_cluster_name,
+ attributes=[
+ {'name': 'env', 'value': 'prod'},
+ {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1},
+ {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}
+ ]
+ )
+
+ resp = ecs_client.list_attributes(
+ cluster=test_cluster_name,
+ targetType='container-instance'
+ )
+ attrs = resp['attributes']
+ len(attrs).should.equal(4)
+
+ # Tests that the attrs have been set properly
+ len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2)
+ len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1)
+
+ ecs_client.delete_attributes(
+ cluster=test_cluster_name,
+ attributes=[
+ {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}
+ ]
+ )
+
+ resp = ecs_client.list_attributes(
+ cluster=test_cluster_name,
+ targetType='container-instance'
+ )
+ attrs = resp['attributes']
+ len(attrs).should.equal(3)
+
+
+@mock_ecs
+def test_poll_endpoint():
+ # Placeholder endpoints only; verifies discover_poll_endpoint responds without error
+ ecs_client = boto3.client('ecs', region_name='us-east-1')
+
+ # Just a placeholder until someone actually wants useless data, just testing it doesn't raise an exception
+ resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah')
+ resp.should.contain('endpoint')
+ resp.should.contain('telemetryEndpoint')
+
+
+@mock_ecs
+def test_list_task_definition_families():
+ client = boto3.client('ecs', region_name='us-east-1')
+ client.register_task_definition(
+ family='test_ecs_task',
+ containerDefinitions=[
+ {
+ 'name': 'hello_world',
+ 'image': 'docker/hello-world:latest',
+ 'cpu': 1024,
+ 'memory': 400,
+ 'essential': True,
+ 'environment': [{
+ 'name': 'AWS_ACCESS_KEY_ID',
+ 'value': 'SOME_ACCESS_KEY'
+ }],
+ 'logConfiguration': {'logDriver': 'json-file'}
+ }
+ ]
+ )
+ client.register_task_definition(
+ family='alt_test_ecs_task',
+ containerDefinitions=[
+ {
+ 'name': 'hello_world',
+ 'image': 'docker/hello-world:latest',
+ 'cpu': 1024,
+ 'memory': 400,
+ 'essential': True,
+ 'environment': [{
+ 'name': 'AWS_ACCESS_KEY_ID',
+ 'value': 'SOME_ACCESS_KEY'
+ }],
+ 'logConfiguration': {'logDriver': 'json-file'}
+ }
+ ]
+ )
+
+ resp1 = client.list_task_definition_families()
+ resp2 = client.list_task_definition_families(familyPrefix='alt')
+
+ len(resp1['families']).should.equal(2)
+ len(resp2['families']).should.equal(1)
+
+
def _fetch_container_instance_resources(container_instance_description):
remaining_resources = {}
registered_resources = {}
diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py
index 98634c677..700152573 100644
--- a/tests/test_elbv2/test_elbv2.py
+++ b/tests/test_elbv2/test_elbv2.py
@@ -1,11 +1,13 @@
from __future__ import unicode_literals
+import os
import boto3
import botocore
from botocore.exceptions import ClientError
from nose.tools import assert_raises
import sure # noqa
-from moto import mock_elbv2, mock_ec2
+from moto import mock_elbv2, mock_ec2, mock_acm
+from moto.elbv2 import elbv2_backends
@mock_elbv2
@@ -1030,3 +1032,373 @@ def test_describe_invalid_target_group():
# Check error raises correctly
with assert_raises(ClientError):
conn.describe_target_groups(Names=['invalid'])
+
+
+@mock_elbv2
+def test_describe_account_limits():
+ client = boto3.client('elbv2', region_name='eu-central-1')
+
+ resp = client.describe_account_limits()
+ resp['Limits'][0].should.contain('Name')
+ resp['Limits'][0].should.contain('Max')
+
+
+@mock_elbv2
+def test_describe_ssl_policies():
+ client = boto3.client('elbv2', region_name='eu-central-1')
+
+ resp = client.describe_ssl_policies()
+ len(resp['SslPolicies']).should.equal(5)
+
+ resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08'])
+ len(resp['SslPolicies']).should.equal(2)
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_ip_address_type():
+ client = boto3.client('elbv2', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ security_group = ec2.create_security_group(
+ GroupName='a-security-group', Description='First One')
+ vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+ subnet1 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1a')
+ subnet2 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1b')
+
+ response = client.create_load_balancer(
+ Name='my-lb',
+ Subnets=[subnet1.id, subnet2.id],
+ SecurityGroups=[security_group.id],
+ Scheme='internal',
+ Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+ arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+ # Internal LBs can't be dualstack yet
+ with assert_raises(ClientError):
+ client.set_ip_address_type(
+ LoadBalancerArn=arn,
+ IpAddressType='dualstack'
+ )
+
+ # Create internet facing one
+ response = client.create_load_balancer(
+ Name='my-lb2',
+ Subnets=[subnet1.id, subnet2.id],
+ SecurityGroups=[security_group.id],
+ Scheme='internet-facing',
+ Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+ arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+ client.set_ip_address_type(
+ LoadBalancerArn=arn,
+ IpAddressType='dualstack'
+ )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_security_groups():
+ client = boto3.client('elbv2', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ security_group = ec2.create_security_group(
+ GroupName='a-security-group', Description='First One')
+ security_group2 = ec2.create_security_group(
+ GroupName='b-security-group', Description='Second One')
+ vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+ subnet1 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1a')
+ subnet2 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1b')
+
+ response = client.create_load_balancer(
+ Name='my-lb',
+ Subnets=[subnet1.id, subnet2.id],
+ SecurityGroups=[security_group.id],
+ Scheme='internal',
+ Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+ arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+ client.set_security_groups(
+ LoadBalancerArn=arn,
+ SecurityGroups=[security_group.id, security_group2.id]
+ )
+
+ resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+ len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
+
+ with assert_raises(ClientError):
+ client.set_security_groups(
+ LoadBalancerArn=arn,
+ SecurityGroups=['non_existant']
+ )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_subnets():
+ client = boto3.client('elbv2', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ security_group = ec2.create_security_group(
+ GroupName='a-security-group', Description='First One')
+ vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+ subnet1 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1a')
+ subnet2 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1b')
+ subnet3 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1c')
+
+ response = client.create_load_balancer(
+ Name='my-lb',
+ Subnets=[subnet1.id, subnet2.id],
+ SecurityGroups=[security_group.id],
+ Scheme='internal',
+ Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+ arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+ client.set_subnets(
+ LoadBalancerArn=arn,
+ Subnets=[subnet1.id, subnet2.id, subnet3.id]
+ )
+
+ resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+ len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
+
+ # Only 1 AZ
+ with assert_raises(ClientError):
+ client.set_subnets(
+ LoadBalancerArn=arn,
+ Subnets=[subnet1.id]
+ )
+
+ # Multiple subnets in same AZ
+ with assert_raises(ClientError):
+ client.set_subnets(
+ LoadBalancerArn=arn,
+ Subnets=[subnet1.id, subnet2.id, subnet2.id]
+ )
+
+
+@mock_elbv2
+@mock_ec2
+def test_modify_load_balancer_attributes_idle_timeout():
+ client = boto3.client('elbv2', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ security_group = ec2.create_security_group(
+ GroupName='a-security-group', Description='First One')
+ vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+ subnet1 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1a')
+ subnet2 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='us-east-1b')
+
+ response = client.create_load_balancer(
+ Name='my-lb',
+ Subnets=[subnet1.id, subnet2.id],
+ SecurityGroups=[security_group.id],
+ Scheme='internal',
+ Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+ arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+ client.modify_load_balancer_attributes(
+ LoadBalancerArn=arn,
+ Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
+ )
+
+ # Check it's 600, not the default 60
+ response = client.describe_load_balancer_attributes(
+ LoadBalancerArn=arn
+ )
+ idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
+ idle_timeout['Value'].should.equal('600')
+
+
+@mock_elbv2
+@mock_ec2
+def test_modify_target_group():
+ client = boto3.client('elbv2', region_name='us-east-1')
+ ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+ vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+ response = client.create_target_group(
+ Name='a-target',
+ Protocol='HTTP',
+ Port=8080,
+ VpcId=vpc.id,
+ HealthCheckProtocol='HTTP',
+ HealthCheckPort='8080',
+ HealthCheckPath='/',
+ HealthCheckIntervalSeconds=5,
+ HealthCheckTimeoutSeconds=5,
+ HealthyThresholdCount=5,
+ UnhealthyThresholdCount=2,
+ Matcher={'HttpCode': '200'})
+ arn = response.get('TargetGroups')[0]['TargetGroupArn']
+
+ client.modify_target_group(
+ TargetGroupArn=arn,
+ HealthCheckProtocol='HTTPS',
+ HealthCheckPort='8081',
+ HealthCheckPath='/status',
+ HealthCheckIntervalSeconds=10,
+ HealthCheckTimeoutSeconds=10,
+ HealthyThresholdCount=10,
+ UnhealthyThresholdCount=4,
+ Matcher={'HttpCode': '200-399'}
+ )
+
+ response = client.describe_target_groups(
+ TargetGroupArns=[arn]
+ )
+ response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
+ response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
+ response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
+ response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
+ response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
+ response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
+ response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
+ response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
+
+
+@mock_elbv2
+@mock_ec2
+@mock_acm
+def test_modify_listener_http_to_https():
+ client = boto3.client('elbv2', region_name='eu-central-1')
+ acm = boto3.client('acm', region_name='eu-central-1')
+ ec2 = boto3.resource('ec2', region_name='eu-central-1')
+
+ security_group = ec2.create_security_group(
+ GroupName='a-security-group', Description='First One')
+ vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+ subnet1 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='eu-central-1a')
+ subnet2 = ec2.create_subnet(
+ VpcId=vpc.id,
+ CidrBlock='172.28.7.192/26',
+ AvailabilityZone='eu-central-1b')
+
+ response = client.create_load_balancer(
+ Name='my-lb',
+ Subnets=[subnet1.id, subnet2.id],
+ SecurityGroups=[security_group.id],
+ Scheme='internal',
+ Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+ load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
+
+ response = client.create_target_group(
+ Name='a-target',
+ Protocol='HTTP',
+ Port=8080,
+ VpcId=vpc.id,
+ HealthCheckProtocol='HTTP',
+ HealthCheckPort='8080',
+ HealthCheckPath='/',
+ HealthCheckIntervalSeconds=5,
+ HealthCheckTimeoutSeconds=5,
+ HealthyThresholdCount=5,
+ UnhealthyThresholdCount=2,
+ Matcher={'HttpCode': '200'})
+ target_group = response.get('TargetGroups')[0]
+ target_group_arn = target_group['TargetGroupArn']
+
+ # Plain HTTP listener
+ response = client.create_listener(
+ LoadBalancerArn=load_balancer_arn,
+ Protocol='HTTP',
+ Port=80,
+ DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}]
+ )
+ listener_arn = response['Listeners'][0]['ListenerArn']
+
+ response = acm.request_certificate(
+ DomainName='google.com',
+ SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
+ )
+ google_arn = response['CertificateArn']
+ response = acm.request_certificate(
+ DomainName='yahoo.com',
+ SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'],
+ )
+ yahoo_arn = response['CertificateArn']
+
+ response = client.modify_listener(
+ ListenerArn=listener_arn,
+ Port=443,
+ Protocol='HTTPS',
+ SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+ Certificates=[
+ {'CertificateArn': google_arn, 'IsDefault': False},
+ {'CertificateArn': yahoo_arn, 'IsDefault': True}
+ ],
+ DefaultActions=[
+ {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+ ]
+ )
+ response['Listeners'][0]['Port'].should.equal(443)
+ response['Listeners'][0]['Protocol'].should.equal('HTTPS')
+ response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01')
+ len(response['Listeners'][0]['Certificates']).should.equal(2)
+
+ # Check default cert, can't do this in server mode
+ if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false':
+ listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn]
+ listener.certificate.should.equal(yahoo_arn)
+
+ # No default cert
+ with assert_raises(ClientError):
+ client.modify_listener(
+ ListenerArn=listener_arn,
+ Port=443,
+ Protocol='HTTPS',
+ SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+ Certificates=[
+ {'CertificateArn': google_arn, 'IsDefault': False}
+ ],
+ DefaultActions=[
+ {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+ ]
+ )
+
+ # Bad cert
+ with assert_raises(ClientError):
+ client.modify_listener(
+ ListenerArn=listener_arn,
+ Port=443,
+ Protocol='HTTPS',
+ SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+ Certificates=[
+ {'CertificateArn': 'lalala', 'IsDefault': True}
+ ],
+ DefaultActions=[
+ {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+ ]
+ )
diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py
index da8238f72..e839bde5b 100644
--- a/tests/test_events/test_events.py
+++ b/tests/test_events/test_events.py
@@ -3,6 +3,8 @@ import random
import boto3
from moto.events import mock_events
+from botocore.exceptions import ClientError
+from nose.tools import assert_raises
RULES = [
@@ -171,11 +173,36 @@ def test_remove_targets():
assert(targets_before - 1 == targets_after)
-if __name__ == '__main__':
- test_list_rules()
- test_describe_rule()
- test_enable_disable_rule()
- test_list_rule_names_by_target()
- test_list_rules()
- test_list_targets_by_rule()
- test_remove_targets()
+@mock_events
+def test_permissions():
+ client = boto3.client('events', 'eu-central-1')
+
+ client.put_permission(Action='PutEvents', Principal='111111111111', StatementId='Account1')
+ client.put_permission(Action='PutEvents', Principal='222222222222', StatementId='Account2')
+
+ resp = client.describe_event_bus()
+ assert len(resp['Policy']['Statement']) == 2
+
+ client.remove_permission(StatementId='Account2')
+
+ resp = client.describe_event_bus()
+ assert len(resp['Policy']['Statement']) == 1
+ assert resp['Policy']['Statement'][0]['Sid'] == 'Account1'
+
+
+@mock_events
+def test_put_events():
+ client = boto3.client('events', 'eu-central-1')
+
+ event = {
+ "Source": "com.mycompany.myapp",
+ "Detail": '{"key1": "value3", "key2": "value4"}',
+ "Resources": ["resource1", "resource2"],
+ "DetailType": "myDetailType"
+ }
+
+ client.put_events(Entries=[event])
+ # Boto3 would error if it didn't return 200 OK
+
+ with assert_raises(ClientError):
+ client.put_events(Entries=[event]*20)
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 536261504..c761ec8d9 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
+import os
import boto
import boto3
@@ -8,14 +9,18 @@ from botocore.exceptions import ClientError
from boto.exception import SQSError
from boto.sqs.message import RawMessage, Message
+from freezegun import freeze_time
import base64
+import json
import sure # noqa
import time
+import uuid
from moto import settings, mock_sqs, mock_sqs_deprecated
from tests.helpers import requires_boto_gte
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
+from nose import SkipTest
@mock_sqs
@@ -93,8 +98,6 @@ def test_message_send_without_attributes():
msg.get('MD5OfMessageBody').should.equal(
'58fd9edd83341c29f1aebba81c31e257')
msg.shouldnt.have.key('MD5OfMessageAttributes')
- msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
- '27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages()
@@ -118,8 +121,6 @@ def test_message_send_with_attributes():
'58fd9edd83341c29f1aebba81c31e257')
msg.get('MD5OfMessageAttributes').should.equal(
'235c5c510d26fb653d073faed50ae77c')
- msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
- '27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages()
@@ -143,8 +144,6 @@ def test_message_with_complex_attributes():
'58fd9edd83341c29f1aebba81c31e257')
msg.get('MD5OfMessageAttributes').should.equal(
'8ae21a7957029ef04146b42aeaa18a22')
- msg.get('ResponseMetadata', {}).get('RequestId').should.equal(
- '27daac76-34dd-47df-bd01-1f6e873584a0')
msg.get('MessageId').should_not.contain(' \n')
messages = queue.receive_messages()
@@ -755,3 +754,181 @@ def test_delete_message_after_visibility_timeout():
m1_retrieved.delete()
assert new_queue.count() == 0
+
+
+@mock_sqs
+def test_batch_change_message_visibility():
+ if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+ raise SkipTest('Cant manipulate time in server mode')
+
+ with freeze_time("2015-01-01 12:00:00"):
+ sqs = boto3.client('sqs', region_name='us-east-1')
+ resp = sqs.create_queue(
+ QueueName='test-dlr-queue.fifo',
+ Attributes={'FifoQueue': 'true'}
+ )
+ queue_url = resp['QueueUrl']
+
+ sqs.send_message(QueueUrl=queue_url, MessageBody='msg1')
+ sqs.send_message(QueueUrl=queue_url, MessageBody='msg2')
+ sqs.send_message(QueueUrl=queue_url, MessageBody='msg3')
+
+ with freeze_time("2015-01-01 12:01:00"):
+ receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2)
+ len(receive_resp['Messages']).should.equal(2)
+
+ handles = [item['ReceiptHandle'] for item in receive_resp['Messages']]
+ entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles]
+
+ resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries)
+ len(resp['Successful']).should.equal(2)
+
+ with freeze_time("2015-01-01 14:00:00"):
+ resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+ len(resp['Messages']).should.equal(1)
+
+ with freeze_time("2015-01-01 16:00:00"):
+ resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+ len(resp['Messages']).should.equal(1)
+
+ with freeze_time("2015-01-02 12:00:00"):
+ resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+ len(resp['Messages']).should.equal(3)
+
+
+@mock_sqs
+def test_permissions():
+ client = boto3.client('sqs', region_name='us-east-1')
+
+ resp = client.create_queue(
+ QueueName='test-dlr-queue.fifo',
+ Attributes={'FifoQueue': 'true'}
+ )
+ queue_url = resp['QueueUrl']
+
+ client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*'])
+ client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage'])
+
+ with assert_raises(ClientError):
+ client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish'])
+
+ client.remove_permission(QueueUrl=queue_url, Label='account2')
+
+ with assert_raises(ClientError):
+ client.remove_permission(QueueUrl=queue_url, Label='non_existant')
+
+
+@mock_sqs
+def test_tags():
+ client = boto3.client('sqs', region_name='us-east-1')
+
+ resp = client.create_queue(
+ QueueName='test-dlr-queue.fifo',
+ Attributes={'FifoQueue': 'true'}
+ )
+ queue_url = resp['QueueUrl']
+
+ client.tag_queue(
+ QueueUrl=queue_url,
+ Tags={
+ 'test1': 'value1',
+ 'test2': 'value2',
+ }
+ )
+
+ resp = client.list_queue_tags(QueueUrl=queue_url)
+ resp['Tags'].should.contain('test1')
+ resp['Tags'].should.contain('test2')
+
+ client.untag_queue(
+ QueueUrl=queue_url,
+ TagKeys=['test2']
+ )
+
+ resp = client.list_queue_tags(QueueUrl=queue_url)
+ resp['Tags'].should.contain('test1')
+ resp['Tags'].should_not.contain('test2')
+
+
+@mock_sqs
+def test_create_fifo_queue_with_dlq():
+ sqs = boto3.client('sqs', region_name='us-east-1')
+ resp = sqs.create_queue(
+ QueueName='test-dlr-queue.fifo',
+ Attributes={'FifoQueue': 'true'}
+ )
+ queue_url1 = resp['QueueUrl']
+ queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+ resp = sqs.create_queue(
+ QueueName='test-dlr-queue',
+ Attributes={'FifoQueue': 'false'}
+ )
+ queue_url2 = resp['QueueUrl']
+ queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn']
+
+ sqs.create_queue(
+ QueueName='test-queue.fifo',
+ Attributes={
+ 'FifoQueue': 'true',
+ 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+ }
+ )
+
+ # Can't have a FIFO queue with a non-FIFO DLQ
+ with assert_raises(ClientError):
+ sqs.create_queue(
+ QueueName='test-queue2.fifo',
+ Attributes={
+ 'FifoQueue': 'true',
+ 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2})
+ }
+ )
+
+
+@mock_sqs
+def test_queue_with_dlq():
+ if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+ raise SkipTest('Cant manipulate time in server mode')
+
+ sqs = boto3.client('sqs', region_name='us-east-1')
+
+ with freeze_time("2015-01-01 12:00:00"):
+ resp = sqs.create_queue(
+ QueueName='test-dlr-queue.fifo',
+ Attributes={'FifoQueue': 'true'}
+ )
+ queue_url1 = resp['QueueUrl']
+ queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+ resp = sqs.create_queue(
+ QueueName='test-queue.fifo',
+ Attributes={
+ 'FifoQueue': 'true',
+ 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+ }
+ )
+ queue_url2 = resp['QueueUrl']
+
+ sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1')
+ sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2')
+
+ with freeze_time("2015-01-01 13:00:00"):
+ resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+ resp['Messages'][0]['Body'].should.equal('msg1')
+
+ with freeze_time("2015-01-01 13:01:00"):
+ resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+ resp['Messages'][0]['Body'].should.equal('msg1')
+
+ with freeze_time("2015-01-01 13:02:00"):
+ resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+ len(resp['Messages']).should.equal(1)
+
+ resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0)
+ resp['Messages'][0]['Body'].should.equal('msg1')
+
+ # Might as well test list source queues
+
+ resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1)
+ resp['queueUrls'][0].should.equal(queue_url2)