Merge remote-tracking branch 'spulec/master' into improve_coverage

Commit d680b1e025 by Bert Blommers, 2020-05-12 15:02:24 +01:00
45 changed files with 2446 additions and 159 deletions

View File

@@ -2468,7 +2468,7 @@
 ## dynamodb
 <details>
-<summary>46% implemented</summary>
+<summary>53% implemented</summary>
 - [X] batch_get_item
 - [X] batch_write_item
@@ -2479,7 +2479,7 @@
 - [X] delete_item
 - [X] delete_table
 - [ ] describe_backup
-- [ ] describe_continuous_backups
+- [X] describe_continuous_backups
 - [ ] describe_contributor_insights
 - [ ] describe_endpoints
 - [ ] describe_global_table
@@ -2500,10 +2500,10 @@
 - [ ] restore_table_to_point_in_time
 - [X] scan
 - [X] tag_resource
-- [ ] transact_get_items
+- [X] transact_get_items
 - [X] transact_write_items
 - [X] untag_resource
-- [ ] update_continuous_backups
+- [X] update_continuous_backups
 - [ ] update_contributor_insights
 - [ ] update_global_table
 - [ ] update_global_table_settings
@@ -5195,7 +5195,7 @@
 ## logs
 <details>
-<summary>35% implemented</summary>
+<summary>43% implemented</summary>
 - [ ] associate_kms_key
 - [ ] cancel_export_task
@@ -5208,7 +5208,7 @@
 - [ ] delete_metric_filter
 - [ ] delete_resource_policy
 - [X] delete_retention_policy
-- [ ] delete_subscription_filter
+- [X] delete_subscription_filter
 - [ ] describe_destinations
 - [ ] describe_export_tasks
 - [X] describe_log_groups
@@ -5216,7 +5216,7 @@
 - [ ] describe_metric_filters
 - [ ] describe_queries
 - [ ] describe_resource_policies
-- [ ] describe_subscription_filters
+- [X] describe_subscription_filters
 - [ ] disassociate_kms_key
 - [X] filter_log_events
 - [X] get_log_events
@@ -5230,7 +5230,7 @@
 - [ ] put_metric_filter
 - [ ] put_resource_policy
 - [X] put_retention_policy
-- [ ] put_subscription_filter
+- [X] put_subscription_filter
 - [ ] start_query
 - [ ] stop_query
 - [X] tag_log_group
@@ -5287,21 +5287,21 @@
 ## managedblockchain
 <details>
-<summary>0% implemented</summary>
+<summary>16% implemented</summary>
 - [ ] create_member
-- [ ] create_network
+- [X] create_network
 - [ ] create_node
 - [ ] create_proposal
 - [ ] delete_member
 - [ ] delete_node
 - [ ] get_member
-- [ ] get_network
+- [X] get_network
 - [ ] get_node
 - [ ] get_proposal
 - [ ] list_invitations
 - [ ] list_members
-- [ ] list_networks
+- [X] list_networks
 - [ ] list_nodes
 - [ ] list_proposal_votes
 - [ ] list_proposals
@@ -7392,11 +7392,11 @@
 ## ses
 <details>
-<summary>14% implemented</summary>
+<summary>18% implemented</summary>
 - [ ] clone_receipt_rule_set
-- [ ] create_configuration_set
-- [ ] create_configuration_set_event_destination
+- [X] create_configuration_set
+- [X] create_configuration_set_event_destination
 - [ ] create_configuration_set_tracking_options
 - [ ] create_custom_verification_email_template
 - [ ] create_receipt_filter
@@ -7426,7 +7426,7 @@
 - [ ] get_identity_policies
 - [ ] get_identity_verification_attributes
 - [X] get_send_quota
-- [ ] get_send_statistics
+- [X] get_send_statistics
 - [ ] get_template
 - [ ] list_configuration_sets
 - [ ] list_custom_verification_email_templates

View File

@@ -73,6 +73,7 @@ mock_kms = lazy_load(".kms", "mock_kms")
 mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
 mock_logs = lazy_load(".logs", "mock_logs")
 mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
+mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
 mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
 mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
 mock_organizations = lazy_load(".organizations", "mock_organizations")

View File

@@ -56,8 +56,10 @@ class Deployment(BaseModel, dict):
 class IntegrationResponse(BaseModel, dict):
-    def __init__(self, status_code, selection_pattern=None):
-        self["responseTemplates"] = {"application/json": None}
+    def __init__(self, status_code, selection_pattern=None, response_templates=None):
+        if response_templates is None:
+            response_templates = {"application/json": None}
+        self["responseTemplates"] = response_templates
         self["statusCode"] = status_code
         if selection_pattern:
             self["selectionPattern"] = selection_pattern
@@ -72,8 +74,14 @@ class Integration(BaseModel, dict):
         self["requestTemplates"] = request_templates
         self["integrationResponses"] = {"200": IntegrationResponse(200)}
 
-    def create_integration_response(self, status_code, selection_pattern):
-        integration_response = IntegrationResponse(status_code, selection_pattern)
+    def create_integration_response(
+        self, status_code, selection_pattern, response_templates
+    ):
+        if response_templates == {}:
+            response_templates = None
+        integration_response = IntegrationResponse(
+            status_code, selection_pattern, response_templates
+        )
         self["integrationResponses"][status_code] = integration_response
         return integration_response
@@ -956,7 +964,7 @@ class APIGatewayBackend(BaseBackend):
             raise InvalidRequestInput()
         integration = self.get_integration(function_id, resource_id, method_type)
         integration_response = integration.create_integration_response(
-            status_code, selection_pattern
+            status_code, selection_pattern, response_templates
         )
         return integration_response

View File

@@ -419,11 +419,8 @@ class FakeAutoScalingGroup(BaseModel):
         curr_instance_count = len(self.active_instances())
 
         if self.desired_capacity == curr_instance_count:
-            self.autoscaling_backend.update_attached_elbs(self.name)
-            self.autoscaling_backend.update_attached_target_groups(self.name)
-            return
-
-        if self.desired_capacity > curr_instance_count:
+            pass  # Nothing to do here
+        elif self.desired_capacity > curr_instance_count:
             # Need more instances
             count_needed = int(self.desired_capacity) - int(curr_instance_count)
@@ -447,6 +444,7 @@ class FakeAutoScalingGroup(BaseModel):
             self.instance_states = list(
                 set(self.instance_states) - set(instances_to_remove)
             )
+        if self.name in self.autoscaling_backend.autoscaling_groups:
             self.autoscaling_backend.update_attached_elbs(self.name)
             self.autoscaling_backend.update_attached_target_groups(self.name)
@@ -695,6 +693,7 @@ class AutoScalingBackend(BaseBackend):
         )
         group.instance_states.extend(new_instances)
         self.update_attached_elbs(group.name)
+        self.update_attached_target_groups(group.name)
 
     def set_instance_health(
         self, instance_id, health_status, should_respect_grace_period
@@ -938,7 +937,6 @@ class AutoScalingBackend(BaseBackend):
                 standby_instances.append(instance_state)
         if should_decrement:
             group.desired_capacity = group.desired_capacity - len(instance_ids)
-        else:
-            group.set_desired_capacity(group.desired_capacity)
+        group.set_desired_capacity(group.desired_capacity)
         return standby_instances, original_size, group.desired_capacity
@@ -951,6 +949,7 @@ class AutoScalingBackend(BaseBackend):
             instance_state.lifecycle_state = "InService"
             standby_instances.append(instance_state)
         group.desired_capacity = group.desired_capacity + len(instance_ids)
+        group.set_desired_capacity(group.desired_capacity)
         return standby_instances, original_size, group.desired_capacity
 
     def terminate_instance(self, instance_id, should_decrement):

View File

@@ -5,6 +5,8 @@ import time
 from collections import defaultdict
 import copy
 import datetime
+from gzip import GzipFile
+
 import docker
 import docker.errors
 import hashlib
@@ -983,6 +985,28 @@ class LambdaBackend(BaseBackend):
         func = self._lambdas.get_arn(function_arn)
         return func.invoke(json.dumps(event), {}, {})
 
+    def send_log_event(
+        self, function_arn, filter_name, log_group_name, log_stream_name, log_events
+    ):
+        data = {
+            "messageType": "DATA_MESSAGE",
+            "owner": ACCOUNT_ID,
+            "logGroup": log_group_name,
+            "logStream": log_stream_name,
+            "subscriptionFilters": [filter_name],
+            "logEvents": log_events,
+        }
+
+        output = io.BytesIO()
+        with GzipFile(fileobj=output, mode="w") as f:
+            f.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
+        payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8")
+
+        event = {"awslogs": {"data": payload_gz_encoded}}
+
+        func = self._lambdas.get_arn(function_arn)
+        return func.invoke(json.dumps(event), {}, {})
+
     def list_tags(self, resource):
         return self.get_function_by_arn(resource).tags
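
For reference, the envelope built by send_log_event above is the standard CloudWatch Logs-to-Lambda encoding: gzip-compressed JSON, base64-encoded, wrapped in {"awslogs": {"data": ...}}. A minimal sketch of how a receiving handler could unpack it (the decode_awslogs helper is illustrative, not part of this commit):

```python
import base64
import gzip
import json


def decode_awslogs(event):
    # event["awslogs"]["data"] is base64-encoded, gzip-compressed JSON
    compressed = base64.b64decode(event["awslogs"]["data"])
    payload = json.loads(gzip.decompress(compressed).decode("utf-8"))
    # payload carries messageType, owner, logGroup, logStream,
    # subscriptionFilters and logEvents, as assembled in send_log_event
    return payload
```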

View File

@@ -39,6 +39,7 @@ BACKENDS = {
     "kms": ("kms", "kms_backends"),
     "lambda": ("awslambda", "lambda_backends"),
     "logs": ("logs", "logs_backends"),
+    "managedblockchain": ("managedblockchain", "managedblockchain_backends"),
     "moto_api": ("core", "moto_api_backends"),
     "opsworks": ("opsworks", "opsworks_backends"),
     "organizations": ("organizations", "organizations_backends"),

View File

@@ -316,6 +316,12 @@ class Table(BaseModel):
         }
         self.set_stream_specification(streams)
         self.lambda_event_source_mappings = {}
+        self.continuous_backups = {
+            "ContinuousBackupsStatus": "ENABLED",  # One of 'ENABLED'|'DISABLED', it's enabled by default
+            "PointInTimeRecoveryDescription": {
+                "PointInTimeRecoveryStatus": "DISABLED"  # One of 'ENABLED'|'DISABLED'
+            },
+        }
 
     @classmethod
     def create_from_cloudformation_json(
@@ -1282,6 +1288,33 @@ class DynamoDBBackend(BaseBackend):
             self.tables = original_table_state
             raise
 
+    def describe_continuous_backups(self, table_name):
+        table = self.get_table(table_name)
+
+        return table.continuous_backups
+
+    def update_continuous_backups(self, table_name, point_in_time_spec):
+        table = self.get_table(table_name)
+
+        if (
+            point_in_time_spec["PointInTimeRecoveryEnabled"]
+            and table.continuous_backups["PointInTimeRecoveryDescription"][
+                "PointInTimeRecoveryStatus"
+            ]
+            == "DISABLED"
+        ):
+            table.continuous_backups["PointInTimeRecoveryDescription"] = {
+                "PointInTimeRecoveryStatus": "ENABLED",
+                "EarliestRestorableDateTime": unix_time(),
+                "LatestRestorableDateTime": unix_time(),
+            }
+        elif not point_in_time_spec["PointInTimeRecoveryEnabled"]:
+            table.continuous_backups["PointInTimeRecoveryDescription"] = {
+                "PointInTimeRecoveryStatus": "DISABLED"
+            }
+
+        return table.continuous_backups
+
     ######################
     # LIST of methods where the logic completely resides in responses.py
     # Duplicated here so that the implementation coverage script is aware

View File

@ -1,6 +1,10 @@
from abc import abstractmethod from abc import abstractmethod
from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType from moto.dynamodb2.exceptions import (
IncorrectOperandType,
IncorrectDataType,
ProvidedKeyDoesNotExist,
)
from moto.dynamodb2.models import DynamoType from moto.dynamodb2.models import DynamoType
from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType
from moto.dynamodb2.parsing.ast_nodes import ( from moto.dynamodb2.parsing.ast_nodes import (
@ -193,7 +197,18 @@ class AddExecutor(NodeExecutor):
value_to_add = self.get_action_value() value_to_add = self.get_action_value()
if isinstance(value_to_add, DynamoType): if isinstance(value_to_add, DynamoType):
if value_to_add.is_set(): if value_to_add.is_set():
try:
current_string_set = self.get_item_at_end_of_path(item) current_string_set = self.get_item_at_end_of_path(item)
except ProvidedKeyDoesNotExist:
current_string_set = DynamoType({value_to_add.type: []})
SetExecutor.set(
item_part_to_modify_with_set=self.get_item_before_end_of_path(
item
),
element_to_set=self.get_element_to_action(),
value_to_set=current_string_set,
expression_attribute_names=self.expression_attribute_names,
)
assert isinstance(current_string_set, DynamoType) assert isinstance(current_string_set, DynamoType)
if not current_string_set.type == value_to_add.type: if not current_string_set.type == value_to_add.type:
raise IncorrectDataType() raise IncorrectDataType()
@ -204,7 +219,11 @@ class AddExecutor(NodeExecutor):
else: else:
current_string_set.value.append(value) current_string_set.value.append(value)
elif value_to_add.type == DDBType.NUMBER: elif value_to_add.type == DDBType.NUMBER:
try:
existing_value = self.get_item_at_end_of_path(item) existing_value = self.get_item_at_end_of_path(item)
except ProvidedKeyDoesNotExist:
existing_value = DynamoType({DDBType.NUMBER: "0"})
assert isinstance(existing_value, DynamoType) assert isinstance(existing_value, DynamoType)
if not existing_value.type == DDBType.NUMBER: if not existing_value.type == DDBType.NUMBER:
raise IncorrectDataType() raise IncorrectDataType()

View File

@@ -919,3 +919,32 @@ class DynamoHandler(BaseResponse):
             )
         response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}}
         return dynamo_json_dump(response)
+
+    def describe_continuous_backups(self):
+        name = self.body["TableName"]
+
+        if self.dynamodb_backend.get_table(name) is None:
+            return self.error(
+                "com.amazonaws.dynamodb.v20111205#TableNotFoundException",
+                "Table not found: {}".format(name),
+            )
+
+        response = self.dynamodb_backend.describe_continuous_backups(name)
+
+        return json.dumps({"ContinuousBackupsDescription": response})
+
+    def update_continuous_backups(self):
+        name = self.body["TableName"]
+        point_in_time_spec = self.body["PointInTimeRecoverySpecification"]
+
+        if self.dynamodb_backend.get_table(name) is None:
+            return self.error(
+                "com.amazonaws.dynamodb.v20111205#TableNotFoundException",
+                "Table not found: {}".format(name),
+            )
+
+        response = self.dynamodb_backend.update_continuous_backups(
+            name, point_in_time_spec
+        )
+
+        return json.dumps({"ContinuousBackupsDescription": response})
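
Taken together with the model changes above, the new endpoints can be exercised end to end. A minimal sketch, assuming boto3 against moto's DynamoDB mock (table name and key schema are illustrative):

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_continuous_backups_roundtrip():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="messages",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
    )

    # Continuous backups are ENABLED by default; point-in-time recovery is not
    desc = client.describe_continuous_backups(TableName="messages")
    assert desc["ContinuousBackupsDescription"]["ContinuousBackupsStatus"] == "ENABLED"

    # Enabling PITR populates the restorable-time window
    desc = client.update_continuous_backups(
        TableName="messages",
        PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
    )
    pitr = desc["ContinuousBackupsDescription"]["PointInTimeRecoveryDescription"]
    assert pitr["PointInTimeRecoveryStatus"] == "ENABLED"
```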

View File

@@ -560,8 +560,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
             # worst case we'll get IP address exaustion... rarely
             pass
 
-    def add_block_device(self, size, device_path):
-        volume = self.ec2_backend.create_volume(size, self.region_name)
+    def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False):
+        volume = self.ec2_backend.create_volume(
+            size, self.region_name, snapshot_id, encrypted
+        )
         self.ec2_backend.attach_volume(volume.id, self.id, device_path)
 
     def setup_defaults(self):
@@ -891,8 +893,12 @@ class InstanceBackend(object):
             new_instance.add_tags(instance_tags)
             if "block_device_mappings" in kwargs:
                 for block_device in kwargs["block_device_mappings"]:
+                    device_name = block_device["DeviceName"]
+                    volume_size = block_device["Ebs"].get("VolumeSize")
+                    snapshot_id = block_device["Ebs"].get("SnapshotId")
+                    encrypted = block_device["Ebs"].get("Encrypted", False)
                     new_instance.add_block_device(
-                        block_device["Ebs"]["VolumeSize"], block_device["DeviceName"]
+                        volume_size, device_name, snapshot_id, encrypted
                     )
             else:
                 new_instance.setup_defaults()

View File

@@ -4,10 +4,16 @@ from boto.ec2.instancetype import InstanceType
 from moto.autoscaling import autoscaling_backends
 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores
-from moto.ec2.utils import filters_from_querystring, dict_from_querystring
+from moto.ec2.exceptions import MissingParameterError
+from moto.ec2.utils import (
+    filters_from_querystring,
+    dict_from_querystring,
+)
 from moto.elbv2 import elbv2_backends
 from moto.core import ACCOUNT_ID
 
+from copy import deepcopy
+
 
 class InstanceResponse(BaseResponse):
     def describe_instances(self):
@@ -44,40 +50,31 @@ class InstanceResponse(BaseResponse):
         owner_id = self._get_param("OwnerId")
         user_data = self._get_param("UserData")
         security_group_names = self._get_multi_param("SecurityGroup")
-        security_group_ids = self._get_multi_param("SecurityGroupId")
-        nics = dict_from_querystring("NetworkInterface", self.querystring)
-        instance_type = self._get_param("InstanceType", if_none="m1.small")
-        placement = self._get_param("Placement.AvailabilityZone")
-        subnet_id = self._get_param("SubnetId")
-        private_ip = self._get_param("PrivateIpAddress")
-        associate_public_ip = self._get_param("AssociatePublicIpAddress")
-        key_name = self._get_param("KeyName")
-        ebs_optimized = self._get_param("EbsOptimized") or False
-        instance_initiated_shutdown_behavior = self._get_param(
-            "InstanceInitiatedShutdownBehavior"
-        )
-        tags = self._parse_tag_specification("TagSpecification")
-        region_name = self.region
+        kwargs = {
+            "instance_type": self._get_param("InstanceType", if_none="m1.small"),
+            "placement": self._get_param("Placement.AvailabilityZone"),
+            "region_name": self.region,
+            "subnet_id": self._get_param("SubnetId"),
+            "owner_id": owner_id,
+            "key_name": self._get_param("KeyName"),
+            "security_group_ids": self._get_multi_param("SecurityGroupId"),
+            "nics": dict_from_querystring("NetworkInterface", self.querystring),
+            "private_ip": self._get_param("PrivateIpAddress"),
+            "associate_public_ip": self._get_param("AssociatePublicIpAddress"),
+            "tags": self._parse_tag_specification("TagSpecification"),
+            "ebs_optimized": self._get_param("EbsOptimized") or False,
+            "instance_initiated_shutdown_behavior": self._get_param(
+                "InstanceInitiatedShutdownBehavior"
+            ),
+        }
+
+        mappings = self._parse_block_device_mapping()
+        if mappings:
+            kwargs["block_device_mappings"] = mappings
 
         if self.is_not_dryrun("RunInstance"):
             new_reservation = self.ec2_backend.add_instances(
-                image_id,
-                min_count,
-                user_data,
-                security_group_names,
-                instance_type=instance_type,
-                placement=placement,
-                region_name=region_name,
-                subnet_id=subnet_id,
-                owner_id=owner_id,
-                key_name=key_name,
-                security_group_ids=security_group_ids,
-                nics=nics,
-                private_ip=private_ip,
-                associate_public_ip=associate_public_ip,
-                tags=tags,
-                ebs_optimized=ebs_optimized,
-                instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
+                image_id, min_count, user_data, security_group_names, **kwargs
             )
 
         template = self.response_template(EC2_RUN_INSTANCES)
@@ -272,6 +269,58 @@ class InstanceResponse(BaseResponse):
             )
             return EC2_MODIFY_INSTANCE_ATTRIBUTE
 
+    def _parse_block_device_mapping(self):
+        device_mappings = self._get_list_prefix("BlockDeviceMapping")
+        mappings = []
+        for device_mapping in device_mappings:
+            self._validate_block_device_mapping(device_mapping)
+            device_template = deepcopy(BLOCK_DEVICE_MAPPING_TEMPLATE)
+            device_template["VirtualName"] = device_mapping.get("virtual_name")
+            device_template["DeviceName"] = device_mapping.get("device_name")
+            device_template["Ebs"]["SnapshotId"] = device_mapping.get(
+                "ebs._snapshot_id"
+            )
+            device_template["Ebs"]["VolumeSize"] = device_mapping.get(
+                "ebs._volume_size"
+            )
+            device_template["Ebs"]["DeleteOnTermination"] = device_mapping.get(
+                "ebs._delete_on_termination", False
+            )
+            device_template["Ebs"]["VolumeType"] = device_mapping.get(
+                "ebs._volume_type"
+            )
+            device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops")
+            device_template["Ebs"]["Encrypted"] = device_mapping.get(
+                "ebs._encrypted", False
+            )
+            mappings.append(device_template)
+
+        return mappings
+
+    @staticmethod
+    def _validate_block_device_mapping(device_mapping):
+        if not any(mapping for mapping in device_mapping if mapping.startswith("ebs.")):
+            raise MissingParameterError("ebs")
+        if (
+            "ebs._volume_size" not in device_mapping
+            and "ebs._snapshot_id" not in device_mapping
+        ):
+            raise MissingParameterError("size or snapshotId")
+
+
+BLOCK_DEVICE_MAPPING_TEMPLATE = {
+    "VirtualName": None,
+    "DeviceName": None,
+    "Ebs": {
+        "SnapshotId": None,
+        "VolumeSize": None,
+        "DeleteOnTermination": None,
+        "VolumeType": None,
+        "Iops": None,
+        "Encrypted": None,
+    },
+}
+
 
 EC2_RUN_INSTANCES = (
     """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
View File

@@ -857,8 +857,30 @@ class IoTBackend(BaseBackend):
         del self.thing_groups[thing_group.arn]
 
     def list_thing_groups(self, parent_group, name_prefix_filter, recursive):
-        thing_groups = self.thing_groups.values()
-        return thing_groups
+        if recursive is None:
+            recursive = True
+        if name_prefix_filter is None:
+            name_prefix_filter = ""
+        if parent_group and parent_group not in [
+            _.thing_group_name for _ in self.thing_groups.values()
+        ]:
+            raise ResourceNotFoundException()
+        thing_groups = [
+            _ for _ in self.thing_groups.values() if _.parent_group_name == parent_group
+        ]
+        if recursive:
+            for g in thing_groups:
+                thing_groups.extend(
+                    self.list_thing_groups(
+                        parent_group=g.thing_group_name,
+                        name_prefix_filter=None,
+                        recursive=False,
+                    )
+                )
+        # thing_groups = groups_to_process.values()
+        return [
+            _ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter)
+        ]
 
     def update_thing_group(
         self, thing_group_name, thing_group_properties, expected_version

View File

@@ -535,7 +535,7 @@ class IoTResponse(BaseResponse):
         # max_results = self._get_int_param("maxResults")
         parent_group = self._get_param("parentGroup")
         name_prefix_filter = self._get_param("namePrefixFilter")
-        recursive = self._get_param("recursive")
+        recursive = self._get_bool_param("recursive")
         thing_groups = self.iot_backend.list_thing_groups(
             parent_group=parent_group,
             name_prefix_filter=name_prefix_filter,
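
A sketch of the recursive listing this enables, assuming boto3 against the IoT mock (group names are illustrative; with no parentGroup given, the backend now defaults recursive to true and walks down from the root groups):

```python
import boto3
from moto import mock_iot


@mock_iot
def test_list_thing_groups_recursively():
    client = boto3.client("iot", region_name="us-east-1")
    client.create_thing_group(thingGroupName="parent")
    client.create_thing_group(thingGroupName="child", parentGroupName="parent")

    # Recursive by default: both the root group and its child are returned
    names = [g["groupName"] for g in client.list_thing_groups()["thingGroups"]]
    assert "parent" in names and "child" in names

    # namePrefixFilter narrows the flattened result
    names = [
        g["groupName"]
        for g in client.list_thing_groups(namePrefixFilter="chi")["thingGroups"]
    ]
    assert names == ["child"]
```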

View File

@@ -7,10 +7,10 @@ class LogsClientError(JsonRESTError):
 
 class ResourceNotFoundException(LogsClientError):
-    def __init__(self):
+    def __init__(self, msg=None):
         self.code = 400
         super(ResourceNotFoundException, self).__init__(
-            "ResourceNotFoundException", "The specified resource does not exist"
+            "ResourceNotFoundException", msg or "The specified log group does not exist"
         )
 
@@ -28,3 +28,11 @@ class ResourceAlreadyExistsException(LogsClientError):
         super(ResourceAlreadyExistsException, self).__init__(
             "ResourceAlreadyExistsException", "The specified log group already exists"
         )
+
+
+class LimitExceededException(LogsClientError):
+    def __init__(self):
+        self.code = 400
+        super(LimitExceededException, self).__init__(
+            "LimitExceededException", "Resource limit exceeded."
+        )

View File

@@ -6,6 +6,7 @@ from .exceptions import (
     ResourceNotFoundException,
     ResourceAlreadyExistsException,
     InvalidParameterException,
+    LimitExceededException,
 )
@@ -57,6 +58,8 @@ class LogStream:
             0  # I'm guessing this is token needed for sequenceToken by put_events
         )
         self.events = []
+        self.destination_arn = None
+        self.filter_name = None
 
         self.__class__._log_ids += 1
@@ -97,11 +100,32 @@ class LogStream:
         self.lastIngestionTime = int(unix_time_millis())
         # TODO: make this match AWS if possible
         self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
-        self.events += [
+        events = [
             LogEvent(self.lastIngestionTime, log_event) for log_event in log_events
         ]
+        self.events += events
         self.uploadSequenceToken += 1
 
+        if self.destination_arn and self.destination_arn.split(":")[2] == "lambda":
+            from moto.awslambda import lambda_backends  # due to circular dependency
+
+            lambda_log_events = [
+                {
+                    "id": event.eventId,
+                    "timestamp": event.timestamp,
+                    "message": event.message,
+                }
+                for event in events
+            ]
+
+            lambda_backends[self.region].send_log_event(
+                self.destination_arn,
+                self.filter_name,
+                log_group_name,
+                log_stream_name,
+                lambda_log_events,
+            )
+
         return "{:056d}".format(self.uploadSequenceToken)
 
     def get_log_events(
@@ -227,6 +251,7 @@ class LogGroup:
         self.retention_in_days = kwargs.get(
             "RetentionInDays"
         )  # AWS defaults to Never Expire for log group retention
+        self.subscription_filters = []
 
     def create_log_stream(self, log_stream_name):
         if log_stream_name in self.streams:
@@ -386,6 +411,48 @@ class LogGroup:
             k: v for (k, v) in self.tags.items() if k not in tags_to_remove
         }
 
+    def describe_subscription_filters(self):
+        return self.subscription_filters
+
+    def put_subscription_filter(
+        self, filter_name, filter_pattern, destination_arn, role_arn
+    ):
+        creation_time = int(unix_time_millis())
+
+        # only one subscription filter can be associated with a log group
+        if self.subscription_filters:
+            if self.subscription_filters[0]["filterName"] == filter_name:
+                creation_time = self.subscription_filters[0]["creationTime"]
+            else:
+                raise LimitExceededException
+
+        for stream in self.streams.values():
+            stream.destination_arn = destination_arn
+            stream.filter_name = filter_name
+
+        self.subscription_filters = [
+            {
+                "filterName": filter_name,
+                "logGroupName": self.name,
+                "filterPattern": filter_pattern,
+                "destinationArn": destination_arn,
+                "roleArn": role_arn,
+                "distribution": "ByLogStream",
+                "creationTime": creation_time,
+            }
+        ]
+
+    def delete_subscription_filter(self, filter_name):
+        if (
+            not self.subscription_filters
+            or self.subscription_filters[0]["filterName"] != filter_name
+        ):
+            raise ResourceNotFoundException(
+                "The specified subscription filter does not exist."
+            )
+
+        self.subscription_filters = []
+
 
 class LogsBackend(BaseBackend):
     def __init__(self, region_name):
@@ -557,6 +624,46 @@ class LogsBackend(BaseBackend):
         log_group = self.groups[log_group_name]
         log_group.untag(tags)
 
+    def describe_subscription_filters(self, log_group_name):
+        log_group = self.groups.get(log_group_name)
+
+        if not log_group:
+            raise ResourceNotFoundException()
+
+        return log_group.describe_subscription_filters()
+
+    def put_subscription_filter(
+        self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn
+    ):
+        # TODO: support other destinations like Kinesis stream
+        from moto.awslambda import lambda_backends  # due to circular dependency
+
+        log_group = self.groups.get(log_group_name)
+
+        if not log_group:
+            raise ResourceNotFoundException()
+
+        lambda_func = lambda_backends[self.region_name].get_function(destination_arn)
+
+        # no specific permission check implemented
+        if not lambda_func:
+            raise InvalidParameterException(
+                "Could not execute the lambda function. "
+                "Make sure you have given CloudWatch Logs permission to execute your function."
+            )
+
+        log_group.put_subscription_filter(
+            filter_name, filter_pattern, destination_arn, role_arn
+        )
+
+    def delete_subscription_filter(self, log_group_name, filter_name):
+        log_group = self.groups.get(log_group_name)
+
+        if not log_group:
+            raise ResourceNotFoundException()
+
+        log_group.delete_subscription_filter(filter_name)
+
 
 logs_backends = {}
 for region in Session().get_available_regions("logs"):

View File

@@ -178,3 +178,33 @@ class LogsResponse(BaseResponse):
         tags = self._get_param("tags")
         self.logs_backend.untag_log_group(log_group_name, tags)
         return ""
+
+    def describe_subscription_filters(self):
+        log_group_name = self._get_param("logGroupName")
+
+        subscription_filters = self.logs_backend.describe_subscription_filters(
+            log_group_name
+        )
+
+        return json.dumps({"subscriptionFilters": subscription_filters})
+
+    def put_subscription_filter(self):
+        log_group_name = self._get_param("logGroupName")
+        filter_name = self._get_param("filterName")
+        filter_pattern = self._get_param("filterPattern")
+        destination_arn = self._get_param("destinationArn")
+        role_arn = self._get_param("roleArn")
+
+        self.logs_backend.put_subscription_filter(
+            log_group_name, filter_name, filter_pattern, destination_arn, role_arn
+        )
+
+        return ""
+
+    def delete_subscription_filter(self):
+        log_group_name = self._get_param("logGroupName")
+        filter_name = self._get_param("filterName")
+
+        self.logs_backend.delete_subscription_filter(log_group_name, filter_name)
+
+        return ""

View File

@@ -0,0 +1,9 @@
+from __future__ import unicode_literals
+from .models import managedblockchain_backends
+from ..core.models import base_decorator, deprecated_base_decorator
+
+managedblockchain_backend = managedblockchain_backends["us-east-1"]
+mock_managedblockchain = base_decorator(managedblockchain_backends)
+mock_managedblockchain_deprecated = deprecated_base_decorator(
+    managedblockchain_backends
+)

View File

@@ -0,0 +1,27 @@
+from __future__ import unicode_literals
+
+from moto.core.exceptions import RESTError
+
+
+class ManagedBlockchainClientError(RESTError):
+    code = 400
+
+
+class BadRequestException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        super(BadRequestException, self).__init__(
+            "BadRequestException",
+            "An error occurred (BadRequestException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class ResourceNotFoundException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        self.code = 404
+        super(ResourceNotFoundException, self).__init__(
+            "ResourceNotFoundException",
+            "An error occurred (ResourceNotFoundException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )

View File

@@ -0,0 +1,176 @@
+from __future__ import unicode_literals
+
+import datetime
+
+from boto3 import Session
+
+from moto.core import BaseBackend, BaseModel
+from .exceptions import BadRequestException, ResourceNotFoundException
+from .utils import get_network_id, get_member_id
+
+FRAMEWORKS = [
+    "HYPERLEDGER_FABRIC",
+]
+
+FRAMEWORKVERSIONS = [
+    "1.2",
+]
+
+EDITIONS = [
+    "STARTER",
+    "STANDARD",
+]
+
+
+class ManagedBlockchainNetwork(BaseModel):
+    def __init__(
+        self,
+        id,
+        name,
+        framework,
+        frameworkversion,
+        frameworkconfiguration,
+        voting_policy,
+        member_configuration,
+        region,
+        description=None,
+    ):
+        self.creationdate = datetime.datetime.utcnow()
+        self.id = id
+        self.name = name
+        self.description = description
+        self.framework = framework
+        self.frameworkversion = frameworkversion
+        self.frameworkconfiguration = frameworkconfiguration
+        self.voting_policy = voting_policy
+        self.member_configuration = member_configuration
+        self.region = region
+
+    def to_dict(self):
+        # Format for list_networks
+        d = {
+            "Id": self.id,
+            "Name": self.name,
+            "Framework": self.framework,
+            "FrameworkVersion": self.frameworkversion,
+            "Status": "AVAILABLE",
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+        }
+        if self.description is not None:
+            d["Description"] = self.description
+        return d
+
+    def get_format(self):
+        # Format for get_network
+        frameworkattributes = {
+            "Fabric": {
+                "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format(
+                    self.id.lower(), self.region
+                ),
+                "Edition": self.frameworkconfiguration["Fabric"]["Edition"],
+            }
+        }
+
+        vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format(
+            self.region, self.id.lower()
+        )
+
+        d = {
+            "Id": self.id,
+            "Name": self.name,
+            "Framework": self.framework,
+            "FrameworkVersion": self.frameworkversion,
+            "FrameworkAttributes": frameworkattributes,
+            "VpcEndpointServiceName": vpcendpointname,
+            "VotingPolicy": self.voting_policy,
+            "Status": "AVAILABLE",
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+        }
+        if self.description is not None:
+            d["Description"] = self.description
+        return d
+
+
+class ManagedBlockchainBackend(BaseBackend):
+    def __init__(self, region_name):
+        self.networks = {}
+        self.region_name = region_name
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def create_network(
+        self,
+        name,
+        framework,
+        frameworkversion,
+        frameworkconfiguration,
+        voting_policy,
+        member_configuration,
+        description=None,
+    ):
+        self.name = name
+        self.framework = framework
+        self.frameworkversion = frameworkversion
+        self.frameworkconfiguration = frameworkconfiguration
+        self.voting_policy = voting_policy
+        self.member_configuration = member_configuration
+        self.description = description
+
+        # Check framework
+        if framework not in FRAMEWORKS:
+            raise BadRequestException("CreateNetwork", "Invalid request body")
+
+        # Check framework version
+        if frameworkversion not in FRAMEWORKVERSIONS:
+            raise BadRequestException(
+                "CreateNetwork",
+                "Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format(
+                    frameworkversion
+                ),
+            )
+
+        # Check edition
+        if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS:
+            raise BadRequestException("CreateNetwork", "Invalid request body")
+
+        ## Generate network ID
+        network_id = get_network_id()
+
+        ## Generate member ID - will need to actually create member
+        member_id = get_member_id()
+
+        self.networks[network_id] = ManagedBlockchainNetwork(
+            id=network_id,
+            name=name,
+            framework=self.framework,
+            frameworkversion=self.frameworkversion,
+            frameworkconfiguration=self.frameworkconfiguration,
+            voting_policy=self.voting_policy,
+            member_configuration=self.member_configuration,
+            region=self.region_name,
+            description=self.description,
+        )
+
+        # Return the network and member ID
+        d = {"NetworkId": network_id, "MemberId": member_id}
+        return d
+
+    def list_networks(self):
+        return self.networks.values()
+
+    def get_network(self, network_id):
+        if network_id not in self.networks:
+            raise ResourceNotFoundException(
+                "GetNetwork", "Network {0} not found".format(network_id)
+            )
+        return self.networks.get(network_id)
+
+
+managedblockchain_backends = {}
+for region in Session().get_available_regions("managedblockchain"):
+    managedblockchain_backends[region] = ManagedBlockchainBackend(region)
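
A usage sketch for the new backend, assuming boto3 against mock_managedblockchain (all names and policy values are illustrative):

```python
import boto3
from moto import mock_managedblockchain


@mock_managedblockchain
def test_create_and_get_network():
    client = boto3.client("managedblockchain", region_name="us-east-1")
    response = client.create_network(
        Name="testnetwork",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration={"Fabric": {"Edition": "STARTER"}},
        VotingPolicy={
            "ApprovalThresholdPolicy": {
                "ThresholdPercentage": 50,
                "ProposalDurationInHours": 24,
                "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
            }
        },
        MemberConfiguration={
            "Name": "testmember1",
            "FrameworkConfiguration": {
                "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
            },
        },
    )
    network_id = response["NetworkId"]

    network = client.get_network(NetworkId=network_id)["Network"]
    assert network["Status"] == "AVAILABLE"
    assert network["FrameworkAttributes"]["Fabric"]["Edition"] == "STARTER"
```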

View File

@@ -0,0 +1,90 @@
+from __future__ import unicode_literals
+
+import json
+from six.moves.urllib.parse import urlparse, parse_qs
+
+from moto.core.responses import BaseResponse
+from .models import managedblockchain_backends
+from .utils import (
+    region_from_managedblckchain_url,
+    networkid_from_managedblockchain_url,
+)
+
+
+class ManagedBlockchainResponse(BaseResponse):
+    def __init__(self, backend):
+        super(ManagedBlockchainResponse, self).__init__()
+        self.backend = backend
+
+    @classmethod
+    def network_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblckchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._network_response(request, full_url, headers)
+
+    def _network_response(self, request, full_url, headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        parsed_url = urlparse(full_url)
+        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
+        if method == "GET":
+            return self._all_networks_response(request, full_url, headers)
+        elif method == "POST":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._network_response_post(json_body, querystring, headers)
+
+    def _all_networks_response(self, request, full_url, headers):
+        mbcnetworks = self.backend.list_networks()
+        response = json.dumps(
+            {"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]}
+        )
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _network_response_post(self, json_body, querystring, headers):
+        name = json_body["Name"]
+        framework = json_body["Framework"]
+        frameworkversion = json_body["FrameworkVersion"]
+        frameworkconfiguration = json_body["FrameworkConfiguration"]
+        voting_policy = json_body["VotingPolicy"]
+        member_configuration = json_body["MemberConfiguration"]
+
+        # Optional
+        description = json_body.get("Description", None)
+
+        response = self.backend.create_network(
+            name,
+            framework,
+            frameworkversion,
+            frameworkconfiguration,
+            voting_policy,
+            member_configuration,
+            description,
+        )
+        return 201, headers, json.dumps(response)
+
+    @classmethod
+    def networkid_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblckchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._networkid_response(request, full_url, headers)
+
+    def _networkid_response(self, request, full_url, headers):
+        method = request.method
+
+        if method == "GET":
+            network_id = networkid_from_managedblockchain_url(full_url)
+            return self._networkid_response_get(network_id, headers)
+
+    def _networkid_response_get(self, network_id, headers):
+        mbcnetwork = self.backend.get_network(network_id)
+        response = json.dumps({"Network": mbcnetwork.get_format()})
+        headers["content-type"] = "application/json"
+        return 200, headers, response

View File

@@ -0,0 +1,9 @@
+from __future__ import unicode_literals
+from .responses import ManagedBlockchainResponse
+
+url_bases = ["https?://managedblockchain.(.+).amazonaws.com"]
+
+url_paths = {
+    "{0}/networks$": ManagedBlockchainResponse.network_response,
+    "{0}/networks/(?P<networkid>[^/.]+)$": ManagedBlockchainResponse.networkid_response,
+}

View File

@@ -0,0 +1,29 @@
+import random
+import string
+
+from six.moves.urllib.parse import urlparse
+
+
+def region_from_managedblckchain_url(url):
+    domain = urlparse(url).netloc
+
+    if "." in domain:
+        return domain.split(".")[1]
+    else:
+        return "us-east-1"
+
+
+def networkid_from_managedblockchain_url(full_url):
+    return full_url.split("/")[-1]
+
+
+def get_network_id():
+    return "n-" + "".join(
+        random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+    )
+
+
+def get_member_id():
+    return "m-" + "".join(
+        random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+    )

View File

@@ -125,6 +125,9 @@ class OpsworkInstance(BaseModel):
     def status(self):
         if self.instance is None:
             return "stopped"
+        # OpsWorks reports the "running" state as "online"
+        elif self.instance._state.name == "running":
+            return "online"
         return self.instance._state.name
 
     def to_dict(self):

View File

@@ -136,3 +136,10 @@ class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError):
                 cluster_identifier
             ),
         )
+
+
+class ClusterAlreadyExistsFaultError(RedshiftClientError):
+    def __init__(self):
+        super(ClusterAlreadyExistsFaultError, self).__init__(
+            "ClusterAlreadyExists", "Cluster already exists"
+        )

View File

@@ -10,6 +10,7 @@ from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_with_milliseconds
 from moto.ec2 import ec2_backends
 from .exceptions import (
+    ClusterAlreadyExistsFaultError,
     ClusterNotFoundError,
     ClusterParameterGroupNotFoundError,
     ClusterSecurityGroupNotFoundError,
@@ -580,6 +581,8 @@ class RedshiftBackend(BaseBackend):
 
     def create_cluster(self, **cluster_kwargs):
         cluster_identifier = cluster_kwargs["cluster_identifier"]
+        if cluster_identifier in self.clusters:
+            raise ClusterAlreadyExistsFaultError()
         cluster = Cluster(self, **cluster_kwargs)
         self.clusters[cluster_identifier] = cluster
         return cluster
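
A sketch of how the new guard surfaces to callers, assuming boto3 against mock_redshift (identifiers and credentials are illustrative):

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_redshift


@mock_redshift
def test_create_duplicate_cluster_fails():
    client = boto3.client("redshift", region_name="us-east-1")
    kwargs = dict(
        ClusterIdentifier="my-cluster",
        NodeType="ds2.xlarge",
        MasterUsername="admin",
        MasterUserPassword="Password1",
    )
    client.create_cluster(**kwargs)
    try:
        client.create_cluster(**kwargs)
        raise AssertionError("expected ClusterAlreadyExists")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "ClusterAlreadyExists"
```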

View File

@@ -377,3 +377,12 @@ class NoSystemTags(S3ClientError):
         super(NoSystemTags, self).__init__(
             "InvalidTag", "System tags cannot be added/updated by requester"
         )
+
+
+class NoSuchUpload(S3ClientError):
+    code = 404
+
+    def __init__(self):
+        super(NoSuchUpload, self).__init__(
+            "NoSuchUpload", "The specified multipart upload does not exist."
+        )

View File

@@ -40,6 +40,7 @@ from .exceptions import (
     NoSuchPublicAccessBlockConfiguration,
     InvalidPublicAccessBlockConfiguration,
     WrongPublicAccessBlockAccountIdError,
+    NoSuchUpload,
 )
 from .utils import clean_key_name, _VersionedKeyStore
 
@@ -1478,6 +1479,9 @@ class S3Backend(BaseBackend):
 
     def cancel_multipart(self, bucket_name, multipart_id):
         bucket = self.get_bucket(bucket_name)
+        multipart_data = bucket.multiparts.get(multipart_id, None)
+        if not multipart_data:
+            raise NoSuchUpload()
         del bucket.multiparts[multipart_id]
 
     def list_multipart(self, bucket_name, multipart_id):
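
A sketch of the behavior change, assuming boto3 against mock_s3 (bucket, key and upload id are illustrative):

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_s3


@mock_s3
def test_abort_unknown_multipart_upload():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="mybucket")
    try:
        s3.abort_multipart_upload(Bucket="mybucket", Key="the-key", UploadId="unknown")
        raise AssertionError("expected NoSuchUpload")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "NoSuchUpload"
```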

View File

@@ -7,3 +7,21 @@ class MessageRejectedError(RESTError):
 
     def __init__(self, message):
         super(MessageRejectedError, self).__init__("MessageRejected", message)
+
+
+class ConfigurationSetDoesNotExist(RESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(ConfigurationSetDoesNotExist, self).__init__(
+            "ConfigurationSetDoesNotExist", message
+        )
+
+
+class EventDestinationAlreadyExists(RESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(EventDestinationAlreadyExists, self).__init__(
+            "EventDestinationAlreadyExists", message
+        )

View File

@@ -1,11 +1,16 @@
 from __future__ import unicode_literals
 
+import datetime
 import email
 from email.utils import parseaddr
 
 from moto.core import BaseBackend, BaseModel
 from moto.sns.models import sns_backends
-from .exceptions import MessageRejectedError
+from .exceptions import (
+    MessageRejectedError,
+    ConfigurationSetDoesNotExist,
+    EventDestinationAlreadyExists,
+)
 from .utils import get_random_message_id
 from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY
@@ -81,7 +86,11 @@ class SESBackend(BaseBackend):
         self.domains = []
         self.sent_messages = []
         self.sent_message_count = 0
+        self.rejected_messages_count = 0
         self.sns_topics = {}
+        self.config_set = {}
+        self.config_set_event_destination = {}
+        self.event_destinations = {}
 
     def _is_verified_address(self, source):
         _, address = parseaddr(source)
@@ -118,6 +127,7 @@ class SESBackend(BaseBackend):
         if recipient_count > RECIPIENT_LIMIT:
             raise MessageRejectedError("Too many recipients.")
         if not self._is_verified_address(source):
+            self.rejected_messages_count += 1
             raise MessageRejectedError("Email address not verified %s" % source)
 
         self.__process_sns_feedback__(source, destinations, region)
@@ -135,6 +145,7 @@ class SESBackend(BaseBackend):
         if recipient_count > RECIPIENT_LIMIT:
             raise MessageRejectedError("Too many recipients.")
         if not self._is_verified_address(source):
+            self.rejected_messages_count += 1
             raise MessageRejectedError("Email address not verified %s" % source)
 
         self.__process_sns_feedback__(source, destinations, region)
@@ -237,5 +248,34 @@ class SESBackend(BaseBackend):
 
         return {}
 
+    def create_configuration_set(self, configuration_set_name):
+        self.config_set[configuration_set_name] = 1
+        return {}
+
+    def create_configuration_set_event_destination(
+        self, configuration_set_name, event_destination
+    ):
+        if self.config_set.get(configuration_set_name) is None:
+            raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.")
+
+        if self.event_destinations.get(event_destination["Name"]):
+            raise EventDestinationAlreadyExists("Duplicate Event destination Name.")
+
+        self.config_set_event_destination[configuration_set_name] = event_destination
+        self.event_destinations[event_destination["Name"]] = 1
+
+        return {}
+
+    def get_send_statistics(self):
+        statistics = {}
+        statistics["DeliveryAttempts"] = self.sent_message_count
+        statistics["Rejects"] = self.rejected_messages_count
+        statistics["Complaints"] = 0
+        statistics["Bounces"] = 0
+        statistics["Timestamp"] = datetime.datetime.utcnow()
+
+        return statistics
+
 
 ses_backend = SESBackend()

View File

@@ -133,6 +133,48 @@ class EmailResponse(BaseResponse):
         template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE)
         return template.render()
 
+    def get_send_statistics(self):
+        statistics = ses_backend.get_send_statistics()
+        template = self.response_template(GET_SEND_STATISTICS)
+        return template.render(all_statistics=[statistics])
+
+    def create_configuration_set(self):
+        configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0]
+        ses_backend.create_configuration_set(
+            configuration_set_name=configuration_set_name
+        )
+        template = self.response_template(CREATE_CONFIGURATION_SET)
+        return template.render()
+
+    def create_configuration_set_event_destination(self):
+        configuration_set_name = self._get_param("ConfigurationSetName")
+        is_configuration_event_enabled = self.querystring.get(
+            "EventDestination.Enabled"
+        )[0]
+        configuration_event_name = self.querystring.get("EventDestination.Name")[0]
+        event_topic_arn = self.querystring.get(
+            "EventDestination.SNSDestination.TopicARN"
+        )[0]
+        event_matching_types = self._get_multi_param(
+            "EventDestination.MatchingEventTypes.member"
+        )
+
+        event_destination = {
+            "Name": configuration_event_name,
+            "Enabled": is_configuration_event_enabled,
+            "EventMatchingTypes": event_matching_types,
+            "SNSDestination": event_topic_arn,
+        }
+
+        ses_backend.create_configuration_set_event_destination(
+            configuration_set_name=configuration_set_name,
+            event_destination=event_destination,
+        )
+
+        template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION)
+        return template.render()
+
 
 VERIFY_EMAIL_IDENTITY = """<VerifyEmailIdentityResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
     <VerifyEmailIdentityResult/>
@@ -248,3 +290,37 @@ SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE = """<SetIdentityNotificationTopicRespo
     <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
   </ResponseMetadata>
 </SetIdentityNotificationTopicResponse>"""
+
+GET_SEND_STATISTICS = """<GetSendStatisticsResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <GetSendStatisticsResult>
+    <SendDataPoints>
+      {% for statistics in all_statistics %}
+      <item>
+        <DeliveryAttempts>{{ statistics["DeliveryAttempts"] }}</DeliveryAttempts>
+        <Rejects>{{ statistics["Rejects"] }}</Rejects>
+        <Bounces>{{ statistics["Bounces"] }}</Bounces>
+        <Complaints>{{ statistics["Complaints"] }}</Complaints>
+        <Timestamp>{{ statistics["Timestamp"] }}</Timestamp>
+      </item>
+      {% endfor %}
+    </SendDataPoints>
+    <ResponseMetadata>
+      <RequestId>e0abcdfa-c866-11e0-b6d0-273d09173z49</RequestId>
+    </ResponseMetadata>
+  </GetSendStatisticsResult>
+</GetSendStatisticsResponse>"""
+
+CREATE_CONFIGURATION_SET = """<CreateConfigurationSetResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <CreateConfigurationSetResult/>
+  <ResponseMetadata>
+    <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
+  </ResponseMetadata>
+</CreateConfigurationSetResponse>"""
+
+CREATE_CONFIGURATION_SET_EVENT_DESTINATION = """<CreateConfigurationSetEventDestinationResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <CreateConfigurationSetEventDestinationResult/>
+  <ResponseMetadata>
+    <RequestId>67e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
+  </ResponseMetadata>
+</CreateConfigurationSetEventDestinationResponse>"""
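
A usage sketch for the new SES operations, assuming boto3 against mock_ses (set, destination and topic names are illustrative):

```python
import boto3
from moto import mock_ses


@mock_ses
def test_configuration_set_and_statistics():
    client = boto3.client("ses", region_name="us-east-1")
    client.create_configuration_set(ConfigurationSet={"Name": "test"})
    client.create_configuration_set_event_destination(
        ConfigurationSetName="test",
        EventDestination={
            "Name": "snsEvent",
            "Enabled": True,
            "MatchingEventTypes": ["send"],
            "SNSDestination": {
                "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
            },
        },
    )

    # No messages sent or rejected yet, so both counters start at zero
    stats = client.get_send_statistics()["SendDataPoints"]
    assert stats[0]["DeliveryAttempts"] == 0
    assert stats[0]["Rejects"] == 0
```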

View File

@@ -514,6 +514,16 @@ class SimpleSystemManagerBackend(BaseBackend):
 
     def get_parameters(self, names, with_decryption):
         result = []
+
+        if len(names) > 10:
+            raise ValidationException(
+                "1 validation error detected: "
+                "Value '[{}]' at 'names' failed to satisfy constraint: "
+                "Member must have length less than or equal to 10.".format(
+                    ", ".join(names)
+                )
+            )
+
         for name in names:
             if name in self._parameters:
                 result.append(self.get_parameter(name, with_decryption))
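
A sketch of the new validation, assuming boto3 against mock_ssm; botocore does not enforce the ten-name ceiling client-side, so the request reaches the backend and fails there:

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_ssm


@mock_ssm
def test_get_parameters_rejects_more_than_ten_names():
    client = boto3.client("ssm", region_name="us-east-1")
    try:
        client.get_parameters(Names=["param-%d" % i for i in range(11)])
        raise AssertionError("expected ValidationException")
    except ClientError as err:
        assert err.response["Error"]["Code"] == "ValidationException"
```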

View File

@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import json
 import boto3
 from freezegun import freeze_time
@@ -1230,6 +1231,65 @@ def test_put_integration_response_requires_responseTemplate():
     )
 
 
+@mock_apigateway
+def test_put_integration_response_with_response_template():
+    client = boto3.client("apigateway", region_name="us-west-2")
+    response = client.create_rest_api(name="my_api", description="this is my api")
+    api_id = response["id"]
+    resources = client.get_resources(restApiId=api_id)
+    root_id = [resource for resource in resources["items"] if resource["path"] == "/"][
+        0
+    ]["id"]
+
+    client.put_method(
+        restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE"
+    )
+    client.put_method_response(
+        restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
+    )
+    client.put_integration(
+        restApiId=api_id,
+        resourceId=root_id,
+        httpMethod="GET",
+        type="HTTP",
+        uri="http://httpbin.org/robots.txt",
+        integrationHttpMethod="POST",
+    )
+
+    with assert_raises(ClientError) as ex:
+        client.put_integration_response(
+            restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
+        )
+
+    ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
+    ex.exception.response["Error"]["Message"].should.equal("Invalid request input")
+
+    client.put_integration_response(
+        restApiId=api_id,
+        resourceId=root_id,
+        httpMethod="GET",
+        statusCode="200",
+        selectionPattern="foobar",
+        responseTemplates={"application/json": json.dumps({"data": "test"})},
+    )
+
+    response = client.get_integration_response(
+        restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
+    )
+
+    # this is hard to match against, so remove it
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {
+            "statusCode": "200",
+            "selectionPattern": "foobar",
+            "ResponseMetadata": {"HTTPStatusCode": 200},
+            "responseTemplates": {"application/json": json.dumps({"data": "test"})},
+        }
+    )
+
+
 @mock_apigateway
 def test_put_integration_validation():
     client = boto3.client("apigateway", region_name="us-west-2")

View File

@ -1071,6 +1071,7 @@ def test_autoscaling_describe_policies_boto3():
response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down") response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down")
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_detach_one_instance_decrement(): def test_detach_one_instance_decrement():
@ -1096,6 +1097,19 @@ def test_detach_one_instance_decrement():
], ],
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1111,6 +1125,9 @@ def test_detach_one_instance_decrement():
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]]
)
# test to ensure tag has been removed
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
@ -1122,7 +1139,14 @@ def test_detach_one_instance_decrement():
tags = response["Reservations"][0]["Instances"][0]["Tags"] tags = response["Reservations"][0]["Instances"][0]["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_detach_one_instance():
@ -1148,6 +1172,19 @@ def test_detach_one_instance():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1173,7 +1210,14 @@ def test_detach_one_instance():
tags = response["Reservations"][0]["Instances"][0]["Tags"] tags = response["Reservations"][0]["Instances"][0]["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_one_instance_decrement():
@ -1199,6 +1243,19 @@ def test_standby_one_instance_decrement():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1226,7 +1283,14 @@ def test_standby_one_instance_decrement():
tags = instance["Tags"] tags = instance["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_one_instance():
@ -1252,6 +1316,19 @@ def test_standby_one_instance():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1279,6 +1356,12 @@ def test_standby_one_instance():
tags = instance["Tags"] tags = instance["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@ -1338,8 +1421,12 @@ def test_standby_elb_update():
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_terminate_instance_decrement():
@ -1366,6 +1453,18 @@ def test_standby_terminate_instance_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1409,7 +1508,14 @@ def test_standby_terminate_instance_decrement():
"terminated" "terminated"
) )
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_terminate_instance_no_decrement():
@ -1436,6 +1542,18 @@ def test_standby_terminate_instance_no_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1479,7 +1597,14 @@ def test_standby_terminate_instance_no_decrement():
"terminated" "terminated"
) )
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_decrement():
@ -1506,6 +1631,18 @@ def test_standby_detach_instance_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1547,7 +1684,14 @@ def test_standby_detach_instance_decrement():
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_no_decrement():
@ -1574,6 +1718,18 @@ def test_standby_detach_instance_no_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1615,7 +1771,14 @@ def test_standby_detach_instance_no_decrement():
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_exit_standby():
@ -1642,6 +1805,18 @@ def test_standby_exit_standby():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1683,7 +1858,14 @@ def test_standby_exit_standby():
)
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3)
instance_to_standby_exit_standby.should.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_attach_one_instance():
@ -1711,6 +1893,18 @@ def test_attach_one_instance():
NewInstancesProtectedFromScaleIn=True,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
ec2 = boto3.resource("ec2", "us-east-1")
instances_to_add = [
x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1)
@ -1727,6 +1921,9 @@ def test_attach_one_instance():
for instance in instances:
instance["ProtectedFromScaleIn"].should.equal(True)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3)
@mock_autoscaling
@mock_ec2
@ -1948,6 +2145,7 @@ def test_terminate_instance_via_ec2_in_autoscaling_group():
replaced_instance_id.should_not.equal(original_instance_id)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_decrement():
@ -1966,6 +2164,18 @@ def test_terminate_instance_in_auto_scaling_group_decrement():
NewInstancesProtectedFromScaleIn=False,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
@ -1979,7 +2189,11 @@ def test_terminate_instance_in_auto_scaling_group_decrement():
response["AutoScalingGroups"][0]["Instances"].should.equal([]) response["AutoScalingGroups"][0]["Instances"].should.equal([])
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_no_decrement():
@ -1998,6 +2212,18 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement():
NewInstancesProtectedFromScaleIn=False,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
@ -2014,3 +2240,9 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement():
)
replaced_instance_id.should_not.equal(original_instance_id)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
original_instance_id.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)

View File

@ -1,5 +1,6 @@
from __future__ import unicode_literals, print_function
from datetime import datetime
from decimal import Decimal
import boto
@ -2049,6 +2050,141 @@ def test_set_ttl():
resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED") resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED")
@mock_dynamodb2
def test_describe_continuous_backups():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
table_name = client.create_table(
TableName="test",
AttributeDefinitions=[
{"AttributeName": "client", "AttributeType": "S"},
{"AttributeName": "app", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "client", "KeyType": "HASH"},
{"AttributeName": "app", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
)["TableDescription"]["TableName"]
# when
response = client.describe_continuous_backups(TableName=table_name)
# then
response["ContinuousBackupsDescription"].should.equal(
{
"ContinuousBackupsStatus": "ENABLED",
"PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"},
}
)
@mock_dynamodb2
def test_describe_continuous_backups_errors():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
# when
with assert_raises(Exception) as e:
client.describe_continuous_backups(TableName="not-existing-table")
# then
ex = e.exception
ex.operation_name.should.equal("DescribeContinuousBackups")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("TableNotFoundException")
ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table")
@mock_dynamodb2
def test_update_continuous_backups():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
table_name = client.create_table(
TableName="test",
AttributeDefinitions=[
{"AttributeName": "client", "AttributeType": "S"},
{"AttributeName": "app", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "client", "KeyType": "HASH"},
{"AttributeName": "app", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
)["TableDescription"]["TableName"]
# when
response = client.update_continuous_backups(
TableName=table_name,
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
# then
response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal(
"ENABLED"
)
point_in_time = response["ContinuousBackupsDescription"][
"PointInTimeRecoveryDescription"
]
earliest_datetime = point_in_time["EarliestRestorableDateTime"]
earliest_datetime.should.be.a(datetime)
latest_datetime = point_in_time["LatestRestorableDateTime"]
latest_datetime.should.be.a(datetime)
point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED")
# when
# a second update should not change anything
response = client.update_continuous_backups(
TableName=table_name,
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
# then
response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal(
"ENABLED"
)
point_in_time = response["ContinuousBackupsDescription"][
"PointInTimeRecoveryDescription"
]
point_in_time["EarliestRestorableDateTime"].should.equal(earliest_datetime)
point_in_time["LatestRestorableDateTime"].should.equal(latest_datetime)
point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED")
# when
response = client.update_continuous_backups(
TableName=table_name,
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": False},
)
# then
response["ContinuousBackupsDescription"].should.equal(
{
"ContinuousBackupsStatus": "ENABLED",
"PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"},
}
)
@mock_dynamodb2
def test_update_continuous_backups_errors():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
# when
with assert_raises(Exception) as e:
client.update_continuous_backups(
TableName="not-existing-table",
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
# then
ex = e.exception
ex.operation_name.should.equal("UpdateContinuousBackups")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("TableNotFoundException")
ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table")
# https://github.com/spulec/moto/issues/1043
@mock_dynamodb2
def test_query_missing_expr_names():
@ -5029,3 +5165,81 @@ def test_update_item_atomic_counter_return_values():
"v" in response["Attributes"] "v" in response["Attributes"]
), "v has been updated, and should be returned here" ), "v has been updated, and should be returned here"
response["Attributes"]["v"]["N"].should.equal("8") response["Attributes"]["v"]["N"].should.equal("8")
@mock_dynamodb2
def test_update_item_atomic_counter_from_zero():
table = "table_t"
ddb_mock = boto3.client("dynamodb", region_name="eu-west-1")
ddb_mock.create_table(
TableName=table,
KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
key = {"t_id": {"S": "item1"}}
ddb_mock.put_item(
TableName=table, Item=key,
)
ddb_mock.update_item(
TableName=table,
Key=key,
UpdateExpression="add n_i :inc1, n_f :inc2",
ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "-0.5"}},
)
updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"]
assert updated_item["n_i"]["N"] == "1.2"
assert updated_item["n_f"]["N"] == "-0.5"
@mock_dynamodb2
def test_update_item_add_to_non_existent_set():
table = "table_t"
ddb_mock = boto3.client("dynamodb", region_name="eu-west-1")
ddb_mock.create_table(
TableName=table,
KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
key = {"t_id": {"S": "item1"}}
ddb_mock.put_item(
TableName=table, Item=key,
)
ddb_mock.update_item(
TableName=table,
Key=key,
UpdateExpression="add s_i :s1",
ExpressionAttributeValues={":s1": {"SS": ["hello"]}},
)
updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"]
assert updated_item["s_i"]["SS"] == ["hello"]
@mock_dynamodb2
def test_update_item_add_to_non_existent_number_set():
table = "table_t"
ddb_mock = boto3.client("dynamodb", region_name="eu-west-1")
ddb_mock.create_table(
TableName=table,
KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
key = {"t_id": {"S": "item1"}}
ddb_mock.put_item(
TableName=table, Item=key,
)
ddb_mock.update_item(
TableName=table,
Key=key,
UpdateExpression="add s_i :s1",
ExpressionAttributeValues={":s1": {"NS": ["3"]}},
)
updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"]
assert updated_item["s_i"]["NS"] == ["3"]

View File

@ -1307,16 +1307,16 @@ def test_update_item_add_with_expression():
ExpressionAttributeValues={":v": {"item4"}}, ExpressionAttributeValues={":v": {"item4"}},
) )
current_item["str_set"] = current_item["str_set"].union({"item4"}) current_item["str_set"] = current_item["str_set"].union({"item4"})
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a string value to a non-existing set
table.update_item(
Key=item_key,
UpdateExpression="ADD non_existing_str_set :v",
ExpressionAttributeValues={":v": {"item4"}},
)
current_item["non_existing_str_set"] = {"item4"}
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a num value to a num set
table.update_item(
@ -1325,7 +1325,7 @@ def test_update_item_add_with_expression():
ExpressionAttributeValues={":v": {6}}, ExpressionAttributeValues={":v": {6}},
) )
current_item["num_set"] = current_item["num_set"].union({6}) current_item["num_set"] = current_item["num_set"].union({6})
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a value to a number value
table.update_item(
@ -1334,7 +1334,7 @@ def test_update_item_add_with_expression():
ExpressionAttributeValues={":v": 20}, ExpressionAttributeValues={":v": 20},
) )
current_item["num_val"] = current_item["num_val"] + 20 current_item["num_val"] = current_item["num_val"] + 20
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to add a number value to a string set, should raise Client Error
table.update_item.when.called_with(
@ -1342,7 +1342,7 @@ def test_update_item_add_with_expression():
UpdateExpression="ADD str_set :v", UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": 20}, ExpressionAttributeValues={":v": 20},
).should.have.raised(ClientError) ).should.have.raised(ClientError)
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to add a number set to the string set, should raise a ClientError
table.update_item.when.called_with(
@ -1350,7 +1350,7 @@ def test_update_item_add_with_expression():
UpdateExpression="ADD str_set :v", UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": {20}}, ExpressionAttributeValues={":v": {20}},
).should.have.raised(ClientError) ).should.have.raised(ClientError)
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to update with a bad expression # Attempt to update with a bad expression
table.update_item.when.called_with( table.update_item.when.called_with(
@ -1388,17 +1388,18 @@ def test_update_item_add_with_nested_sets():
current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union( current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union(
{"item4"} {"item4"}
) )
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a string value to a non-existing set
table.update_item(
Key=item_key,
UpdateExpression="ADD #ns.#ne :v",
ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"},
ExpressionAttributeValues={":v": {"new_item"}},
)
current_item["nested"]["non_existing_str_set"] = {"new_item"}
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
@mock_dynamodb2

View File

@ -1126,6 +1126,111 @@ def test_run_instance_with_keypair():
instance.key_name.should.equal("keypair_name")
@mock_ec2
def test_run_instance_with_block_device_mappings():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_ebs():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}],
}
with assert_raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.exception.response["Error"]["Code"].should.equal("MissingParameter")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.exception.response["Error"]["Message"].should.equal(
"The request must contain the parameter ebs"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_size():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}}
],
}
with assert_raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.exception.response["Error"]["Code"].should.equal("MissingParameter")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.exception.response["Error"]["Message"].should.equal(
"The request must contain the parameter size or snapshotId"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_from_snapshot():
ec2_client = boto3.client("ec2", region_name="us-east-1")
ec2_resource = boto3.resource("ec2", region_name="us-east-1")
volume_details = {
"AvailabilityZone": "1a",
"Size": 30,
}
volume = ec2_resource.create_volume(**volume_details)
snapshot = volume.create_snapshot()
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}}
],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(30)
volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id)
@mock_ec2_deprecated
def test_describe_instance_status_no_instances():
conn = boto.connect_ec2("the_key", "the_secret")

View File

@ -9,6 +9,38 @@ from botocore.exceptions import ClientError
from nose.tools import assert_raises
def generate_thing_group_tree(iot_client, tree_dict, _parent=None):
"""
Generates a thing group tree given the input tree structure.
:param iot_client: the iot client for boto3
:param tree_dict: dictionary with the key being the group_name, and the value being a sub tree.
tree_dict = {
"group_name_1a":{
"group_name_2a":{
"group_name_3a":{} or None
},
},
"group_name_1b":{}
}
:return: a dictionary of created groups, keyed by group name
"""
if tree_dict is None:
tree_dict = {}
created_dict = {}
for group_name in tree_dict.keys():
params = {"thingGroupName": group_name}
if _parent:
params["parentGroupName"] = _parent
created_group = iot_client.create_thing_group(**params)
created_dict[group_name] = created_group
subtree_dict = generate_thing_group_tree(
iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name
)
created_dict.update(subtree_dict)
return created_dict
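A short usage sketch for this helper (hypothetical group names; boto3 and mock_iot as already imported in this file):

@mock_iot
def demo_generate_thing_group_tree():
    client = boto3.client("iot", region_name="ap-northeast-1")
    # one root with a single child, plus an independent second root
    tree = {"root-a": {"child-a1": {}}, "root-b": {}}
    created = generate_thing_group_tree(client, tree)
    # every group in the tree comes back in the catalog, keyed by name
    assert set(created) == {"root-a", "child-a1", "root-b"}
    assert "thingGroupArn" in created["child-a1"]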
@mock_iot
def test_attach_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
@ -756,25 +788,143 @@ def test_delete_principal_thing():
client.delete_certificate(certificateId=cert_id)
class TestListThingGroup:
group_name_1a = "my-group-name-1a"
group_name_1b = "my-group-name-1b"
group_name_2a = "my-group-name-2a"
group_name_2b = "my-group-name-2b"
group_name_3a = "my-group-name-3a"
group_name_3b = "my-group-name-3b"
group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d"
tree_dict = {
group_name_1a: {
group_name_2a: {group_name_3a: {}, group_name_3b: {}},
group_name_2b: {group_name_3c: {}, group_name_3d: {}},
},
group_name_1b: {},
}
@mock_iot
def test_should_list_all_groups(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups()
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(8)
@mock_iot
def test_should_list_all_groups_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
@mock_iot
def test_should_list_all_groups_filtered_by_parent(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(parentGroup=self.group_name_1a)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(6)
resp = client.list_thing_groups(parentGroup=self.group_name_2a)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(parentGroup=self.group_name_1b)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
with assert_raises(ClientError) as e:
client.list_thing_groups(parentGroup="inexistant-group-name")
e.exception.response["Error"]["Code"].should.equal(
"ResourceNotFoundException"
)
@mock_iot
def test_should_list_all_groups_filtered_by_parent_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(namePrefixFilter="my-group-name-1")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(namePrefixFilter="my-group-name-3")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(4)
resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-1", recursive=False
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-3", recursive=False
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(4)
resp = client.list_thing_groups(
namePrefixFilter="prefix-which-doesn-not-match",
parentGroup=self.group_name_1a,
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_delete_thing_group():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name_1a = "my-group-name-1a"
group_name_2a = "my-group-name-2a"
tree_dict = {
group_name_1a: {group_name_2a: {}},
}
group_catalog = generate_thing_group_tree(client, tree_dict)
# delete group with child
try:
@ -809,56 +959,14 @@ def test_describe_thing_group_metadata_hierarchy():
group_name_3c = "my-group-name-3c" group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d" group_name_3d = "my-group-name-3d"
tree_dict = {
group_name_1a: {
group_name_2a: {group_name_3a: {}, group_name_3b: {}},
group_name_2b: {group_name_3c: {}, group_name_3d: {}},
},
group_name_1b: {},
}
group_catalog = generate_thing_group_tree(client, tree_dict)
# describe groups
# groups level 1
@ -910,7 +1018,7 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description2a.should.have.key("version") thing_group_description2a.should.have.key("version")
# 2b # 2b
thing_group_description2b = client.describe_thing_group( thing_group_description2b = client.describe_thing_group(
@ -936,7 +1044,7 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description2b.should.have.key("version")
# groups level 3
# 3a
@ -963,13 +1071,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2a]["thingGroupArn"])
thing_group_description3a.should.have.key("version")
# 3b
thing_group_description3b = client.describe_thing_group(
@ -995,13 +1103,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2a]["thingGroupArn"])
thing_group_description3b.should.have.key("version")
# 3c
thing_group_description3c = client.describe_thing_group(
@ -1027,13 +1135,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2b]["thingGroupArn"])
thing_group_description3c.should.have.key("version")
# 3d
thing_group_description3d = client.describe_thing_group(
@ -1059,13 +1167,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2b]["thingGroupArn"])
thing_group_description3d.should.have.key("version")

View File

@ -1,10 +1,17 @@
import base64
import json
import time
import zlib
from io import BytesIO
from zipfile import ZipFile, ZIP_DEFLATED
import boto3
import os
import sure # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings, mock_lambda, mock_iam
from nose.tools import assert_raises
from nose import SkipTest
@ -425,3 +432,408 @@ def test_untag_log_group():
assert response["tags"] == remaining_tags assert response["tags"] == remaining_tags
response = conn.delete_log_group(logGroupName=log_group_name) response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_describe_subscription_filters():
# given
client = boto3.client("logs", "us-east-1")
log_group_name = "/test"
client.create_log_group(logGroupName=log_group_name)
# when
response = client.describe_subscription_filters(logGroupName=log_group_name)
# then
response["subscriptionFilters"].should.have.length_of(0)
@mock_logs
def test_describe_subscription_filters_errors():
# given
client = boto3.client("logs", "us-east-1")
# when
with assert_raises(ClientError) as e:
client.describe_subscription_filters(logGroupName="not-existing-log-group",)
# then
ex = e.exception
ex.operation_name.should.equal("DescribeSubscriptionFilters")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
@mock_lambda
@mock_logs
def test_put_subscription_filter_update():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
log_stream_name = "stream"
client_logs.create_log_group(logGroupName=log_group_name)
client_logs.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
# when
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(1)
filter = response["subscriptionFilters"][0]
creation_time = filter["creationTime"]
creation_time.should.be.a(int)
filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
filter["distribution"] = "ByLogStream"
filter["logGroupName"] = "/test"
filter["filterName"] = "test"
filter["filterPattern"] = ""
# when
# to update an existing subscription filter, the 'filterName' must be identical
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="[]",
destinationArn=function_arn,
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(1)
filter = response["subscriptionFilters"][0]
filter["creationTime"].should.equal(creation_time)
filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
filter["distribution"] = "ByLogStream"
filter["logGroupName"] = "/test"
filter["filterName"] = "test"
filter["filterPattern"] = "[]"
# when
# only one subscription filter can be associated with a log group
with assert_raises(ClientError) as e:
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test-2",
filterPattern="",
destinationArn=function_arn,
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("LimitExceededException")
ex.response["Error"]["Message"].should.equal("Resource limit exceeded.")
@mock_lambda
@mock_logs
def test_put_subscription_filter_with_lambda():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
log_stream_name = "stream"
client_logs.create_log_group(logGroupName=log_group_name)
client_logs.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
# when
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(1)
filter = response["subscriptionFilters"][0]
filter["creationTime"].should.be.a(int)
filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
filter["distribution"] = "ByLogStream"
filter["logGroupName"] = "/test"
filter["filterName"] = "test"
filter["filterPattern"] = ""
# when
client_logs.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=[
{"timestamp": 0, "message": "test"},
{"timestamp": 0, "message": "test 2"},
],
)
# then
msg_showed_up, received_message = _wait_for_log_msg(
client_logs, "/aws/lambda/test", "awslogs"
)
assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format(
received_message
)
data = json.loads(received_message)["awslogs"]["data"]
response = json.loads(
zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8")
)
response["messageType"].should.equal("DATA_MESSAGE")
response["owner"].should.equal("123456789012")
response["logGroup"].should.equal("/test")
response["logStream"].should.equal("stream")
response["subscriptionFilters"].should.equal(["test"])
log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"])
log_events.should.have.length_of(2)
log_events[0]["id"].should.be.a(int)
log_events[0]["message"].should.equal("test")
log_events[0]["timestamp"].should.equal(0)
log_events[1]["id"].should.be.a(int)
log_events[1]["message"].should.equal("test 2")
log_events[1]["timestamp"].should.equal(0)
@mock_logs
def test_put_subscription_filter_errors():
# given
client = boto3.client("logs", "us-east-1")
log_group_name = "/test"
client.create_log_group(logGroupName=log_group_name)
# when
with assert_raises(ClientError) as e:
client.put_subscription_filter(
logGroupName="not-existing-log-group",
filterName="test",
filterPattern="",
destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test",
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
# when
with assert_raises(ClientError) as e:
client.put_subscription_filter(
logGroupName="/test",
filterName="test",
filterPattern="",
destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing",
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("InvalidParameterException")
ex.response["Error"]["Message"].should.equal(
"Could not execute the lambda function. "
"Make sure you have given CloudWatch Logs permission to execute your function."
)
@mock_lambda
@mock_logs
def test_delete_subscription_filter():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
client_logs.create_log_group(logGroupName=log_group_name)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# when
client_logs.delete_subscription_filter(
logGroupName="/test", filterName="test",
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(0)
@mock_lambda
@mock_logs
def test_delete_subscription_filter_errors():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
client_logs.create_log_group(logGroupName=log_group_name)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# when
with assert_raises(ClientError) as e:
client_logs.delete_subscription_filter(
logGroupName="not-existing-log-group", filterName="test",
)
# then
ex = e.exception
ex.operation_name.should.equal("DeleteSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
# when
with assert_raises(ClientError) as e:
client_logs.delete_subscription_filter(
logGroupName="/test", filterName="wrong-filter-name",
)
# then
ex = e.exception
ex.operation_name.should.equal("DeleteSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified subscription filter does not exist."
)
def _get_role_name(region_name):
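# Reuse a single mocked IAM role across tests: return the existing
# "test-role" ARN if present, otherwise create it.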
with mock_iam():
iam = boto3.client("iam", region_name=region_name)
try:
return iam.get_role(RoleName="test-role")["Role"]["Arn"]
except ClientError:
return iam.create_role(
RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/",
)["Role"]["Arn"]
def _get_test_zip_file():
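# Build an in-memory zip containing a minimal lambda_function.py
# whose handler simply echoes the incoming event.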
func_str = """
def lambda_handler(event, context):
return event
"""
zip_output = BytesIO()
zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def _wait_for_log_msg(client, log_group_name, expected_msg_part):
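# Poll the log group for up to ten seconds and return (True, message)
# for the first event containing expected_msg_part, or
# (False, all_received_messages) on timeout.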
received_messages = []
start = time.time()
while (time.time() - start) < 10:
result = client.describe_log_streams(logGroupName=log_group_name)
log_streams = result.get("logStreams")
if not log_streams:
time.sleep(1)
continue
for log_stream in log_streams:
result = client.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream["logStreamName"],
)
received_messages.extend(
[event["message"] for event in result.get("events")]
)
for message in received_messages:
if expected_msg_part in message:
return True, message
time.sleep(1)
return False, received_messages

View File

@ -0,0 +1,142 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
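# Shared request fixtures for the Managed Blockchain tests below.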
default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}}
default_votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
}
}
default_memberconfiguration = {
"Name": "testmember1",
"Description": "Test Member 1",
"FrameworkConfiguration": {
"Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
},
"LogPublishingConfiguration": {
"Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}}
},
}
@mock_managedblockchain
def test_create_network():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=default_frameworkconfiguration,
VotingPolicy=default_votingpolicy,
MemberConfiguration=default_memberconfiguration,
)
response["NetworkId"].should.match("n-[A-Z0-9]{26}")
response["MemberId"].should.match("m-[A-Z0-9]{26}")
# Find in full list
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(1)
mbcnetworks[0]["Name"].should.equal("testnetwork1")
# Get network details
network_id = mbcnetworks[0]["Id"]
response = conn.get_network(NetworkId=network_id)
response["Network"]["Name"].should.equal("testnetwork1")
@mock_managedblockchain
def test_create_network_withopts():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=default_frameworkconfiguration,
VotingPolicy=default_votingpolicy,
MemberConfiguration=default_memberconfiguration,
)
response["NetworkId"].should.match("n-[A-Z0-9]{26}")
response["MemberId"].should.match("m-[A-Z0-9]{26}")
# Find in full list
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(1)
mbcnetworks[0]["Description"].should.equal("Test Network 1")
# Get network details
network_id = mbcnetworks[0]["Id"]
response = conn.get_network(NetworkId=network_id)
response["Network"]["Description"].should.equal("Test Network 1")
@mock_managedblockchain
def test_create_network_noframework():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_VINYL",
FrameworkVersion="1.2",
FrameworkConfiguration=default_frameworkconfiguration,
VotingPolicy=default_votingpolicy,
MemberConfiguration=default_memberconfiguration,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_create_network_badframeworkver():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.X",
FrameworkConfiguration=default_frameworkconfiguration,
VotingPolicy=default_votingpolicy,
MemberConfiguration=default_memberconfiguration,
).should.throw(
Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC"
)
@mock_managedblockchain
def test_create_network_badedition():
conn = boto3.client("managedblockchain", region_name="us-east-1")
frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}}
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=frameworkconfiguration,
VotingPolicy=default_votingpolicy,
MemberConfiguration=default_memberconfiguration,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_get_network_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.get_network.when.called_with(
NetworkId="n-BADNETWORK",
).should.throw(Exception, "Network n-BADNETWORK not found")

View File

@ -195,6 +195,10 @@ def test_ec2_integration():
reservations = ec2.describe_instances()["Reservations"]
assert reservations.should.be.empty
# Before starting the instance, its status should be "stopped"
opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0]
opsworks_instance["Status"].should.equal("stopped")
# After starting the instance, it should be discoverable via ec2
opsworks.start_instance(InstanceId=instance_id)
reservations = ec2.describe_instances()["Reservations"]
@ -204,3 +208,5 @@ def test_ec2_integration():
instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"]) instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"])
instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"])
# After starting the instance, its status should be "online"
opsworks_instance["Status"].should.equal("online")

View File

@ -915,6 +915,11 @@ def test_create_cluster_from_snapshot():
ClusterIdentifier=original_cluster_identifier,
)
client.restore_from_cluster_snapshot.when.called_with(
ClusterIdentifier=original_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
).should.throw(ClientError, "ClusterAlreadyExists")
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
@ -1333,3 +1338,20 @@ def test_modify_snapshot_copy_retention_period():
response = client.describe_clusters(ClusterIdentifier="test") response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"] cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5) cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5)
@mock_redshift
def test_create_duplicate_cluster_fails():
kwargs = {
"ClusterIdentifier": "test",
"ClusterType": "single-node",
"DBName": "test",
"MasterUsername": "user",
"MasterUserPassword": "password",
"NodeType": "ds2.xlarge",
}
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(**kwargs)
client.create_cluster.when.called_with(**kwargs).should.throw(
ClientError, "ClusterAlreadyExists"
)

View File

@ -2149,6 +2149,19 @@ def test_boto3_copy_object_with_versioning():
data.should.equal(b"test2") data.should.equal(b"test2")
@mock_s3
def test_s3_abort_multipart_data_with_invalid_upload_and_key():
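# aborting a multipart upload that was never started should raise NoSuchUpload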
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="blah")
with assert_raises(Exception) as err:
client.abort_multipart_upload(
Bucket="blah", Key="foobar", UploadId="dummy_upload_id"
)
err.exception.response["Error"]["Code"].should.equal("NoSuchUpload")
@mock_s3
def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)

View File

@ -127,3 +127,53 @@ def test_send_raw_email():
send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"] send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"]
) )
sent_count.should.equal(1) sent_count.should.equal(1)
@mock_ses_deprecated
def test_get_send_statistics():
conn = boto.connect_ses("the_key", "the_secret")
conn.send_email.when.called_with(
"test@example.com",
"test subject",
"<span>test body</span>",
"test_to@example.com",
format="html",
).should.throw(BotoServerError)
# verify the rejected send shows up in get_send_statistics
result = conn.get_send_statistics()
reject_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["Rejects"]
)
delivery_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["DeliveryAttempts"]
)
reject_count.should.equal(1)
delivery_count.should.equal(0)
conn.verify_email_identity("test@example.com")
conn.send_email(
"test@example.com", "test subject", "test body", "test_to@example.com"
)
# verify the delivery attempt shows up in get_send_statistics
result = conn.get_send_statistics()
reject_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["Rejects"]
)
delivery_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["DeliveryAttempts"]
)
reject_count.should.equal(1)
delivery_count.should.equal(1)

View File

@ -4,6 +4,8 @@ import boto3
from botocore.exceptions import ClientError
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from nose.tools import assert_raises
import sure # noqa
@ -227,3 +229,51 @@ def test_send_email_notification_with_encoded_sender():
Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}}, Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}},
) )
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
@mock_ses
def test_create_configuration_set():
conn = boto3.client("ses", region_name="us-east-1")
conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"}))
conn.create_configuration_set_event_destination(
ConfigurationSetName="test",
EventDestination={
"Name": "snsEvent",
"Enabled": True,
"MatchingEventTypes": ["send",],
"SNSDestination": {
"TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
},
},
)
with assert_raises(ClientError) as ex:
conn.create_configuration_set_event_destination(
ConfigurationSetName="failtest",
EventDestination={
"Name": "snsEvent",
"Enabled": True,
"MatchingEventTypes": ["send",],
"SNSDestination": {
"TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
},
},
)
ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist")
with assert_raises(ClientError) as ex:
conn.create_configuration_set_event_destination(
ConfigurationSetName="test",
EventDestination={
"Name": "snsEvent",
"Enabled": True,
"MatchingEventTypes": ["send",],
"SNSDestination": {
"TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
},
},
)
ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists")

View File

@ -1,5 +1,7 @@
from __future__ import unicode_literals
import string
import boto3
import botocore.exceptions
import sure # noqa
@ -300,6 +302,30 @@ def test_get_parameter():
)
@mock_ssm
def test_get_parameters_errors():
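# GetParameters accepts at most 10 names, so requesting 11 must fail validation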
client = boto3.client("ssm", region_name="us-east-1")
ssm_parameters = {name: "value" for name in string.ascii_lowercase[:11]}
for name, value in ssm_parameters.items():
client.put_parameter(Name=name, Value=value, Type="String")
with assert_raises(ClientError) as e:
client.get_parameters(Names=list(ssm_parameters.keys()))
ex = e.exception
ex.operation_name.should.equal("GetParameters")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ValidationException")
ex.response["Error"]["Message"].should.equal(
"1 validation error detected: "
"Value '[{}]' at 'names' failed to satisfy constraint: "
"Member must have length less than or equal to 10.".format(
", ".join(ssm_parameters.keys())
)
)
@mock_ssm
def test_get_nonexistant_parameter():
client = boto3.client("ssm", region_name="us-east-1")