Merge pull request #47 from spulec/master

Merge upstream
Bert Blommers, 2020-06-06 12:10:58 +01:00, committed by GitHub
commit 5508da186b
32 changed files with 973 additions and 105 deletions


@@ -2738,7 +2738,7 @@
 - [ ] describe_local_gateways
 - [ ] describe_moving_addresses
 - [ ] describe_nat_gateways
-- [ ] describe_network_acls
+- [X] describe_network_acls
 - [ ] describe_network_interface_attribute
 - [ ] describe_network_interface_permissions
 - [X] describe_network_interfaces


@@ -301,6 +301,14 @@ class FakeAutoScalingGroup(BaseModel):
         self.availability_zones = availability_zones
         self.vpc_zone_identifier = vpc_zone_identifier

+    @staticmethod
+    def __set_string_propagate_at_launch_booleans_on_tags(tags):
+        bool_to_string = {True: "true", False: "false"}
+        for tag in tags:
+            if "PropagateAtLaunch" in tag:
+                tag["PropagateAtLaunch"] = bool_to_string[tag["PropagateAtLaunch"]]
+        return tags
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
@@ -329,7 +337,9 @@ class FakeAutoScalingGroup(BaseModel):
             target_group_arns=target_group_arns,
             placement_group=None,
             termination_policies=properties.get("TerminationPolicies", []),
-            tags=properties.get("Tags", []),
+            tags=cls.__set_string_propagate_at_launch_booleans_on_tags(
+                properties.get("Tags", [])
+            ),
             new_instances_protected_from_scale_in=properties.get(
                 "NewInstancesProtectedFromScaleIn", False
             ),
@@ -455,7 +465,7 @@ class FakeAutoScalingGroup(BaseModel):
             # boto3 and cloudformation use PropagateAtLaunch
             if "propagate_at_launch" in tag and tag["propagate_at_launch"] == "true":
                 propagated_tags[tag["key"]] = tag["value"]
-            if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"]:
+            if "PropagateAtLaunch" in tag and tag["PropagateAtLaunch"] == "true":
                 propagated_tags[tag["Key"]] = tag["Value"]
         return propagated_tags
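
Aside (illustrative sketch, not part of the commit): CloudFormation templates carry PropagateAtLaunch as JSON booleans, while the tag-propagation check above compares against the string "true" that boto3 sends. The new private helper bridges the two; its effect, standalone:

    tags = [{"Key": "env", "Value": "test", "PropagateAtLaunch": True}]
    bool_to_string = {True: "true", False: "false"}
    for tag in tags:
        if "PropagateAtLaunch" in tag:
            # Coerce the CloudFormation boolean into the API's string form
            tag["PropagateAtLaunch"] = bool_to_string[tag["PropagateAtLaunch"]]
    assert tags == [{"Key": "env", "Value": "test", "PropagateAtLaunch": "true"}]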


@@ -219,7 +219,12 @@ class FakeStack(BaseModel):
         self.stack_id = stack_id
         self.name = name
         self.template = template
-        self._parse_template()
+        if template != {}:
+            self._parse_template()
+            self.description = self.template_dict.get("Description")
+        else:
+            self.template_dict = {}
+            self.description = None
         self.parameters = parameters
         self.region_name = region_name
         self.notification_arns = notification_arns if notification_arns else []
@@ -235,12 +240,12 @@ class FakeStack(BaseModel):
             "CREATE_IN_PROGRESS", resource_status_reason="User Initiated"
         )

-        self.description = self.template_dict.get("Description")
         self.cross_stack_resources = cross_stack_resources or {}
         self.resource_map = self._create_resource_map()
         self.output_map = self._create_output_map()
         if create_change_set:
-            self.status = "REVIEW_IN_PROGRESS"
+            self.status = "CREATE_COMPLETE"
+            self.execution_status = "AVAILABLE"
         else:
             self.create_resources()
             self._add_stack_event("CREATE_COMPLETE")
@@ -330,7 +335,9 @@ class FakeStack(BaseModel):
         return self.output_map.exports

     def create_resources(self):
-        self.resource_map.create()
+        self.resource_map.create(self.template_dict)
+        # Set the description of the stack
+        self.description = self.template_dict.get("Description")
         self.status = "CREATE_COMPLETE"

     def update(self, template, role_arn=None, parameters=None, tags=None):
@@ -397,6 +404,9 @@ class FakeChangeSet(FakeStack):
         self.change_set_id = change_set_id
         self.change_set_name = change_set_name
         self.changes = self.diff(template=template, parameters=parameters)
+        if self.description is None:
+            self.description = self.template_dict.get("Description")
+        self.creation_time = datetime.utcnow()

     def diff(self, template, parameters=None):
         self.template = template
@@ -587,8 +597,8 @@ class CloudFormationBackend(BaseBackend):
             if stack is None:
                 raise ValidationError(stack_name)
         else:
-            stack_id = generate_stack_id(stack_name)
-            stack_template = template
+            stack_id = generate_stack_id(stack_name, region_name)
+            stack_template = {}

         change_set_id = generate_changeset_id(change_set_name, region_name)
         new_change_set = FakeChangeSet(
@@ -643,6 +653,9 @@ class CloudFormationBackend(BaseBackend):
         if stack is None:
             raise ValidationError(stack_name)
         if stack.events[-1].resource_status == "REVIEW_IN_PROGRESS":
+            stack._add_stack_event(
+                "CREATE_IN_PROGRESS", resource_status_reason="User Initiated"
+            )
             stack._add_stack_event("CREATE_COMPLETE")
         else:
             stack._add_stack_event("UPDATE_IN_PROGRESS")


@@ -96,6 +96,7 @@ MODEL_MAP = {
     "AWS::S3::Bucket": s3_models.FakeBucket,
     "AWS::SQS::Queue": sqs_models.Queue,
     "AWS::Events::Rule": events_models.Rule,
+    "AWS::Events::EventBus": events_models.EventBus,
 }

 UNDOCUMENTED_NAME_TYPE_MAP = {
@@ -456,7 +457,7 @@ class ResourceMap(collections_abc.Mapping):
         cross_stack_resources,
     ):
         self._template = template
-        self._resource_json_map = template["Resources"]
+        self._resource_json_map = template["Resources"] if template != {} else {}
         self._region_name = region_name
         self.input_parameters = parameters
         self.tags = copy.deepcopy(tags)
@@ -592,10 +593,12 @@ class ResourceMap(collections_abc.Mapping):
         self.load_parameters()
         self.load_conditions()

-    def create(self):
+    def create(self, template):
         # Since this is a lazy map, to create every object we just need to
         # iterate through self.
         # Assumes that self.load() has been called before
+        self._template = template
+        self._resource_json_map = template["Resources"]
         self.tags.update(
             {
                 "aws:cloudformation:stack-name": self.get("AWS::StackName"),


@@ -609,7 +609,7 @@ DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """<DescribeChangeSetResponse>
       </member>
       {% endfor %}
     </Parameters>
-    <CreationTime>2011-05-23T15:47:44Z</CreationTime>
+    <CreationTime>{{ change_set.creation_time_iso_8601 }}</CreationTime>
     <ExecutionStatus>{{ change_set.execution_status }}</ExecutionStatus>
     <Status>{{ change_set.status }}</Status>
     <StatusReason>{{ change_set.status_reason }}</StatusReason>
@@ -662,6 +662,10 @@ DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResponse>
     <member>
       <StackName>{{ stack.name }}</StackName>
       <StackId>{{ stack.stack_id }}</StackId>
+      {% if stack.change_set_id %}
+      <ChangeSetId>{{ stack.change_set_id }}</ChangeSetId>
+      {% endif %}
+      <Description>{{ stack.description }}</Description>
       <CreationTime>{{ stack.creation_time_iso_8601 }}</CreationTime>
       <StackStatus>{{ stack.status }}</StackStatus>
       {% if stack.notification_arns %}


@@ -304,6 +304,13 @@ class CloudWatchBackend(BaseBackend):
         )

     def delete_alarms(self, alarm_names):
+        for alarm_name in alarm_names:
+            if alarm_name not in self.alarms:
+                raise RESTError(
+                    "ResourceNotFound",
+                    "Alarm {0} not found".format(alarm_name),
+                    status=404,
+                )
         for alarm_name in alarm_names:
             self.alarms.pop(alarm_name, None)
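
The validation pass runs before any deletion, so a batch containing one unknown alarm fails without removing anything. A minimal sketch of the client-side behaviour (alarm name invented):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_cloudwatch

    @mock_cloudwatch
    def delete_missing_alarm():
        cw = boto3.client("cloudwatch", region_name="us-east-1")
        try:
            cw.delete_alarms(AlarmNames=["no-such-alarm"])
        except ClientError as err:
            # The whole batch is rejected before anything is deleted
            assert err.response["Error"]["Code"] == "ResourceNotFound"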


@@ -231,6 +231,16 @@ class InvalidVolumeAttachmentError(EC2ClientError):
         )


+class InvalidVolumeDetachmentError(EC2ClientError):
+    def __init__(self, volume_id, instance_id, device):
+        super(InvalidVolumeDetachmentError, self).__init__(
+            "InvalidAttachment.NotFound",
+            "The volume {0} is not attached to instance {1} as device {2}".format(
+                volume_id, instance_id, device
+            ),
+        )
+
+
 class VolumeInUseError(EC2ClientError):
     def __init__(self, volume_id, instance_id):
         super(VolumeInUseError, self).__init__(


@@ -72,6 +72,7 @@ from .exceptions import (
     InvalidVolumeIdError,
     VolumeInUseError,
     InvalidVolumeAttachmentError,
+    InvalidVolumeDetachmentError,
     InvalidVpcCidrBlockAssociationIdError,
     InvalidVPCPeeringConnectionIdError,
     InvalidVPCPeeringConnectionStateTransitionError,
@@ -560,22 +561,33 @@ class Instance(TaggedEC2Resource, BotoInstance):
             # worst case we'll get IP address exaustion... rarely
             pass

-    def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False):
+    def add_block_device(
+        self,
+        size,
+        device_path,
+        snapshot_id=None,
+        encrypted=False,
+        delete_on_termination=False,
+    ):
         volume = self.ec2_backend.create_volume(
             size, self.region_name, snapshot_id, encrypted
         )
-        self.ec2_backend.attach_volume(volume.id, self.id, device_path)
+        self.ec2_backend.attach_volume(
+            volume.id, self.id, device_path, delete_on_termination
+        )

     def setup_defaults(self):
         # Default have an instance with root volume should you not wish to
         # override with attach volume cmd.
         volume = self.ec2_backend.create_volume(8, "us-east-1a")
-        self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1")
+        self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1", True)

     def teardown_defaults(self):
-        if "/dev/sda1" in self.block_device_mapping:
-            volume_id = self.block_device_mapping["/dev/sda1"].volume_id
-            self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1")
-            self.ec2_backend.delete_volume(volume_id)
+        for device_path in list(self.block_device_mapping.keys()):
+            volume = self.block_device_mapping[device_path]
+            volume_id = volume.volume_id
+            self.ec2_backend.detach_volume(volume_id, self.id, device_path)
+            if volume.delete_on_termination:
+                self.ec2_backend.delete_volume(volume_id)

     @property
@@ -897,8 +909,15 @@ class InstanceBackend(object):
                     volume_size = block_device["Ebs"].get("VolumeSize")
                     snapshot_id = block_device["Ebs"].get("SnapshotId")
                     encrypted = block_device["Ebs"].get("Encrypted", False)
+                    delete_on_termination = block_device["Ebs"].get(
+                        "DeleteOnTermination", False
+                    )
                     new_instance.add_block_device(
-                        volume_size, device_name, snapshot_id, encrypted
+                        volume_size,
+                        device_name,
+                        snapshot_id,
+                        encrypted,
+                        delete_on_termination,
                     )
             else:
                 new_instance.setup_defaults()
@@ -2475,7 +2494,9 @@ class EBSBackend(object):
             return self.volumes.pop(volume_id)
         raise InvalidVolumeIdError(volume_id)

-    def attach_volume(self, volume_id, instance_id, device_path):
+    def attach_volume(
+        self, volume_id, instance_id, device_path, delete_on_termination=False
+    ):
         volume = self.get_volume(volume_id)
         instance = self.get_instance(instance_id)
@@ -2489,17 +2510,25 @@ class EBSBackend(object):
             status=volume.status,
             size=volume.size,
             attach_time=utc_date_and_time(),
+            delete_on_termination=delete_on_termination,
         )
         instance.block_device_mapping[device_path] = bdt
         return volume.attachment

     def detach_volume(self, volume_id, instance_id, device_path):
         volume = self.get_volume(volume_id)
-        self.get_instance(instance_id)
+        instance = self.get_instance(instance_id)

         old_attachment = volume.attachment
         if not old_attachment:
             raise InvalidVolumeAttachmentError(volume_id, instance_id)
+        device_path = device_path or old_attachment.device
+
+        try:
+            del instance.block_device_mapping[device_path]
+        except KeyError:
+            raise InvalidVolumeDetachmentError(volume_id, instance_id, device_path)

         old_attachment.status = "detached"
         volume.attachment = None
@@ -4721,23 +4750,7 @@ class NetworkAclBackend(object):
         )

     def get_all_network_acls(self, network_acl_ids=None, filters=None):
-        network_acls = self.network_acls.values()
-
-        if network_acl_ids:
-            network_acls = [
-                network_acl
-                for network_acl in network_acls
-                if network_acl.id in network_acl_ids
-            ]
-            if len(network_acls) != len(network_acl_ids):
-                invalid_id = list(
-                    set(network_acl_ids).difference(
-                        set([network_acl.id for network_acl in network_acls])
-                    )
-                )[0]
-                raise InvalidRouteTableIdError(invalid_id)
-
-        return generic_filter(filters, network_acls)
+        self.describe_network_acls(network_acl_ids, filters)

     def delete_network_acl(self, network_acl_id):
         deleted = self.network_acls.pop(network_acl_id, None)
@@ -4857,6 +4870,25 @@ class NetworkAclBackend(object):
             self, association_id, subnet_id, acl.id
         )

+    def describe_network_acls(self, network_acl_ids=None, filters=None):
+        network_acls = self.network_acls.values()
+
+        if network_acl_ids:
+            network_acls = [
+                network_acl
+                for network_acl in network_acls
+                if network_acl.id in network_acl_ids
+            ]
+            if len(network_acls) != len(network_acl_ids):
+                invalid_id = list(
+                    set(network_acl_ids).difference(
+                        set([network_acl.id for network_acl in network_acls])
+                    )
+                )[0]
+                raise InvalidRouteTableIdError(invalid_id)
+
+        return generic_filter(filters, network_acls)
+

 class NetworkAclAssociation(object):
     def __init__(self, ec2_backend, new_association_id, subnet_id, network_acl_id):

@@ -83,7 +83,7 @@ class NetworkACLs(BaseResponse):
     def describe_network_acls(self):
         network_acl_ids = self._get_multi_param("NetworkAclId")
         filters = filters_from_querystring(self.querystring)
-        network_acls = self.ec2_backend.get_all_network_acls(network_acl_ids, filters)
+        network_acls = self.ec2_backend.describe_network_acls(network_acl_ids, filters)
         template = self.response_template(DESCRIBE_NETWORK_ACL_RESPONSE)
         return template.render(network_acls=network_acls)


@@ -80,6 +80,15 @@ class Rule(BaseModel):
         event_name = properties.get("Name") or resource_name
         return event_backend.put_rule(name=event_name, **properties)

+    @classmethod
+    def update_from_cloudformation_json(
+        cls, original_resource, new_resource_name, cloudformation_json, region_name
+    ):
+        original_resource.delete(region_name)
+        return cls.create_from_cloudformation_json(
+            new_resource_name, cloudformation_json, region_name
+        )
+
     @classmethod
     def delete_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
@@ -125,6 +134,52 @@ class EventBus(BaseModel):

         return json.dumps(policy)

+    def delete(self, region_name):
+        event_backend = events_backends[region_name]
+        event_backend.delete_event_bus(name=self.name)
+
+    def get_cfn_attribute(self, attribute_name):
+        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
+
+        if attribute_name == "Arn":
+            return self.arn
+        elif attribute_name == "Name":
+            return self.name
+        elif attribute_name == "Policy":
+            return self.policy
+
+        raise UnformattedGetAttTemplateException()
+
+    @classmethod
+    def create_from_cloudformation_json(
+        cls, resource_name, cloudformation_json, region_name
+    ):
+        properties = cloudformation_json["Properties"]
+        event_backend = events_backends[region_name]
+
+        event_name = properties["Name"]
+        event_source_name = properties.get("EventSourceName")
+        return event_backend.create_event_bus(
+            name=event_name, event_source_name=event_source_name
+        )
+
+    @classmethod
+    def update_from_cloudformation_json(
+        cls, original_resource, new_resource_name, cloudformation_json, region_name
+    ):
+        original_resource.delete(region_name)
+        return cls.create_from_cloudformation_json(
+            new_resource_name, cloudformation_json, region_name
+        )
+
+    @classmethod
+    def delete_from_cloudformation_json(
+        cls, resource_name, cloudformation_json, region_name
+    ):
+        properties = cloudformation_json["Properties"]
+        event_backend = events_backends[region_name]
+        event_bus_name = properties["Name"]
+        event_backend.delete_event_bus(event_bus_name)
+

 class EventsBackend(BaseBackend):
     ACCOUNT_ID = re.compile(r"^(\d{1,12}|\*)$")
@@ -360,7 +415,7 @@ class EventsBackend(BaseBackend):

         return event_bus

-    def create_event_bus(self, name, event_source_name):
+    def create_event_bus(self, name, event_source_name=None):
         if name in self.event_buses:
             raise JsonRESTError(
                 "ResourceAlreadyExistsException",
@@ -397,7 +452,6 @@ class EventsBackend(BaseBackend):
             raise JsonRESTError(
                 "ValidationException", "Cannot delete event bus default."
             )
-
         self.event_buses.pop(name, None)

     def list_tags_for_resource(self, arn):

@@ -217,7 +217,10 @@ class EventsHandler(BaseResponse):
                 "ResourceNotFoundException", "Rule " + rule_name + " does not exist."
             )

-        return "", self.response_headers
+        return (
+            json.dumps({"FailedEntryCount": 0, "FailedEntries": []}),
+            self.response_headers,
+        )

     def remove_targets(self):
         rule_name = self._get_param("Rule")
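
put_targets previously returned an empty body, which broke clients that read FailedEntryCount from the result. A sketch of the corrected response shape (rule and target values invented):

    import boto3
    from moto import mock_events

    @mock_events
    def put_targets_response_shape():
        events = boto3.client("events", region_name="us-east-1")
        events.put_rule(Name="demo-rule", ScheduleExpression="rate(5 minutes)")
        resp = events.put_targets(
            Rule="demo-rule", Targets=[{"Id": "demo-id", "Arn": "demo-arn"}]
        )
        # Previously an empty body; now a well-formed PutTargets result
        assert resp["FailedEntryCount"] == 0
        assert resp["FailedEntries"] == []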


@@ -1148,8 +1148,8 @@ class IAMBackend(BaseBackend):
     def delete_role(self, role_name):
         role = self.get_role(role_name)
         for instance_profile in self.get_instance_profiles():
-            for role in instance_profile.roles:
-                if role.name == role_name:
+            for profile_role in instance_profile.roles:
+                if profile_role.name == role_name:
                     raise IAMConflictException(
                         code="DeleteConflict",
                         message="Cannot delete entity, must remove roles from instance profile first.",
@@ -1341,6 +1341,15 @@ class IAMBackend(BaseBackend):
         self.instance_profiles[name] = instance_profile
         return instance_profile

+    def delete_instance_profile(self, name):
+        instance_profile = self.get_instance_profile(name)
+        if len(instance_profile.roles) > 0:
+            raise IAMConflictException(
+                code="DeleteConflict",
+                message="Cannot delete entity, must remove roles from instance profile first.",
+            )
+        del self.instance_profiles[name]
+
     def get_instance_profile(self, profile_name):
         for profile in self.get_instance_profiles():
             if profile.name == profile_name:


@@ -305,6 +305,13 @@ class IamResponse(BaseResponse):
         template = self.response_template(CREATE_INSTANCE_PROFILE_TEMPLATE)
         return template.render(profile=profile)

+    def delete_instance_profile(self):
+        profile_name = self._get_param("InstanceProfileName")
+
+        profile = iam_backend.delete_instance_profile(profile_name)
+        template = self.response_template(DELETE_INSTANCE_PROFILE_TEMPLATE)
+        return template.render(profile=profile)
+
     def get_instance_profile(self):
         profile_name = self._get_param("InstanceProfileName")
         profile = iam_backend.get_instance_profile(profile_name)
@@ -1180,6 +1187,12 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """<CreateInstanceProfileResponse xmlns="http
   </ResponseMetadata>
 </CreateInstanceProfileResponse>"""

+DELETE_INSTANCE_PROFILE_TEMPLATE = """<DeleteInstanceProfileResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
+  <ResponseMetadata>
+    <RequestId>786dff92-6cfd-4fa4-b1eb-27EXAMPLE804</RequestId>
+  </ResponseMetadata>
+</DeleteInstanceProfileResponse>"""
+
 GET_INSTANCE_PROFILE_TEMPLATE = """<GetInstanceProfileResponse xmlns="https://iam.amazonaws.com/doc/2010-05-08/">
   <GetInstanceProfileResult>
     <InstanceProfile>


@@ -778,6 +778,7 @@ class FakeBucket(BaseModel):
         self.payer = "BucketOwner"
         self.creation_date = datetime.datetime.utcnow()
         self.public_access_block = None
+        self.encryption = None

     @property
     def location(self):
@@ -1227,6 +1228,9 @@ class S3Backend(BaseBackend):
     def get_bucket_versioning(self, bucket_name):
         return self.get_bucket(bucket_name).versioning_status

+    def get_bucket_encryption(self, bucket_name):
+        return self.get_bucket(bucket_name).encryption
+
     def get_bucket_latest_versions(self, bucket_name):
         versions = self.get_bucket_versions(bucket_name)
         latest_modified_per_key = {}
@@ -1275,6 +1279,12 @@ class S3Backend(BaseBackend):
         bucket = self.get_bucket(bucket_name)
         bucket.policy = None

+    def put_bucket_encryption(self, bucket_name, encryption):
+        self.get_bucket(bucket_name).encryption = encryption
+
+    def delete_bucket_encryption(self, bucket_name):
+        self.get_bucket(bucket_name).encryption = None
+
     def set_bucket_lifecycle(self, bucket_name, rules):
         bucket = self.get_bucket(bucket_name)
         bucket.set_lifecycle(rules)
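
The bucket-encryption state is a single attribute on FakeBucket, so the backend methods are thin wrappers. A round-trip sketch through boto3 (bucket name invented; the request uses the standard PutBucketEncryption payload):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_s3

    @mock_s3
    def encryption_round_trip():
        s3 = boto3.client("s3", region_name="us-east-1")
        s3.create_bucket(Bucket="demo-bucket")
        s3.put_bucket_encryption(
            Bucket="demo-bucket",
            ServerSideEncryptionConfiguration={
                "Rules": [
                    {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
                ]
            },
        )
        s3.get_bucket_encryption(Bucket="demo-bucket")  # returns the stored config
        s3.delete_bucket_encryption(Bucket="demo-bucket")
        try:
            s3.get_bucket_encryption(Bucket="demo-bucket")
        except ClientError as err:
            # Rendered from the S3_NO_ENCRYPTION template with a 404 status
            assert (
                err.response["Error"]["Code"]
                == "ServerSideEncryptionConfigurationNotFoundError"
            )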


@@ -466,6 +466,13 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
                     is_truncated="false",
                 ),
             )
+        elif "encryption" in querystring:
+            encryption = self.backend.get_bucket_encryption(bucket_name)
+            if not encryption:
+                template = self.response_template(S3_NO_ENCRYPTION)
+                return 404, {}, template.render(bucket_name=bucket_name)
+            template = self.response_template(S3_ENCRYPTION_CONFIG)
+            return 200, {}, template.render(encryption=encryption)
         elif querystring.get("list-type", [None])[0] == "2":
             return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
@@ -703,7 +710,16 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
                 bucket_name, pab_config["PublicAccessBlockConfiguration"]
             )
             return ""
+        elif "encryption" in querystring:
+            try:
+                self.backend.put_bucket_encryption(
+                    bucket_name, self._encryption_config_from_xml(body)
+                )
+                return ""
+            except KeyError:
+                raise MalformedXML()
+            except Exception as e:
+                raise e
         else:
             # us-east-1, the default AWS region behaves a bit differently
             # - you should not use it as a location constraint --> it fails
@@ -768,6 +784,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
         elif "publicAccessBlock" in querystring:
             self.backend.delete_bucket_public_access_block(bucket_name)
             return 204, {}, ""
+        elif "encryption" in querystring:
+            bucket = self.backend.delete_bucket_encryption(bucket_name)
+            return 204, {}, ""

         removed_bucket = self.backend.delete_bucket(bucket_name)
@@ -1427,6 +1446,22 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):

         return [parsed_xml["CORSConfiguration"]["CORSRule"]]

+    def _encryption_config_from_xml(self, xml):
+        parsed_xml = xmltodict.parse(xml)
+
+        if (
+            not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
+            or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
+                "ApplyServerSideEncryptionByDefault"
+            )
+            or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
+                "ApplyServerSideEncryptionByDefault"
+            ].get("SSEAlgorithm")
+        ):
+            raise MalformedXML()
+
+        return [parsed_xml["ServerSideEncryptionConfiguration"]]
+
     def _logging_from_xml(self, xml):
         parsed_xml = xmltodict.parse(xml)
@@ -2130,6 +2165,31 @@ S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
 <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
 """

+S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
+<BucketEncryptionStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
+    {% for entry in encryption %}
+    <Rule>
+        <ApplyServerSideEncryptionByDefault>
+            <SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
+            {% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
+            <KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
+            {% endif %}
+        </ApplyServerSideEncryptionByDefault>
+    </Rule>
+    {% endfor %}
+</BucketEncryptionStatus>
+"""

+S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+    <Code>ServerSideEncryptionConfigurationNotFoundError</Code>
+    <Message>The server side encryption configuration was not found</Message>
+    <BucketName>{{ bucket_name }}</BucketName>
+    <RequestId>0D68A23BB2E2215B</RequestId>
+    <HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
+</Error>
+"""
+
 S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
 <NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
     {% for topic in bucket.notification_configuration.topic %}


@@ -274,6 +274,7 @@ class SecretsManagerBackend(BaseBackend):
             raise SecretNotFoundException()

         secret = self.secrets[secret_id]
+        version_id_to_stages = self.form_version_ids_to_stages(secret["versions"])

         response = json.dumps(
             {
@@ -291,6 +292,7 @@ class SecretsManagerBackend(BaseBackend):
                 "LastAccessedDate": None,
                 "DeletedDate": secret.get("deleted_date", None),
                 "Tags": secret["tags"],
+                "VersionIdsToStages": version_id_to_stages,
             }
         )
@@ -552,6 +554,14 @@ class SecretsManagerBackend(BaseBackend):
             }
         )

+    @staticmethod
+    def form_version_ids_to_stages(secret):
+        version_id_to_stages = {}
+        for key, value in secret.items():
+            version_id_to_stages[key] = value["version_stages"]
+
+        return version_id_to_stages
+

 secretsmanager_backends = {}
 for region in Session().get_available_regions("secretsmanager"):
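
describe_secret now reports which staging labels point at which secret version. Sketch (secret name and value invented):

    import boto3
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def version_ids_to_stages():
        sm = boto3.client("secretsmanager", region_name="us-east-1")
        sm.create_secret(Name="demo-secret", SecretString="hunter2")
        desc = sm.describe_secret(SecretId="demo-secret")
        # One entry per version id, each mapping to its staging labels
        assert any(
            "AWSCURRENT" in stages for stages in desc["VersionIdsToStages"].values()
        )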


@@ -6,6 +6,7 @@ import json
 import re
 import six
 import struct
+from copy import deepcopy
 from xml.sax.saxutils import escape

 from boto3 import Session
@@ -101,7 +102,6 @@ class Message(BaseModel):
             if data_type == "String" or data_type == "Number":
                 value = attr["string_value"]
             elif data_type == "Binary":
-                print(data_type, attr["binary_value"], type(attr["binary_value"]))
                 value = base64.b64decode(attr["binary_value"])
             else:
                 print(
@@ -722,6 +722,7 @@ class SQSBackend(BaseBackend):
         previous_result_count = len(result)

         polling_end = unix_time() + wait_seconds_timeout
+        currently_pending_groups = deepcopy(queue.pending_message_groups)

         # queue.messages only contains visible messages
         while True:
@@ -739,11 +740,11 @@ class SQSBackend(BaseBackend):
                     # The message is pending but is visible again, so the
                     # consumer must have timed out.
                     queue.pending_messages.remove(message)
+                    currently_pending_groups = deepcopy(queue.pending_message_groups)

                 if message.group_id and queue.fifo_queue:
-                    if message.group_id in queue.pending_message_groups:
-                        # There is already one active message with the same
-                        # group, so we cannot deliver this one.
+                    if message.group_id in currently_pending_groups:
+                        # A previous call is still processing messages in this group, so we cannot deliver this one.
                         continue

                 queue.pending_messages.add(message)
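
The point of the snapshot: pending_message_groups is captured once per receive call (and refreshed when a timed-out message returns to the queue), so a group locked by an earlier, still-active consumer stays locked, while the current call can keep draining a group it started itself. A sketch of the intended behaviour (queue name invented):

    import boto3
    from moto import mock_sqs

    @mock_sqs
    def fifo_batch_receive():
        sqs = boto3.client("sqs", region_name="us-east-1")
        url = sqs.create_queue(
            QueueName="demo.fifo", Attributes={"FifoQueue": "true"}
        )["QueueUrl"]
        for i in range(3):
            sqs.send_message(
                QueueUrl=url,
                MessageBody="message-%d" % i,
                MessageGroupId="group-1",
                MessageDeduplicationId="dedup-%d" % i,
            )
        # A single call may now return several messages of the same group
        msgs = sqs.receive_message(QueueUrl=url, MaxNumberOfMessages=10)
        assert len(msgs["Messages"]) == 3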


@@ -232,6 +232,14 @@ class SQSResponse(BaseResponse):

         queue_name = self._get_queue_name()

+        if not message_group_id:
+            queue = self.sqs_backend.get_queue(queue_name)
+            if queue.attributes.get("FifoQueue", False):
+                return self._error(
+                    "MissingParameter",
+                    "The request must contain the parameter MessageGroupId.",
+                )
+
         message = self.sqs_backend.send_message(
             queue_name,
             message,
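
Sketch of the new validation from the client's side (queue name invented):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_sqs

    @mock_sqs
    def fifo_requires_group_id():
        sqs = boto3.client("sqs", region_name="us-east-1")
        url = sqs.create_queue(
            QueueName="demo.fifo",
            Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"},
        )["QueueUrl"]
        try:
            sqs.send_message(QueueUrl=url, MessageBody="no group id")
        except ClientError as err:
            assert err.response["Error"]["Code"] == "MissingParameter"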


@@ -95,7 +95,10 @@ class StepFunctionResponse(BaseResponse):
     def start_execution(self):
         arn = self._get_param("stateMachineArn")
         name = self._get_param("name")
-        execution = self.stepfunction_backend.start_execution(arn, name)
+        try:
+            execution = self.stepfunction_backend.start_execution(arn, name)
+        except AWSError as err:
+            return err.response()
         response = {
             "executionArn": execution.execution_arn,
             "startDate": execution.start_date,


@@ -41,7 +41,7 @@ install_requires = [
     "PyYAML>=5.1",
     "pytz",
     "python-dateutil<3.0.0,>=2.1",
-    "python-jose<4.0.0",
+    "python-jose[cryptography]>=3.1.0,<4.0.0",
     "docker>=2.5.1",
     "jsondiff>=1.1.2",
     "aws-xray-sdk!=0.96,>=0.93",


@@ -572,10 +572,10 @@ def test_boto3_create_stack_set_with_yaml():
 @mock_s3
 def test_create_stack_set_from_s3_url():
     s3 = boto3.client("s3")
-    s3_conn = boto3.resource("s3")
-    bucket = s3_conn.create_bucket(Bucket="foobar")
-    key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
+    s3_conn = boto3.resource("s3", region_name="us-east-1")
+    s3_conn.create_bucket(Bucket="foobar")
+    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
     key_url = s3.generate_presigned_url(
         ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
     )
@@ -715,10 +715,10 @@ def test_create_stack_with_role_arn():
 @mock_s3
 def test_create_stack_from_s3_url():
     s3 = boto3.client("s3")
-    s3_conn = boto3.resource("s3")
-    bucket = s3_conn.create_bucket(Bucket="foobar")
-    key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
+    s3_conn = boto3.resource("s3", region_name="us-east-1")
+    s3_conn.create_bucket(Bucket="foobar")
+    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
     key_url = s3.generate_presigned_url(
         ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
     )
@@ -770,7 +770,7 @@ def test_update_stack_with_previous_value():
 @mock_ec2
 def test_update_stack_from_s3_url():
     s3 = boto3.client("s3")
-    s3_conn = boto3.resource("s3")
+    s3_conn = boto3.resource("s3", region_name="us-east-1")
     cf_conn = boto3.client("cloudformation", region_name="us-east-1")

     cf_conn.create_stack(
@@ -799,10 +799,10 @@ def test_update_stack_from_s3_url():
 @mock_s3
 def test_create_change_set_from_s3_url():
     s3 = boto3.client("s3")
-    s3_conn = boto3.resource("s3")
-    bucket = s3_conn.create_bucket(Bucket="foobar")
-    key = s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
+    s3_conn = boto3.resource("s3", region_name="us-east-1")
+    s3_conn.create_bucket(Bucket="foobar")
+    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
     key_url = s3.generate_presigned_url(
         ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
     )
@@ -819,7 +819,7 @@ def test_create_change_set_from_s3_url():
         in response["Id"]
     )
     assert (
-        "arn:aws:cloudformation:us-east-1:123456789:stack/NewStack"
+        "arn:aws:cloudformation:us-west-1:123456789:stack/NewStack"
         in response["StackId"]
     )
@@ -838,7 +838,31 @@ def test_describe_change_set():
     stack["ChangeSetName"].should.equal("NewChangeSet")
     stack["StackName"].should.equal("NewStack")
-    stack["Status"].should.equal("REVIEW_IN_PROGRESS")
+    stack["Status"].should.equal("CREATE_COMPLETE")
+    stack["ExecutionStatus"].should.equal("AVAILABLE")
+    two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2)
+    assert (
+        two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC)
+    ), "Change set should have been created recently"
+
+    stack["Changes"].should.have.length_of(1)
+    stack["Changes"][0].should.equal(
+        dict(
+            {
+                "Type": "Resource",
+                "ResourceChange": {
+                    "Action": "Add",
+                    "LogicalResourceId": "EC2Instance1",
+                    "ResourceType": "AWS::EC2::Instance",
+                },
+            }
+        )
+    )
+
+    # Execute change set
+    cf_conn.execute_change_set(ChangeSetName="NewChangeSet")
+
+    # Verify that the changes have been applied
+    stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
+
+    stack["Changes"].should.have.length_of(1)

     cf_conn.create_change_set(
         StackName="NewStack",
@@ -868,7 +892,7 @@ def test_execute_change_set_w_arn():
     )
     ec2.describe_instances()["Reservations"].should.have.length_of(0)
     cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal(
-        "REVIEW_IN_PROGRESS"
+        "CREATE_COMPLETE"
     )
     # Execute change set
     cf_conn.execute_change_set(ChangeSetName=change_set["Id"])
@@ -882,7 +906,7 @@ def test_execute_change_set_w_arn():
 @mock_cloudformation
 def test_execute_change_set_w_name():
     cf_conn = boto3.client("cloudformation", region_name="us-east-1")
-    change_set = cf_conn.create_change_set(
+    cf_conn.create_change_set(
         StackName="NewStack",
         TemplateBody=dummy_template_json,
         ChangeSetName="NewChangeSet",
@@ -1216,9 +1240,7 @@ def test_delete_stack_with_export():
 @mock_cloudformation
 def test_export_names_must_be_unique():
     cf = boto3.resource("cloudformation", region_name="us-east-1")
-    first_stack = cf.create_stack(
-        StackName="test_stack", TemplateBody=dummy_output_template_json
-    )
+    cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json)
     with assert_raises(ClientError):
         cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json)
@@ -1232,9 +1254,7 @@ def test_stack_with_imports():
     output_stack = cf.create_stack(
         StackName="test_stack1", TemplateBody=dummy_output_template_json
     )
-    import_stack = cf.create_stack(
-        StackName="test_stack2", TemplateBody=dummy_import_template_json
-    )
+    cf.create_stack(StackName="test_stack2", TemplateBody=dummy_import_template_json)

     output_stack.outputs.should.have.length_of(1)
     output = output_stack.outputs[0]["OutputValue"]


@@ -1,7 +1,6 @@
 from __future__ import unicode_literals
 import json
-import base64
 from decimal import Decimal

 import boto
@@ -18,16 +17,17 @@ import boto.sqs
 import boto.vpc
 import boto3
 import sure  # noqa
+from string import Template

 from moto import (
     mock_autoscaling_deprecated,
+    mock_autoscaling,
     mock_cloudformation,
     mock_cloudformation_deprecated,
     mock_datapipeline_deprecated,
     mock_dynamodb2,
     mock_ec2,
     mock_ec2_deprecated,
-    mock_elb,
     mock_elb_deprecated,
     mock_events,
     mock_iam_deprecated,
@@ -36,18 +36,14 @@ from moto import (
     mock_logs,
     mock_rds_deprecated,
     mock_rds2,
-    mock_rds2_deprecated,
-    mock_redshift,
     mock_redshift_deprecated,
     mock_route53_deprecated,
     mock_s3,
     mock_sns_deprecated,
-    mock_sqs,
     mock_sqs_deprecated,
     mock_elbv2,
 )
 from moto.core import ACCOUNT_ID
-from moto.dynamodb2.models import Table

 from tests.test_cloudformation.fixtures import (
     ec2_classic_eip,
@@ -2496,3 +2492,271 @@ def test_stack_events_create_rule_as_target():
     log_groups["logGroups"][0]["logGroupName"].should.equal(rules["Rules"][0]["Arn"])
     log_groups["logGroups"][0]["retentionInDays"].should.equal(3)
+
+
+@mock_cloudformation
+@mock_events
+def test_stack_events_update_rule_integration():
+    events_template = Template(
+        """{
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "Event": {
+                "Type": "AWS::Events::Rule",
+                "Properties": {
+                    "Name": "$Name",
+                    "State": "$State",
+                    "ScheduleExpression": "rate(5 minutes)",
+                },
+            }
+        },
+    } """
+    )
+
+    cf_conn = boto3.client("cloudformation", "us-west-2")
+
+    original_template = events_template.substitute(Name="Foo", State="ENABLED")
+    cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template)
+
+    rules = boto3.client("events", "us-west-2").list_rules()
+    rules["Rules"].should.have.length_of(1)
+    rules["Rules"][0]["Name"].should.equal("Foo")
+    rules["Rules"][0]["State"].should.equal("ENABLED")
+
+    update_template = events_template.substitute(Name="Bar", State="DISABLED")
+    cf_conn.update_stack(StackName="test_stack", TemplateBody=update_template)
+
+    rules = boto3.client("events", "us-west-2").list_rules()
+
+    rules["Rules"].should.have.length_of(1)
+    rules["Rules"][0]["Name"].should.equal("Bar")
+    rules["Rules"][0]["State"].should.equal("DISABLED")
+
+
+@mock_cloudformation
+@mock_autoscaling
+def test_autoscaling_propagate_tags():
+    autoscaling_group_with_tags = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "AutoScalingGroup": {
+                "Type": "AWS::AutoScaling::AutoScalingGroup",
+                "Properties": {
+                    "AutoScalingGroupName": "test-scaling-group",
+                    "DesiredCapacity": 1,
+                    "MinSize": 1,
+                    "MaxSize": 50,
+                    "LaunchConfigurationName": "test-launch-config",
+                    "AvailabilityZones": ["us-east-1a"],
+                    "Tags": [
+                        {
+                            "Key": "test-key-propagate",
+                            "Value": "test",
+                            "PropagateAtLaunch": True,
+                        },
+                        {
+                            "Key": "test-key-no-propagate",
+                            "Value": "test",
+                            "PropagateAtLaunch": False,
+                        },
+                    ],
+                },
+                "DependsOn": "LaunchConfig",
+            },
+            "LaunchConfig": {
+                "Type": "AWS::AutoScaling::LaunchConfiguration",
+                "Properties": {"LaunchConfigurationName": "test-launch-config"},
+            },
+        },
+    }
+    boto3.client("cloudformation", "us-east-1").create_stack(
+        StackName="propagate_tags_test",
+        TemplateBody=json.dumps(autoscaling_group_with_tags),
+    )
+
+    autoscaling = boto3.client("autoscaling", "us-east-1")
+
+    autoscaling_group_tags = autoscaling.describe_auto_scaling_groups()[
+        "AutoScalingGroups"
+    ][0]["Tags"]
+    propagation_dict = {
+        tag["Key"]: tag["PropagateAtLaunch"] for tag in autoscaling_group_tags
+    }
+
+    assert propagation_dict["test-key-propagate"]
+    assert not propagation_dict["test-key-no-propagate"]
+
+
+@mock_cloudformation
+@mock_events
+def test_stack_eventbus_create_from_cfn_integration():
+    eventbus_template = """{
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "EventBus": {
+                "Type": "AWS::Events::EventBus",
+                "Properties": {
+                    "Name": "MyCustomEventBus"
+                },
+            }
+        },
+    }"""
+
+    cf_conn = boto3.client("cloudformation", "us-west-2")
+    cf_conn.create_stack(StackName="test_stack", TemplateBody=eventbus_template)
+
+    event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="MyCustom"
+    )
+
+    event_buses["EventBuses"].should.have.length_of(1)
+    event_buses["EventBuses"][0]["Name"].should.equal("MyCustomEventBus")
+
+
+@mock_cloudformation
+@mock_events
+def test_stack_events_delete_eventbus_integration():
+    eventbus_template = """{
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "EventBus": {
+                "Type": "AWS::Events::EventBus",
+                "Properties": {
+                    "Name": "MyCustomEventBus"
+                },
+            }
+        },
+    }"""
+    cf_conn = boto3.client("cloudformation", "us-west-2")
+    cf_conn.create_stack(StackName="test_stack", TemplateBody=eventbus_template)
+
+    event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="MyCustom"
+    )
+    event_buses["EventBuses"].should.have.length_of(1)
+
+    cf_conn.delete_stack(StackName="test_stack")
+
+    event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="MyCustom"
+    )
+    event_buses["EventBuses"].should.have.length_of(0)
+
+
+@mock_cloudformation
+@mock_events
+def test_stack_events_delete_from_cfn_integration():
+    eventbus_template = Template(
+        """{
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "$resource_name": {
+                "Type": "AWS::Events::EventBus",
+                "Properties": {
+                    "Name": "$name"
+                },
+            }
+        },
+    }"""
+    )
+
+    cf_conn = boto3.client("cloudformation", "us-west-2")
+
+    original_template = eventbus_template.substitute(
+        {"resource_name": "original", "name": "MyCustomEventBus"}
+    )
+    cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template)
+
+    original_event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="MyCustom"
+    )
+    original_event_buses["EventBuses"].should.have.length_of(1)
+
+    original_eventbus = original_event_buses["EventBuses"][0]
+
+    updated_template = eventbus_template.substitute(
+        {"resource_name": "updated", "name": "AnotherEventBus"}
+    )
+    cf_conn.update_stack(StackName="test_stack", TemplateBody=updated_template)
+
+    update_event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="AnotherEventBus"
+    )
+    update_event_buses["EventBuses"].should.have.length_of(1)
+    update_event_buses["EventBuses"][0]["Arn"].shouldnt.equal(original_eventbus["Arn"])
+
+
+@mock_cloudformation
+@mock_events
+def test_stack_events_update_from_cfn_integration():
+    eventbus_template = Template(
+        """{
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "EventBus": {
+                "Type": "AWS::Events::EventBus",
+                "Properties": {
+                    "Name": "$name"
+                },
+            }
+        },
+    }"""
+    )
+
+    cf_conn = boto3.client("cloudformation", "us-west-2")
+
+    original_template = eventbus_template.substitute({"name": "MyCustomEventBus"})
+    cf_conn.create_stack(StackName="test_stack", TemplateBody=original_template)
+
+    original_event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="MyCustom"
+    )
+    original_event_buses["EventBuses"].should.have.length_of(1)
+
+    original_eventbus = original_event_buses["EventBuses"][0]
+
+    updated_template = eventbus_template.substitute({"name": "NewEventBus"})
+    cf_conn.update_stack(StackName="test_stack", TemplateBody=updated_template)
+
+    update_event_buses = boto3.client("events", "us-west-2").list_event_buses(
+        NamePrefix="NewEventBus"
+    )
+    update_event_buses["EventBuses"].should.have.length_of(1)
+    update_event_buses["EventBuses"][0]["Name"].should.equal("NewEventBus")
+    update_event_buses["EventBuses"][0]["Arn"].shouldnt.equal(original_eventbus["Arn"])
+
+
+@mock_cloudformation
+@mock_events
+def test_stack_events_get_attribute_integration():
+    eventbus_template = """{
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "EventBus": {
+                "Type": "AWS::Events::EventBus",
+                "Properties": {
+                    "Name": "MyEventBus"
+                },
+            }
+        },
+        "Outputs": {
+            "bus_arn": {"Value": {"Fn::GetAtt": ["EventBus", "Arn"]}},
+            "bus_name": {"Value": {"Fn::GetAtt": ["EventBus", "Name"]}},
+        }
+    }"""
+
+    cf = boto3.client("cloudformation", "us-west-2")
+    events = boto3.client("events", "us-west-2")
+
+    cf.create_stack(StackName="test_stack", TemplateBody=eventbus_template)
+
+    stack = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
+    outputs = stack["Outputs"]
+
+    output_arn = list(filter(lambda item: item["OutputKey"] == "bus_arn", outputs))[0]
+    output_name = list(filter(lambda item: item["OutputKey"] == "bus_name", outputs))[0]
+
+    event_bus = events.list_event_buses(NamePrefix="MyEventBus")["EventBuses"][0]
+
+    output_arn["OutputValue"].should.equal(event_bus["Arn"])
+    output_name["OutputValue"].should.equal(event_bus["Name"])


@@ -5,6 +5,7 @@ import os
 import boto3
 from nose.tools import raises
 import botocore
+import sure  # noqa

 from moto.cloudformation.exceptions import ValidationError


@@ -92,6 +92,37 @@ def test_get_dashboard_fail():
         raise RuntimeError("Should of raised error")


+@mock_cloudwatch
+def test_delete_invalid_alarm():
+    cloudwatch = boto3.client("cloudwatch", "eu-west-1")
+
+    cloudwatch.put_metric_alarm(
+        AlarmName="testalarm1",
+        MetricName="cpu",
+        Namespace="blah",
+        Period=10,
+        EvaluationPeriods=5,
+        Statistic="Average",
+        Threshold=2,
+        ComparisonOperator="GreaterThanThreshold",
+        ActionsEnabled=True,
+    )
+
+    # trying to delete an alarm which is not created along with valid alarm.
+    with assert_raises(ClientError) as e:
+        cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName", "testalarm1"])
+    e.exception.response["Error"]["Code"].should.equal("ResourceNotFound")
+
+    resp = cloudwatch.describe_alarms(AlarmNames=["testalarm1"])
+    # making sure other alarms are not deleted in case of an error.
+    len(resp["MetricAlarms"]).should.equal(1)
+
+    # test to check if the error raises if only one invalid alarm is tried to delete.
+    with assert_raises(ClientError) as e:
+        cloudwatch.delete_alarms(AlarmNames=["InvalidAlarmName"])
+    e.exception.response["Error"]["Code"].should.equal("ResourceNotFound")
+
+
 @mock_cloudwatch
 def test_alarm_state():
     client = boto3.client("cloudwatch", region_name="eu-central-1")


@@ -99,6 +99,106 @@ def test_instance_launch_and_terminate():
     instance.state.should.equal("terminated")


+@mock_ec2
+def test_instance_terminate_discard_volumes():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[
+            {
+                "DeviceName": "/dev/sda1",
+                "Ebs": {"VolumeSize": 50, "DeleteOnTermination": True},
+            }
+        ],
+    )
+    instance = result[0]
+
+    instance_volume_ids = []
+    for volume in instance.volumes.all():
+        instance_volume_ids.append(volume.volume_id)
+
+    instance.terminate()
+    instance.wait_until_terminated()
+
+    assert not list(ec2_resource.volumes.all())
+
+
+@mock_ec2
+def test_instance_terminate_keep_volumes():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}],
+    )
+    instance = result[0]
+
+    instance_volume_ids = []
+    for volume in instance.volumes.all():
+        instance_volume_ids.append(volume.volume_id)
+
+    instance.terminate()
+    instance.wait_until_terminated()
+
+    assert len(instance_volume_ids) == 1
+    volume = ec2_resource.Volume(instance_volume_ids[0])
+    volume.state.should.equal("available")
+
+
+@mock_ec2
+def test_instance_terminate_detach_volumes():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[
+            {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},
+            {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}},
+        ],
+    )
+    instance = result[0]
+    for volume in instance.volumes.all():
+        response = instance.detach_volume(VolumeId=volume.volume_id)
+        response["State"].should.equal("detaching")
+
+    instance.terminate()
+    instance.wait_until_terminated()
+
+    assert len(list(ec2_resource.volumes.all())) == 2
+
+
+@mock_ec2
+def test_instance_detach_volume_wrong_path():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},],
+    )
+    instance = result[0]
+    for volume in instance.volumes.all():
+        with assert_raises(ClientError) as ex:
+            instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf")
+
+        ex.exception.response["Error"]["Code"].should.equal(
+            "InvalidAttachment.NotFound"
+        )
+        ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+        ex.exception.response["Error"]["Message"].should.equal(
+            "The volume {0} is not attached to instance {1} as device {2}".format(
+                volume.volume_id, instance.instance_id, "/dev/sdf"
+            )
+        )
+
+
 @mock_ec2_deprecated
 def test_terminate_empty_instances():
     conn = boto.connect_ec2("the_key", "the_secret")
@@ -1416,14 +1516,14 @@ def test_modify_delete_on_termination():
     result = ec2_client.create_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)
     instance = result[0]
     instance.load()
-    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False)
+    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True)
     instance.modify_attribute(
         BlockDeviceMappings=[
-            {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}}
+            {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": False}}
         ]
     )
     instance.load()
-    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True)
+    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False)


@@ -275,3 +275,32 @@ def test_duplicate_network_acl_entry():
             rule_number
         )
     )
+
+
+@mock_ec2
+def test_describe_network_acls():
+    conn = boto3.client("ec2", region_name="us-west-2")
+
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")
+    vpc_id = vpc["Vpc"]["VpcId"]
+
+    network_acl = conn.create_network_acl(VpcId=vpc_id)
+
+    network_acl_id = network_acl["NetworkAcl"]["NetworkAclId"]
+    resp = conn.describe_network_acls(NetworkAclIds=[network_acl_id])
+    result = resp["NetworkAcls"]
+
+    result.should.have.length_of(1)
+    result[0]["NetworkAclId"].should.equal(network_acl_id)
+
+    resp2 = conn.describe_network_acls()["NetworkAcls"]
+    resp2.should.have.length_of(3)
+
+    with assert_raises(ClientError) as ex:
+        conn.describe_network_acls(NetworkAclIds=["1"])
+
+    str(ex.exception).should.equal(
+        "An error occurred (InvalidRouteTableID.NotFound) when calling the "
+        "DescribeNetworkAcls operation: The routeTable ID '1' does not exist"
+    )


@ -4,6 +4,7 @@ import unittest
import boto3 import boto3
import sure # noqa import sure # noqa
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from nose.tools import assert_raises from nose.tools import assert_raises
@ -201,6 +202,35 @@ def test_remove_targets():
    assert targets_before - 1 == targets_after

@mock_events
def test_put_targets():
    client = boto3.client("events", "us-west-2")
    rule_name = "my-event"
    rule_data = {
        "Name": rule_name,
        "ScheduleExpression": "rate(5 minutes)",
        "EventPattern": '{"source": ["test-source"]}',
    }

    client.put_rule(**rule_data)

    targets = client.list_targets_by_rule(Rule=rule_name)["Targets"]
    targets_before = len(targets)
    assert targets_before == 0

    targets_data = [{"Arn": "test_arn", "Id": "test_id"}]
    resp = client.put_targets(Rule=rule_name, Targets=targets_data)
    assert resp["FailedEntryCount"] == 0
    assert len(resp["FailedEntries"]) == 0

    targets = client.list_targets_by_rule(Rule=rule_name)["Targets"]
    targets_after = len(targets)
    assert targets_before + 1 == targets_after

    assert targets[0]["Arn"] == "test_arn"
    assert targets[0]["Id"] == "test_id"
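The counterpart operation removes targets by Id, mirroring the put_targets call above. A minimal sketch (the function name is hypothetical; remove_targets and list_targets_by_rule are real EventBridge client calls):

import boto3
from moto import mock_events

@mock_events
def sketch_put_then_remove_target():
    client = boto3.client("events", "us-west-2")
    client.put_rule(Name="my-event", ScheduleExpression="rate(5 minutes)")
    client.put_targets(Rule="my-event", Targets=[{"Arn": "test_arn", "Id": "test_id"}])
    # Targets are removed by the same Id they were registered under
    client.remove_targets(Rule="my-event", Ids=["test_id"])
    assert len(client.list_targets_by_rule(Rule="my-event")["Targets"]) == 0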

@mock_events
def test_permissions():
    client = boto3.client("events", "eu-central-1")

View File

@ -206,6 +206,26 @@ def test_remove_role_from_instance_profile():
    dict(profile.roles).should.be.empty

@mock_iam()
def test_delete_instance_profile():
    conn = boto3.client("iam", region_name="us-east-1")
    conn.create_role(
        RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/"
    )
    conn.create_instance_profile(InstanceProfileName="my-profile")
    conn.add_role_to_instance_profile(
        InstanceProfileName="my-profile", RoleName="my-role"
    )
    # Deletion fails while a role is still attached
    with assert_raises(conn.exceptions.DeleteConflictException):
        conn.delete_instance_profile(InstanceProfileName="my-profile")
    conn.remove_role_from_instance_profile(
        InstanceProfileName="my-profile", RoleName="my-role"
    )
    conn.delete_instance_profile(InstanceProfileName="my-profile")
    with assert_raises(conn.exceptions.NoSuchEntityException):
        conn.get_instance_profile(InstanceProfileName="my-profile")
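The test encodes IAM's ordering rule: roles must be detached before the profile can go. A generic teardown sketch (the helper name is hypothetical; the client calls are real IAM APIs):

import boto3

def teardown_instance_profile(iam, profile_name):
    """Detach every role, then delete the profile (hypothetical helper)."""
    profile = iam.get_instance_profile(InstanceProfileName=profile_name)
    for role in profile["InstanceProfile"]["Roles"]:
        iam.remove_role_from_instance_profile(
            InstanceProfileName=profile_name, RoleName=role["RoleName"]
        )
    iam.delete_instance_profile(InstanceProfileName=profile_name)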

@mock_iam()
def test_get_login_profile():
    conn = boto3.client("iam", region_name="us-east-1")
@ -2815,3 +2835,36 @@ def test_list_user_tags():
        [{"Key": "Stan", "Value": "The Caddy"}, {"Key": "like-a", "Value": "glove"}]
    )
    response["IsTruncated"].should_not.be.ok

@mock_iam()
def test_delete_role_with_instance_profiles_present():
    iam = boto3.client("iam", region_name="us-east-1")

    trust_policy = """
    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Principal": {
            "Service": "ec2.amazonaws.com"
          },
          "Action": "sts:AssumeRole"
        }
      ]
    }
    """
    trust_policy = trust_policy.strip()

    iam.create_role(RoleName="Role1", AssumeRolePolicyDocument=trust_policy)
    iam.create_instance_profile(InstanceProfileName="IP1")
    iam.add_role_to_instance_profile(InstanceProfileName="IP1", RoleName="Role1")

    iam.create_role(RoleName="Role2", AssumeRolePolicyDocument=trust_policy)
    iam.delete_role(RoleName="Role2")

    role_names = [role["RoleName"] for role in iam.list_roles()["Roles"]]
    assert "Role1" in role_names
    assert "Role2" not in role_names

View File

@ -4521,3 +4521,36 @@ def test_creating_presigned_post():
        ].read()
        == fdata
    )

@mock_s3
def test_encryption():
    # Create Bucket so that test can run
    conn = boto3.client("s3", region_name="us-east-1")
    conn.create_bucket(Bucket="mybucket")

    with assert_raises(ClientError) as exc:
        conn.get_bucket_encryption(Bucket="mybucket")

    sse_config = {
        "Rules": [
            {
                "ApplyServerSideEncryptionByDefault": {
                    "SSEAlgorithm": "aws:kms",
                    "KMSMasterKeyID": "12345678",
                }
            }
        ]
    }

    conn.put_bucket_encryption(
        Bucket="mybucket", ServerSideEncryptionConfiguration=sse_config
    )

    resp = conn.get_bucket_encryption(Bucket="mybucket")
    assert "ServerSideEncryptionConfiguration" in resp
    assert resp["ServerSideEncryptionConfiguration"] == sse_config

    conn.delete_bucket_encryption(Bucket="mybucket")
    with assert_raises(ClientError) as exc:
        conn.get_bucket_encryption(Bucket="mybucket")
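The test exercises the aws:kms rule; the simpler SSE-S3 variant needs no KMS key. A minimal sketch with the AES256 algorithm (bucket and function names are hypothetical; the calls mirror the test above):

import boto3
from moto import mock_s3

@mock_s3
def sketch_sse_s3_default_encryption():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="examplebucket")
    # SSE-S3 (AES256) omits KMSMasterKeyID entirely
    s3.put_bucket_encryption(
        Bucket="examplebucket",
        ServerSideEncryptionConfiguration={
            "Rules": [
                {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
            ]
        },
    )
    rules = s3.get_bucket_encryption(Bucket="examplebucket")[
        "ServerSideEncryptionConfiguration"
    ]["Rules"]
    assert rules[0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "AES256"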

View File

@ -733,25 +733,33 @@ def test_put_secret_value_versions_differ_if_same_secret_put_twice():
def test_put_secret_value_maintains_description_and_tags():
    conn = boto3.client("secretsmanager", region_name="us-west-2")
-    conn.create_secret(
+    previous_response = conn.create_secret(
        Name=DEFAULT_SECRET_NAME,
        SecretString="foosecret",
        Description="desc",
        Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}],
    )
+    previous_version_id = previous_response["VersionId"]

    conn = boto3.client("secretsmanager", region_name="us-west-2")
-    conn.put_secret_value(
+    current_response = conn.put_secret_value(
        SecretId=DEFAULT_SECRET_NAME,
        SecretString="dupe_secret",
        VersionStages=["AWSCURRENT"],
    )
+    current_version_id = current_response["VersionId"]

    secret_details = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME)
    assert secret_details["Tags"] == [
        {"Key": "Foo", "Value": "Bar"},
        {"Key": "Mykey", "Value": "Myvalue"},
    ]
    assert secret_details["Description"] == "desc"
+    assert secret_details["VersionIdsToStages"] is not None
+    assert previous_version_id in secret_details["VersionIdsToStages"]
+    assert current_version_id in secret_details["VersionIdsToStages"]
+    assert secret_details["VersionIdsToStages"][previous_version_id] == ["AWSPREVIOUS"]
+    assert secret_details["VersionIdsToStages"][current_version_id] == ["AWSCURRENT"]

@mock_secretsmanager

View File

@ -1164,7 +1164,7 @@ def test_send_message_batch_with_empty_list():
@mock_sqs
def test_batch_change_message_visibility():
-    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
+    if settings.TEST_SERVER_MODE:
        raise SkipTest("Cant manipulate time in server mode")

    with freeze_time("2015-01-01 12:00:00"):
@ -1174,9 +1174,15 @@ def test_batch_change_message_visibility():
        )
        queue_url = resp["QueueUrl"]

-        sqs.send_message(QueueUrl=queue_url, MessageBody="msg1")
-        sqs.send_message(QueueUrl=queue_url, MessageBody="msg2")
-        sqs.send_message(QueueUrl=queue_url, MessageBody="msg3")
+        sqs.send_message(
+            QueueUrl=queue_url, MessageBody="msg1", MessageGroupId="group1"
+        )
+        sqs.send_message(
+            QueueUrl=queue_url, MessageBody="msg2", MessageGroupId="group2"
+        )
+        sqs.send_message(
+            QueueUrl=queue_url, MessageBody="msg3", MessageGroupId="group3"
+        )

    with freeze_time("2015-01-01 12:01:00"):
        receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2)
@ -1529,7 +1535,7 @@ def test_create_fifo_queue_with_dlq():
@mock_sqs
def test_queue_with_dlq():
-    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
+    if settings.TEST_SERVER_MODE:
        raise SkipTest("Cant manipulate time in server mode")

    sqs = boto3.client("sqs", region_name="us-east-1")
@ -1554,8 +1560,12 @@ def test_queue_with_dlq():
    )
    queue_url2 = resp["QueueUrl"]

-    sqs.send_message(QueueUrl=queue_url2, MessageBody="msg1")
-    sqs.send_message(QueueUrl=queue_url2, MessageBody="msg2")
+    sqs.send_message(
+        QueueUrl=queue_url2, MessageBody="msg1", MessageGroupId="group"
+    )
+    sqs.send_message(
+        QueueUrl=queue_url2, MessageBody="msg2", MessageGroupId="group"
+    )

    with freeze_time("2015-01-01 13:00:00"):
        resp = sqs.receive_message(
@ -1686,20 +1696,24 @@ def test_receive_messages_with_message_group_id():
    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
    queue.send_message(MessageBody="message-1", MessageGroupId="group")
    queue.send_message(MessageBody="message-2", MessageGroupId="group")
+    queue.send_message(MessageBody="message-3", MessageGroupId="group")
+    queue.send_message(MessageBody="separate-message", MessageGroupId="anothergroup")

-    messages = queue.receive_messages()
-    messages.should.have.length_of(1)
-    message = messages[0]
+    messages = queue.receive_messages(MaxNumberOfMessages=2)
+    messages.should.have.length_of(2)
+    messages[0].attributes["MessageGroupId"].should.equal("group")

-    # received message is not deleted!
-    messages = queue.receive_messages(WaitTimeSeconds=0)
-    messages.should.have.length_of(0)
+    # Different client can not 'see' messages from the group until they are processed
+    messages_for_client_2 = queue.receive_messages(WaitTimeSeconds=0)
+    messages_for_client_2.should.have.length_of(1)
+    messages_for_client_2[0].body.should.equal("separate-message")

    # message is now processed, next one should be available
-    message.delete()
+    for message in messages:
+        message.delete()
    messages = queue.receive_messages()
    messages.should.have.length_of(1)
+    messages[0].body.should.equal("message-3")

@mock_sqs
@ -1730,7 +1744,7 @@ def test_receive_messages_with_message_group_id_on_requeue():
@mock_sqs
def test_receive_messages_with_message_group_id_on_visibility_timeout():
-    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
+    if settings.TEST_SERVER_MODE:
        raise SkipTest("Cant manipulate time in server mode")

    with freeze_time("2015-01-01 12:00:00"):
@ -1746,11 +1760,11 @@ def test_receive_messages_with_message_group_id_on_visibility_timeout():
        messages.should.have.length_of(1)
        message = messages[0]

-        # received message is not deleted!
-        messages = queue.receive_messages(WaitTimeSeconds=0)
-        messages.should.have.length_of(0)
-        message.change_visibility(VisibilityTimeout=10)
+        # received message is not processed yet
+        messages_for_second_client = queue.receive_messages(WaitTimeSeconds=0)
+        messages_for_second_client.should.have.length_of(0)
+        for message in messages:
+            message.change_visibility(VisibilityTimeout=10)

    with freeze_time("2015-01-01 12:00:05"):
@ -1794,3 +1808,20 @@ def test_list_queues_limits_to_1000_queues():
    list(resource.queues.filter(QueueNamePrefix="test-queue")).should.have.length_of(
        1000
    )

@mock_sqs
def test_send_messages_to_fifo_without_message_group_id():
    sqs = boto3.resource("sqs", region_name="eu-west-3")
    queue = sqs.create_queue(
        QueueName="blah.fifo",
        Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"},
    )

    with assert_raises(Exception) as e:
        queue.send_message(MessageBody="message-1")

    ex = e.exception
    ex.response["Error"]["Code"].should.equal("MissingParameter")
    ex.response["Error"]["Message"].should.equal(
        "The request must contain the parameter MessageGroupId."
    )
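For contrast, the passing case: with ContentBasedDeduplication enabled, MessageGroupId is the only extra parameter a FIFO send needs. A minimal sketch (queue and function names are hypothetical):

import boto3
from moto import mock_sqs

@mock_sqs
def sketch_fifo_send_with_group_id():
    sqs = boto3.resource("sqs", region_name="eu-west-3")
    queue = sqs.create_queue(
        QueueName="example.fifo",
        Attributes={"FifoQueue": "true", "ContentBasedDeduplication": "true"},
    )
    # Deduplication id is derived from the body, so only the group id is required
    queue.send_message(MessageBody="message-1", MessageGroupId="group-a")
    messages = queue.receive_messages()
    assert messages[0].body == "message-1"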

View File

@ -253,6 +253,15 @@ def test_state_machine_throws_error_when_describing_unknown_machine():
        client.describe_state_machine(stateMachineArn=unknown_state_machine)

@mock_stepfunctions
@mock_sts
def test_state_machine_throws_error_when_describing_bad_arn():
    client = boto3.client("stepfunctions", region_name=region)
    #
    with assert_raises(ClientError) as exc:
        client.describe_state_machine(stateMachineArn="bad")

@mock_stepfunctions
@mock_sts
def test_state_machine_throws_error_when_describing_machine_in_different_account():
@ -362,6 +371,15 @@ def test_state_machine_start_execution():
    execution["startDate"].should.be.a(datetime)

@mock_stepfunctions
@mock_sts
def test_state_machine_start_execution_bad_arn_raises_exception():
    client = boto3.client("stepfunctions", region_name=region)
    #
    with assert_raises(ClientError) as exc:
        client.start_execution(stateMachineArn="bad")

@mock_stepfunctions
@mock_sts
def test_state_machine_start_execution_with_custom_name():
@ -446,7 +464,7 @@ def test_state_machine_describe_execution():
@mock_stepfunctions
@mock_sts
-def test_state_machine_throws_error_when_describing_unknown_machine():
+def test_execution_throws_error_when_describing_unknown_execution():
    client = boto3.client("stepfunctions", region_name=region)
    #
    with assert_raises(ClientError) as exc: