IAM CloudFormation update, significant CloudFormation refactoring (#3218)

* IAM User Cloudformation Enhancements: update, delete, getatt.

* AWS::IAM::Policy Support

* Added unit tests for AWS::IAM::Policy for roles and groups. Fixed bug related to groups.

* AWS::IAM::AccessKey CloudFormation support.

* Refactor of CloudFormation parsing.py methods to simplify and standardize how they call to the models.  Adjusted some models accordingly.

* Further model CloudFormation support changes to align with revised CloudFormation logic.  Mostly avoidance of getting resource name from properties.

* Support for Kinesis Stream RetentionPeriodHours param.

* Kinesis Stream Cloudformation Tag Support.

* Added omitted 'region' param to boto3.client() calls in new tests.

Co-authored-by: Joseph Weitekamp <jweite@amazon.com>
This commit is contained in:
jweite 2020-08-27 05:11:47 -04:00 committed by GitHub
parent 3b06ce689e
commit 49d92861c0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 1914 additions and 320 deletions

View File

@ -702,10 +702,13 @@ class EventSourceMapping(CloudFormationModel):
)
for esm in esms:
if esm.logical_resource_id in resource_name:
lambda_backend.delete_event_source_mapping
if esm.uuid == resource_name:
esm.delete(region_name)
@property
def physical_resource_id(self):
    # CloudFormation identifies an event source mapping by its UUID.
    return self.uuid
class LambdaVersion(CloudFormationModel):
def __init__(self, spec):

View File

@ -246,12 +246,14 @@ def generate_resource_name(resource_type, stack_name, logical_id):
return "{0}{1}".format(
stack_name[:max_stack_name_portion_len], right_hand_part_of_name
).lower()
elif resource_type == "AWS::IAM::Policy":
return "{0}-{1}-{2}".format(stack_name[:5], logical_id[:4], random_suffix())
else:
return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix())
def parse_resource(
logical_id, resource_json, resources_map, add_name_to_resource_json=True
resource_json, resources_map,
):
resource_type = resource_json["Type"]
resource_class = resource_class_from_type(resource_type)
@ -263,21 +265,37 @@ def parse_resource(
)
return None
if "Properties" not in resource_json:
resource_json["Properties"] = {}
resource_json = clean_json(resource_json, resources_map)
resource_name = generate_resource_name(
return resource_class, resource_json, resource_type
def parse_resource_and_generate_name(
    logical_id, resource_json, resources_map,
):
    """Parse a resource and resolve the physical name to create it under.

    The name comes from the template's name property when one is supplied;
    otherwise a name is generated from the stack name and logical id.

    Returns (resource_class, resource_json, resource_name), or None when
    the resource type is not supported.
    """
    resource_tuple = parse_resource(resource_json, resources_map)
    if not resource_tuple:
        return None
    resource_class, resource_json, resource_type = resource_tuple

    generated_resource_name = generate_resource_name(
        resource_type, resources_map.get("AWS::StackName"), logical_id
    )

    resource_name_property = resource_name_property_from_type(resource_type)
    if resource_name_property:
        # Prefer the explicit name from the template over the generated one.
        if (
            "Properties" in resource_json
            and resource_name_property in resource_json["Properties"]
        ):
            resource_name = resource_json["Properties"][resource_name_property]
        else:
            resource_name = generated_resource_name
    else:
        resource_name = generated_resource_name

    return resource_class, resource_json, resource_name
@ -289,12 +307,14 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n
return None
resource_type = resource_json["Type"]
resource_tuple = parse_resource(logical_id, resource_json, resources_map)
resource_tuple = parse_resource_and_generate_name(
logical_id, resource_json, resources_map
)
if not resource_tuple:
return None
resource_class, resource_json, resource_name = resource_tuple
resource_class, resource_json, resource_physical_name = resource_tuple
resource = resource_class.create_from_cloudformation_json(
resource_name, resource_json, region_name
resource_physical_name, resource_json, region_name
)
resource.type = resource_type
resource.logical_resource_id = logical_id
@ -302,28 +322,34 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n
def parse_and_update_resource(logical_id, resource_json, resources_map, region_name):
    """Update an existing stack resource from new template JSON.

    Returns the updated resource, or None when the resource's model does
    not implement CloudFormation update support.
    """
    resource_class, resource_json, new_resource_name = parse_resource_and_generate_name(
        logical_id, resource_json, resources_map
    )
    original_resource = resources_map[logical_id]
    # Abstract hooks carry __isabstractmethod__; only call concrete overrides.
    if not hasattr(
        resource_class.update_from_cloudformation_json, "__isabstractmethod__"
    ):
        new_resource = resource_class.update_from_cloudformation_json(
            original_resource=original_resource,
            new_resource_name=new_resource_name,
            cloudformation_json=resource_json,
            region_name=region_name,
        )
        new_resource.type = resource_json["Type"]
        new_resource.logical_resource_id = logical_id
        return new_resource
    else:
        return None
def parse_and_delete_resource(resource_name, resource_json, resources_map, region_name):
    """Delete a stack resource when its model implements CloudFormation delete."""
    parsed = parse_resource(resource_json, resources_map)
    resource_class, resource_json, _ = parsed
    delete_hook = resource_class.delete_from_cloudformation_json
    # Abstract hooks carry __isabstractmethod__; only call concrete overrides.
    if not hasattr(delete_hook, "__isabstractmethod__"):
        delete_hook(resource_name, resource_json, region_name)
def parse_condition(condition, resources_map, condition_map):
@ -614,28 +640,36 @@ class ResourceMap(collections_abc.Mapping):
)
self._parsed_resources[resource_name] = new_resource
for resource_name, resource in resources_by_action["Remove"].items():
resource_json = old_template[resource_name]
for logical_name, _ in resources_by_action["Remove"].items():
resource_json = old_template[logical_name]
resource = self._parsed_resources[logical_name]
# ToDo: Standardize this.
if hasattr(resource, "physical_resource_id"):
resource_name = self._parsed_resources[
logical_name
].physical_resource_id
else:
resource_name = None
parse_and_delete_resource(
resource_name, resource_json, self, self._region_name
)
self._parsed_resources.pop(resource_name)
self._parsed_resources.pop(logical_name)
tries = 1
while resources_by_action["Modify"] and tries < 5:
for resource_name, resource in resources_by_action["Modify"].copy().items():
resource_json = new_template[resource_name]
for logical_name, _ in resources_by_action["Modify"].copy().items():
resource_json = new_template[logical_name]
try:
changed_resource = parse_and_update_resource(
resource_name, resource_json, self, self._region_name
logical_name, resource_json, self, self._region_name
)
except Exception as e:
# skip over dependency violations, and try again in a
# second pass
last_exception = e
else:
self._parsed_resources[resource_name] = changed_resource
del resources_by_action["Modify"][resource_name]
self._parsed_resources[logical_name] = changed_resource
del resources_by_action["Modify"][logical_name]
tries += 1
if tries == 5:
raise last_exception
@ -650,22 +684,20 @@ class ResourceMap(collections_abc.Mapping):
if parsed_resource and hasattr(parsed_resource, "delete"):
parsed_resource.delete(self._region_name)
else:
resource_name_attribute = (
parsed_resource.cloudformation_name_type()
if hasattr(parsed_resource, "cloudformation_name_type")
else resource_name_property_from_type(parsed_resource.type)
if hasattr(parsed_resource, "physical_resource_id"):
resource_name = parsed_resource.physical_resource_id
else:
resource_name = None
resource_json = self._resource_json_map[
parsed_resource.logical_resource_id
]
parse_and_delete_resource(
resource_name, resource_json, self, self._region_name,
)
if resource_name_attribute:
resource_json = self._resource_json_map[
parsed_resource.logical_resource_id
]
resource_name = resource_json["Properties"][
resource_name_attribute
]
parse_and_delete_resource(
resource_name, resource_json, self, self._region_name
)
self._parsed_resources.pop(parsed_resource.logical_resource_id)
self._parsed_resources.pop(parsed_resource.logical_resource_id)
except Exception as e:
# skip over dependency violations, and try again in a
# second pass

View File

@ -511,10 +511,9 @@ class LogGroup(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
log_group_name = properties["LogGroupName"]
tags = properties.get("Tags", {})
return logs_backends[region_name].create_log_group(
log_group_name, tags, **properties
resource_name, tags, **properties
)

View File

@ -90,9 +90,9 @@ class Pipeline(CloudFormationModel):
datapipeline_backend = datapipeline_backends[region_name]
properties = cloudformation_json["Properties"]
cloudformation_unique_id = "cf-" + properties["Name"]
cloudformation_unique_id = "cf-" + resource_name
pipeline = datapipeline_backend.create_pipeline(
properties["Name"], cloudformation_unique_id
resource_name, cloudformation_unique_id
)
datapipeline_backend.put_pipeline_definition(
pipeline.pipeline_id, properties["PipelineObjects"]

View File

@ -461,7 +461,7 @@ class Table(CloudFormationModel):
params["streams"] = properties["StreamSpecification"]
table = dynamodb_backends[region_name].create_table(
name=properties["TableName"], **params
name=resource_name, **params
)
return table
@ -469,11 +469,7 @@ class Table(CloudFormationModel):
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
table = dynamodb_backends[region_name].delete_table(
name=properties["TableName"]
)
table = dynamodb_backends[region_name].delete_table(name=resource_name)
return table
def _generate_arn(self, name):

View File

@ -80,15 +80,11 @@ class Repository(BaseObject, CloudFormationModel):
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
ecr_backend = ecr_backends[region_name]
return ecr_backend.create_repository(
# RepositoryName is optional in CloudFormation, thus create a random
# name if necessary
repository_name=properties.get(
"RepositoryName", "ecrrepository{0}".format(int(random() * 10 ** 6))
)
repository_name=resource_name
)
@classmethod

View File

@ -82,36 +82,24 @@ class Cluster(BaseObject, CloudFormationModel):
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# if properties is not provided, cloudformation will use the default values for all properties
if "Properties" in cloudformation_json:
properties = cloudformation_json["Properties"]
else:
properties = {}
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a random
# name if necessary
cluster_name=properties.get(
"ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6))
)
cluster_name=resource_name
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
if original_resource.name != properties["ClusterName"]:
if original_resource.name != new_resource_name:
ecs_backend = ecs_backends[region_name]
ecs_backend.delete_cluster(original_resource.arn)
return ecs_backend.create_cluster(
# ClusterName is optional in CloudFormation, thus create a
# random name if necessary
cluster_name=properties.get(
"ClusterName", "ecscluster{0}".format(int(random() * 10 ** 6))
)
cluster_name=new_resource_name
)
else:
# no-op when nothing changed between old and new resources
@ -355,14 +343,13 @@ class Service(BaseObject, CloudFormationModel):
task_definition = properties["TaskDefinition"].family
else:
task_definition = properties["TaskDefinition"]
service_name = "{0}Service{1}".format(cluster, int(random() * 10 ** 6))
desired_count = properties["DesiredCount"]
# TODO: LoadBalancers
# TODO: Role
ecs_backend = ecs_backends[region_name]
return ecs_backend.create_service(
cluster, service_name, desired_count, task_definition_str=task_definition
cluster, resource_name, desired_count, task_definition_str=task_definition
)
@classmethod
@ -386,12 +373,9 @@ class Service(BaseObject, CloudFormationModel):
# TODO: LoadBalancers
# TODO: Role
ecs_backend.delete_service(cluster_name, service_name)
new_service_name = "{0}Service{1}".format(
cluster_name, int(random() * 10 ** 6)
)
return ecs_backend.create_service(
cluster_name,
new_service_name,
new_resource_name,
desired_count,
task_definition_str=task_definition,
)

View File

@ -160,7 +160,6 @@ class FakeTargetGroup(CloudFormationModel):
elbv2_backend = elbv2_backends[region_name]
name = properties.get("Name")
vpc_id = properties.get("VpcId")
protocol = properties.get("Protocol")
port = properties.get("Port")
@ -175,7 +174,7 @@ class FakeTargetGroup(CloudFormationModel):
target_type = properties.get("TargetType")
target_group = elbv2_backend.create_target_group(
name=name,
name=resource_name,
vpc_id=vpc_id,
protocol=protocol,
port=port,
@ -437,13 +436,12 @@ class FakeLoadBalancer(CloudFormationModel):
elbv2_backend = elbv2_backends[region_name]
name = properties.get("Name", resource_name)
security_groups = properties.get("SecurityGroups")
subnet_ids = properties.get("Subnets")
scheme = properties.get("Scheme", "internet-facing")
load_balancer = elbv2_backend.create_load_balancer(
name, security_groups, subnet_ids, scheme=scheme
resource_name, security_groups, subnet_ids, scheme=scheme
)
return load_balancer

View File

@ -88,7 +88,7 @@ class Rule(CloudFormationModel):
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
event_name = properties.get("Name") or resource_name
event_name = resource_name
return event_backend.put_rule(name=event_name, **properties)
@classmethod
@ -104,9 +104,8 @@ class Rule(CloudFormationModel):
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
event_name = properties.get("Name") or resource_name
event_name = resource_name
event_backend.delete_rule(name=event_name)
@ -176,7 +175,7 @@ class EventBus(CloudFormationModel):
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
event_name = properties["Name"]
event_name = resource_name
event_source_name = properties.get("EventSourceName")
return event_backend.create_event_bus(
name=event_name, event_source_name=event_source_name
@ -195,9 +194,8 @@ class EventBus(CloudFormationModel):
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
event_bus_name = properties["Name"]
event_bus_name = resource_name
event_backend.delete_event_bus(event_bus_name)

View File

@ -12,7 +12,6 @@ import re
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from six.moves.urllib.parse import urlparse
from uuid import uuid4
from moto.core.exceptions import RESTError
from moto.core import BaseBackend, BaseModel, ACCOUNT_ID, CloudFormationModel
@ -84,7 +83,11 @@ class VirtualMfaDevice(object):
return iso_8601_datetime_without_milliseconds(self.enable_date)
class Policy(BaseModel):
class Policy(CloudFormationModel):
# Note: This class does not implement the CloudFormation support for AWS::IAM::Policy, as that CF resource
# is for creating *inline* policies. That is done in class InlinePolicy.
is_attachable = False
def __init__(
@ -295,8 +298,149 @@ aws_managed_policies = [
]
class InlinePolicy(Policy):
"""TODO: is this needed?"""
class InlinePolicy(CloudFormationModel):
    """An inline policy created by CloudFormation (AWS::IAM::Policy).

    Unlike managed policies, an inline policy is embedded directly in the
    users, roles and groups it names.
    """

    def __init__(
        self,
        resource_name,
        policy_name,
        policy_document,
        group_names,
        role_names,
        user_names,
    ):
        self.name = resource_name
        self.policy_name = None
        self.policy_document = None
        self.group_names = None
        self.role_names = None
        self.user_names = None
        self.update(policy_name, policy_document, group_names, role_names, user_names)

    def update(
        self, policy_name, policy_document, group_names, role_names, user_names,
    ):
        """Overwrite this policy's attributes with new values."""
        self.policy_name = policy_name
        # CloudFormation may supply the document as a dict; store JSON text.
        self.policy_document = (
            json.dumps(policy_document)
            if isinstance(policy_document, dict)
            else policy_document
        )
        self.group_names = group_names
        self.role_names = role_names
        self.user_names = user_names

    @staticmethod
    def cloudformation_name_type():
        # This resource is never named from the template's PolicyName.
        return None

    @staticmethod
    def cloudformation_type():
        return "AWS::IAM::Policy"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_physical_name, cloudformation_json, region_name
    ):
        properties = cloudformation_json.get("Properties", {})
        policy_document = properties.get("PolicyDocument")
        policy_name = properties.get("PolicyName")
        user_names = properties.get("Users")
        role_names = properties.get("Roles")
        group_names = properties.get("Groups")
        return iam_backend.create_inline_policy(
            resource_physical_name,
            policy_name,
            policy_document,
            group_names,
            role_names,
            user_names,
        )

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
    ):
        properties = cloudformation_json["Properties"]
        if cls.is_replacement_update(properties):
            # Replacement: create the new resource first, then delete the old,
            # mirroring CloudFormation's replacement order.
            # FIX: cloudformation_name_type() returns None for this resource,
            # so name the new resource from new_resource_name directly instead
            # of indexing properties with a None key.
            new_resource = cls.create_from_cloudformation_json(
                new_resource_name, cloudformation_json, region_name
            )
            cls.delete_from_cloudformation_json(
                original_resource.name, cloudformation_json, region_name
            )
            return new_resource
        else:  # No Interruption
            properties = cloudformation_json.get("Properties", {})
            policy_document = properties.get("PolicyDocument")
            policy_name = properties.get("PolicyName", original_resource.name)
            user_names = properties.get("Users")
            role_names = properties.get("Roles")
            group_names = properties.get("Groups")
            return iam_backend.update_inline_policy(
                original_resource.name,
                policy_name,
                policy_document,
                group_names,
                role_names,
                user_names,
            )

    @classmethod
    def delete_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name
    ):
        iam_backend.delete_inline_policy(resource_name)

    @staticmethod
    def is_replacement_update(properties):
        # No AWS::IAM::Policy property requires replacement on change.
        properties_requiring_replacement_update = []
        return any(
            [
                property_requiring_replacement in properties
                for property_requiring_replacement in properties_requiring_replacement_update
            ]
        )

    @property
    def physical_resource_id(self):
        return self.name

    def apply_policy(self, backend):
        """Attach this policy to every referenced user, role and group."""
        if self.user_names:
            for user_name in self.user_names:
                backend.put_user_policy(
                    user_name, self.policy_name, self.policy_document
                )
        if self.role_names:
            for role_name in self.role_names:
                backend.put_role_policy(
                    role_name, self.policy_name, self.policy_document
                )
        if self.group_names:
            for group_name in self.group_names:
                backend.put_group_policy(
                    group_name, self.policy_name, self.policy_document
                )

    def unapply_policy(self, backend):
        """Detach this policy from every referenced user, role and group."""
        if self.user_names:
            for user_name in self.user_names:
                backend.delete_user_policy(user_name, self.policy_name)
        if self.role_names:
            for role_name in self.role_names:
                backend.delete_role_policy(role_name, self.policy_name)
        if self.group_names:
            for group_name in self.group_names:
                backend.delete_group_policy(group_name, self.policy_name)
class Role(CloudFormationModel):
@ -338,11 +482,13 @@ class Role(CloudFormationModel):
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
cls, resource_physical_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
role_name = (
properties["RoleName"] if "RoleName" in properties else str(uuid4())[0:5]
properties["RoleName"]
if "RoleName" in properties
else resource_physical_name
)
role = iam_backend.create_role(
@ -416,13 +562,15 @@ class InstanceProfile(CloudFormationModel):
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
cls, resource_physical_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
role_ids = properties["Roles"]
return iam_backend.create_instance_profile(
name=resource_name, path=properties.get("Path", "/"), role_ids=role_ids
name=resource_physical_name,
path=properties.get("Path", "/"),
role_ids=role_ids,
)
@property
@ -475,12 +623,12 @@ class SigningCertificate(BaseModel):
return iso_8601_datetime_without_milliseconds(self.upload_date)
class AccessKey(BaseModel):
def __init__(self, user_name):
class AccessKey(CloudFormationModel):
def __init__(self, user_name, status="Active"):
self.user_name = user_name
self.access_key_id = "AKIA" + random_access_key()
self.secret_access_key = random_alphanumeric(40)
self.status = "Active"
self.status = status
self.create_date = datetime.utcnow()
self.last_used = None
@ -499,6 +647,66 @@ class AccessKey(BaseModel):
return self.secret_access_key
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None # Resource never gets named after by template PolicyName!
@staticmethod
def cloudformation_type():
return "AWS::IAM::AccessKey"
@classmethod
def create_from_cloudformation_json(
cls, resource_physical_name, cloudformation_json, region_name
):
properties = cloudformation_json.get("Properties", {})
user_name = properties.get("UserName")
status = properties.get("Status", "Active")
return iam_backend.create_access_key(user_name, status=status,)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
properties = cloudformation_json["Properties"]
if cls.is_replacement_update(properties):
new_resource = cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
cls.delete_from_cloudformation_json(
original_resource.physical_resource_id, cloudformation_json, region_name
)
return new_resource
else: # No Interruption
properties = cloudformation_json.get("Properties", {})
status = properties.get("Status")
return iam_backend.update_access_key(
original_resource.user_name, original_resource.access_key_id, status
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
iam_backend.delete_access_key_by_name(resource_name)
@staticmethod
def is_replacement_update(properties):
properties_requiring_replacement_update = ["Serial", "UserName"]
return any(
[
property_requiring_replacement in properties
for property_requiring_replacement in properties_requiring_replacement_update
]
)
@property
def physical_resource_id(self):
return self.access_key_id
class SshPublicKey(BaseModel):
def __init__(self, user_name, ssh_public_key_body):
@ -564,8 +772,14 @@ class Group(BaseModel):
def list_policies(self):
return self.policies.keys()
def delete_policy(self, policy_name):
    """Remove an inline policy from this group; raise if it does not exist."""
    if policy_name not in self.policies:
        raise IAMNotFoundException("Policy {0} not found".format(policy_name))
    del self.policies[policy_name]
class User(CloudFormationModel):
def __init__(self, name, path=None, tags=None):
self.name = name
self.id = random_resource_id()
@ -614,8 +828,8 @@ class User(BaseModel):
del self.policies[policy_name]
def create_access_key(self, status="Active"):
    """Create a new access key for this user with the given status."""
    access_key = AccessKey(self.name, status)
    self.access_keys.append(access_key)
    return access_key
@ -633,9 +847,11 @@ class User(BaseModel):
key = self.get_access_key_by_id(access_key_id)
self.access_keys.remove(key)
def update_access_key(self, access_key_id, status=None):
    """Set the key's status when one is given; return the key either way."""
    key = self.get_access_key_by_id(access_key_id)
    if status is not None:
        key.status = status
    return key
def get_access_key_by_id(self, access_key_id):
for key in self.access_keys:
@ -646,6 +862,15 @@ class User(BaseModel):
"The Access Key with id {0} cannot be found".format(access_key_id)
)
def has_access_key(self, access_key_id):
    """Return True when this user owns an access key with the given id."""
    for access_key in self.access_keys:
        if access_key.access_key_id == access_key_id:
            return True
    return False
def upload_ssh_public_key(self, ssh_public_key_body):
pubkey = SshPublicKey(self.name, ssh_public_key_body)
self.ssh_public_keys.append(pubkey)
@ -677,7 +902,7 @@ class User(BaseModel):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
return self.arn
raise UnformattedGetAttTemplateException()
def to_csv(self):
@ -752,6 +977,66 @@ class User(BaseModel):
access_key_2_last_used,
)
@staticmethod
def cloudformation_name_type():
    # Template property that names an AWS::IAM::User resource.
    return "UserName"
@staticmethod
def cloudformation_type():
    # CloudFormation resource type handled by this model.
    return "AWS::IAM::User"
@classmethod
def create_from_cloudformation_json(
    cls, resource_physical_name, cloudformation_json, region_name
):
    """Create an IAM user named after the resolved physical name."""
    properties = cloudformation_json.get("Properties", {})
    path = properties.get("Path")
    return iam_backend.create_user(resource_physical_name, path)
@classmethod
def update_from_cloudformation_json(
    cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
    """Update a user in place, or replace it when a replacement-requiring
    property (UserName) changed."""
    properties = cloudformation_json["Properties"]
    if cls.is_replacement_update(properties):
        resource_name_property = cls.cloudformation_name_type()
        if resource_name_property not in properties:
            properties[resource_name_property] = new_resource_name
        # Create the new resource before deleting the old one, mirroring
        # CloudFormation's replacement order.
        new_resource = cls.create_from_cloudformation_json(
            properties[resource_name_property], cloudformation_json, region_name
        )
        # Restore the original name so the delete targets the old resource.
        properties[resource_name_property] = original_resource.name
        cls.delete_from_cloudformation_json(
            original_resource.name, cloudformation_json, region_name
        )
        return new_resource
    else:  # No Interruption
        if "Path" in properties:
            original_resource.path = properties["Path"]
        return original_resource
@classmethod
def delete_from_cloudformation_json(
    cls, resource_name, cloudformation_json, region_name
):
    """Delete the IAM user identified by its physical (user) name."""
    iam_backend.delete_user(resource_name)
@staticmethod
def is_replacement_update(properties):
    """A UserName change forces CloudFormation to replace the resource."""
    return "UserName" in properties
@property
def physical_resource_id(self):
    # CloudFormation identifies an AWS::IAM::User by its user name.
    return self.name
class AccountPasswordPolicy(BaseModel):
def __init__(
@ -984,6 +1269,8 @@ class IAMBackend(BaseBackend):
self.virtual_mfa_devices = {}
self.account_password_policy = None
self.account_summary = AccountSummary(self)
self.inline_policies = {}
self.access_keys = {}
super(IAMBackend, self).__init__()
def _init_managed_policies(self):
@ -1478,6 +1765,10 @@ class IAMBackend(BaseBackend):
group = self.get_group(group_name)
return group.list_policies()
def delete_group_policy(self, group_name, policy_name):
group = self.get_group(group_name)
group.delete_policy(policy_name)
def get_group_policy(self, group_name, policy_name):
group = self.get_group(group_name)
return group.get_policy(policy_name)
@ -1674,14 +1965,15 @@ class IAMBackend(BaseBackend):
def delete_policy(self, policy_arn):
del self.managed_policies[policy_arn]
def create_access_key(self, user_name=None):
def create_access_key(self, user_name=None, status="Active"):
user = self.get_user(user_name)
key = user.create_access_key()
key = user.create_access_key(status)
self.access_keys[key.physical_resource_id] = key
return key
def update_access_key(self, user_name, access_key_id, status):
def update_access_key(self, user_name, access_key_id, status=None):
user = self.get_user(user_name)
user.update_access_key(access_key_id, status)
return user.update_access_key(access_key_id, status)
def get_access_key_last_used(self, access_key_id):
access_keys_list = self.get_all_access_keys_for_all_users()
@ -1706,7 +1998,17 @@ class IAMBackend(BaseBackend):
def delete_access_key(self, access_key_id, user_name):
user = self.get_user(user_name)
user.delete_access_key(access_key_id)
access_key = user.get_access_key_by_id(access_key_id)
self.delete_access_key_by_name(access_key.access_key_id)
def delete_access_key_by_name(self, name):
key = self.access_keys[name]
try: # User may have been deleted before their access key...
user = self.get_user(key.user_name)
user.delete_access_key(key.access_key_id)
except IAMNotFoundException:
pass
del self.access_keys[name]
def upload_ssh_public_key(self, user_name, ssh_public_key_body):
user = self.get_user(user_name)
@ -2017,5 +2319,62 @@ class IAMBackend(BaseBackend):
def get_account_summary(self):
return self.account_summary
def create_inline_policy(
    self,
    resource_name,
    policy_name,
    policy_document,
    group_names,
    role_names,
    user_names,
):
    """Create an inline policy (AWS::IAM::Policy) and attach it to the
    referenced users, roles and groups.

    Raises IAMConflictException when a policy with the same resource name
    already exists.
    """
    if resource_name in self.inline_policies:
        raise IAMConflictException(
            "EntityAlreadyExists",
            "Inline Policy {0} already exists".format(resource_name),
        )
    inline_policy = InlinePolicy(
        resource_name,
        policy_name,
        policy_document,
        group_names,
        role_names,
        user_names,
    )
    self.inline_policies[resource_name] = inline_policy
    # Attach the document to every referenced principal.
    inline_policy.apply_policy(self)
    return inline_policy
def get_inline_policy(self, policy_id):
    """Return the inline policy stored under policy_id.

    Raises IAMNotFoundException when no such policy exists.
    """
    try:
        return self.inline_policies[policy_id]
    except KeyError:
        raise IAMNotFoundException("Inline policy {0} not found".format(policy_id))
def update_inline_policy(
    self,
    resource_name,
    policy_name,
    policy_document,
    group_names,
    role_names,
    user_names,
):
    """Update an inline policy: detach it from its current principals,
    overwrite its attributes, then re-attach it to the new principals."""
    inline_policy = self.get_inline_policy(resource_name)
    inline_policy.unapply_policy(self)
    inline_policy.update(
        policy_name, policy_document, group_names, role_names, user_names,
    )
    inline_policy.apply_policy(self)
    return inline_policy
def delete_inline_policy(self, policy_id):
    """Detach an inline policy from all principals and remove it."""
    inline_policy = self.get_inline_policy(policy_id)
    inline_policy.unapply_policy(self)
    del self.inline_policies[policy_id]
iam_backend = IAMBackend()

View File

@ -135,7 +135,7 @@ class Shard(BaseModel):
class Stream(CloudFormationModel):
def __init__(self, stream_name, shard_count, region_name):
def __init__(self, stream_name, shard_count, retention_period_hours, region_name):
self.stream_name = stream_name
self.creation_datetime = datetime.datetime.now()
self.region = region_name
@ -145,6 +145,7 @@ class Stream(CloudFormationModel):
self.status = "ACTIVE"
self.shard_count = None
self.update_shard_count(shard_count)
self.retention_period_hours = retention_period_hours
def update_shard_count(self, shard_count):
# ToDo: This was extracted from init. It's only accurate for new streams.
@ -213,6 +214,7 @@ class Stream(CloudFormationModel):
"StreamName": self.stream_name,
"StreamStatus": self.status,
"HasMoreShards": False,
"RetentionPeriodHours": self.retention_period_hours,
"Shards": [shard.to_json() for shard in self.shards.values()],
}
}
@ -243,9 +245,19 @@ class Stream(CloudFormationModel):
):
properties = cloudformation_json.get("Properties", {})
shard_count = properties.get("ShardCount", 1)
name = properties.get("Name", resource_name)
retention_period_hours = properties.get("RetentionPeriodHours", resource_name)
tags = {
tag_item["Key"]: tag_item["Value"]
for tag_item in properties.get("Tags", [])
}
backend = kinesis_backends[region_name]
return backend.create_stream(name, shard_count, region_name)
stream = backend.create_stream(
resource_name, shard_count, retention_period_hours, region_name
)
if any(tags):
backend.add_tags_to_stream(stream.stream_name, tags)
return stream
@classmethod
def update_from_cloudformation_json(
@ -269,6 +281,15 @@ class Stream(CloudFormationModel):
else: # No Interruption
if "ShardCount" in properties:
original_resource.update_shard_count(properties["ShardCount"])
if "RetentionPeriodHours" in properties:
original_resource.retention_period_hours = properties[
"RetentionPeriodHours"
]
if "Tags" in properties:
original_resource.tags = {
tag_item["Key"]: tag_item["Value"]
for tag_item in properties.get("Tags", [])
}
return original_resource
@classmethod
@ -276,9 +297,7 @@ class Stream(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name
):
backend = kinesis_backends[region_name]
properties = cloudformation_json.get("Properties", {})
stream_name = properties.get(cls.cloudformation_name_type(), resource_name)
backend.delete_stream(stream_name)
backend.delete_stream(resource_name)
@staticmethod
def is_replacement_update(properties):
@ -398,10 +417,12 @@ class KinesisBackend(BaseBackend):
self.streams = OrderedDict()
self.delivery_streams = {}
def create_stream(self, stream_name, shard_count, region_name):
def create_stream(
self, stream_name, shard_count, retention_period_hours, region_name
):
if stream_name in self.streams:
raise ResourceInUseError(stream_name)
stream = Stream(stream_name, shard_count, region_name)
stream = Stream(stream_name, shard_count, retention_period_hours, region_name)
self.streams[stream_name] = stream
return stream

View File

@ -25,7 +25,10 @@ class KinesisResponse(BaseResponse):
def create_stream(self):
stream_name = self.parameters.get("StreamName")
shard_count = self.parameters.get("ShardCount")
self.kinesis_backend.create_stream(stream_name, shard_count, self.region)
retention_period_hours = self.parameters.get("RetentionPeriodHours")
self.kinesis_backend.create_stream(
stream_name, shard_count, retention_period_hours, self.region
)
return ""
def describe_stream(self):

View File

@ -4,7 +4,6 @@ import boto.rds
from jinja2 import Template
from moto.core import BaseBackend, CloudFormationModel
from moto.core.utils import get_random_hex
from moto.ec2.models import ec2_backends
from moto.rds.exceptions import UnformattedGetAttTemplateException
from moto.rds2.models import rds2_backends
@ -33,9 +32,6 @@ class Database(CloudFormationModel):
):
properties = cloudformation_json["Properties"]
db_instance_identifier = properties.get(cls.cloudformation_name_type())
if not db_instance_identifier:
db_instance_identifier = resource_name.lower() + get_random_hex(12)
db_security_groups = properties.get("DBSecurityGroups")
if not db_security_groups:
db_security_groups = []
@ -48,7 +44,7 @@ class Database(CloudFormationModel):
"availability_zone": properties.get("AvailabilityZone"),
"backup_retention_period": properties.get("BackupRetentionPeriod"),
"db_instance_class": properties.get("DBInstanceClass"),
"db_instance_identifier": db_instance_identifier,
"db_instance_identifier": resource_name,
"db_name": properties.get("DBName"),
"db_subnet_group_name": db_subnet_group_name,
"engine": properties.get("Engine"),
@ -229,7 +225,7 @@ class SecurityGroup(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
group_name = resource_name.lower() + get_random_hex(12)
group_name = resource_name.lower()
description = properties["GroupDescription"]
security_group_ingress_rules = properties.get("DBSecurityGroupIngress", [])
tags = properties.get("Tags")
@ -303,9 +299,7 @@ class SubnetGroup(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
subnet_name = properties.get(cls.cloudformation_name_type())
if not subnet_name:
subnet_name = resource_name.lower() + get_random_hex(12)
subnet_name = resource_name.lower()
description = properties["DBSubnetGroupDescription"]
subnet_ids = properties["SubnetIds"]
tags = properties.get("Tags")

View File

@ -10,7 +10,6 @@ from jinja2 import Template
from re import compile as re_compile
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import get_random_hex
from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.ec2.models import ec2_backends
from .exceptions import (
@ -371,9 +370,6 @@ class Database(CloudFormationModel):
):
properties = cloudformation_json["Properties"]
db_instance_identifier = properties.get(cls.cloudformation_name_type())
if not db_instance_identifier:
db_instance_identifier = resource_name.lower() + get_random_hex(12)
db_security_groups = properties.get("DBSecurityGroups")
if not db_security_groups:
db_security_groups = []
@ -386,7 +382,7 @@ class Database(CloudFormationModel):
"availability_zone": properties.get("AvailabilityZone"),
"backup_retention_period": properties.get("BackupRetentionPeriod"),
"db_instance_class": properties.get("DBInstanceClass"),
"db_instance_identifier": db_instance_identifier,
"db_instance_identifier": resource_name,
"db_name": properties.get("DBName"),
"db_subnet_group_name": db_subnet_group_name,
"engine": properties.get("Engine"),
@ -650,7 +646,7 @@ class SecurityGroup(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
group_name = resource_name.lower() + get_random_hex(12)
group_name = resource_name.lower()
description = properties["GroupDescription"]
security_group_ingress_rules = properties.get("DBSecurityGroupIngress", [])
tags = properties.get("Tags")
@ -759,9 +755,6 @@ class SubnetGroup(CloudFormationModel):
):
properties = cloudformation_json["Properties"]
subnet_name = properties.get(cls.cloudformation_name_type())
if not subnet_name:
subnet_name = resource_name.lower() + get_random_hex(12)
description = properties["DBSubnetGroupDescription"]
subnet_ids = properties["SubnetIds"]
tags = properties.get("Tags")
@ -770,7 +763,7 @@ class SubnetGroup(CloudFormationModel):
subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids]
rds2_backend = rds2_backends[region_name]
subnet_group = rds2_backend.create_subnet_group(
subnet_name, description, subnets, tags
resource_name, description, subnets, tags
)
return subnet_group

View File

@ -298,10 +298,9 @@ class FakeZone(CloudFormationModel):
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
name = properties["Name"]
hosted_zone = route53_backend.create_hosted_zone(name, private_zone=False)
hosted_zone = route53_backend.create_hosted_zone(
resource_name, private_zone=False
)
return hosted_zone

View File

@ -1086,7 +1086,7 @@ class FakeBucket(CloudFormationModel):
):
bucket = s3_backend.create_bucket(resource_name, region_name)
properties = cloudformation_json["Properties"]
properties = cloudformation_json.get("Properties", {})
if "BucketEncryption" in properties:
bucket_encryption = cfn_to_api_encryption(properties["BucketEncryption"])
@ -1129,9 +1129,7 @@ class FakeBucket(CloudFormationModel):
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
bucket_name = properties[cls.cloudformation_name_type()]
s3_backend.delete_bucket(bucket_name)
s3_backend.delete_bucket(resource_name)
def to_config_dict(self):
"""Return the AWS Config JSON format of this S3 bucket.

View File

@ -104,7 +104,7 @@ class Topic(CloudFormationModel):
sns_backend = sns_backends[region_name]
properties = cloudformation_json["Properties"]
topic = sns_backend.create_topic(properties.get(cls.cloudformation_name_type()))
topic = sns_backend.create_topic(resource_name)
for subscription in properties.get("Subscription", []):
sns_backend.subscribe(
topic.arn, subscription["Endpoint"], subscription["Protocol"]

View File

@ -374,10 +374,7 @@ class Queue(CloudFormationModel):
sqs_backend = sqs_backends[region_name]
return sqs_backend.create_queue(
name=properties["QueueName"],
tags=tags_dict,
region=region_name,
**properties
name=resource_name, tags=tags_dict, region=region_name, **properties
)
@classmethod
@ -385,7 +382,7 @@ class Queue(CloudFormationModel):
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
queue_name = properties["QueueName"]
queue_name = original_resource.name
sqs_backend = sqs_backends[region_name]
queue = sqs_backend.get_queue(queue_name)
@ -402,10 +399,8 @@ class Queue(CloudFormationModel):
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
queue_name = properties["QueueName"]
sqs_backend = sqs_backends[region_name]
sqs_backend.delete_queue(queue_name)
sqs_backend.delete_queue(resource_name)
@property
def approximate_number_of_messages_delayed(self):

View File

@ -592,7 +592,7 @@ def test_boto3_create_stack_set_with_yaml():
@mock_cloudformation
@mock_s3
def test_create_stack_set_from_s3_url():
s3 = boto3.client("s3")
s3 = boto3.client("s3", region_name="us-east-1")
s3_conn = boto3.resource("s3", region_name="us-east-1")
s3_conn.create_bucket(Bucket="foobar")
@ -704,7 +704,7 @@ def test_boto3_create_stack_with_short_form_func_yaml():
@mock_s3
@mock_cloudformation
def test_get_template_summary():
s3 = boto3.client("s3")
s3 = boto3.client("s3", region_name="us-east-1")
s3_conn = boto3.resource("s3", region_name="us-east-1")
conn = boto3.client("cloudformation", region_name="us-east-1")
@ -802,7 +802,7 @@ def test_create_stack_with_role_arn():
@mock_cloudformation
@mock_s3
def test_create_stack_from_s3_url():
s3 = boto3.client("s3")
s3 = boto3.client("s3", region_name="us-east-1")
s3_conn = boto3.resource("s3", region_name="us-east-1")
s3_conn.create_bucket(Bucket="foobar")
@ -857,7 +857,7 @@ def test_update_stack_with_previous_value():
@mock_s3
@mock_ec2
def test_update_stack_from_s3_url():
s3 = boto3.client("s3")
s3 = boto3.client("s3", region_name="us-east-1")
s3_conn = boto3.resource("s3", region_name="us-east-1")
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
@ -886,7 +886,7 @@ def test_update_stack_from_s3_url():
@mock_cloudformation
@mock_s3
def test_create_change_set_from_s3_url():
s3 = boto3.client("s3")
s3 = boto3.client("s3", region_name="us-east-1")
s3_conn = boto3.resource("s3", region_name="us-east-1")
s3_conn.create_bucket(Bucket="foobar")

View File

@ -118,7 +118,7 @@ def test_boto3_yaml_validate_successful():
@mock_cloudformation
@mock_s3
def test_boto3_yaml_validate_template_url_successful():
s3 = boto3.client("s3")
s3 = boto3.client("s3", region_name="us-east-1")
s3_conn = boto3.resource("s3", region_name="us-east-1")
s3_conn.create_bucket(Bucket="foobar")

View File

@ -5,12 +5,9 @@ import json
import boto
import boto3
import csv
import os
import sure # noqa
import sys
from boto.exception import BotoServerError
from botocore.exceptions import ClientError
from dateutil.tz import tzutc
from moto import mock_iam, mock_iam_deprecated, settings
from moto.core import ACCOUNT_ID

File diff suppressed because it is too large Load Diff

View File

@ -73,6 +73,12 @@ Resources:
Properties:
Name: MyStream
ShardCount: 4
RetentionPeriodHours: 48
Tags:
- Key: TagKey1
Value: TagValue1
- Key: TagKey2
Value: TagValue2
""".strip()
cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
@ -83,6 +89,14 @@ Resources:
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(48)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1")
tag2_value.should.equal("TagValue2")
shards_provisioned = len(
[
shard
@ -98,12 +112,27 @@ Resources:
Type: AWS::Kinesis::Stream
Properties:
ShardCount: 6
RetentionPeriodHours: 24
Tags:
- Key: TagKey1
Value: TagValue1a
- Key: TagKey2
Value: TagValue2a
""".strip()
cf_conn.update_stack(StackName=stack_name, TemplateBody=template)
stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[
"StreamDescription"
]
stream_description["RetentionPeriodHours"].should.equal(24)
tags = kinesis_conn.list_tags_for_stream(StreamName="MyStream")["Tags"]
tag1_value = [tag for tag in tags if tag["Key"] == "TagKey1"][0]["Value"]
tag2_value = [tag for tag in tags if tag["Key"] == "TagKey2"][0]["Value"]
tag1_value.should.equal("TagValue1a")
tag2_value.should.equal("TagValue2a")
shards_provisioned = len(
[
shard

View File

@ -2,7 +2,6 @@
from __future__ import unicode_literals
import datetime
import os
import sys
from boto3 import Session
@ -11,7 +10,6 @@ from six.moves.urllib.error import HTTPError
from functools import wraps
from gzip import GzipFile
from io import BytesIO
import mimetypes
import zlib
import pickle
import uuid
@ -36,7 +34,7 @@ from nose.tools import assert_raises
import sure # noqa
from moto import settings, mock_s3, mock_s3_deprecated, mock_config, mock_cloudformation
from moto import settings, mock_s3, mock_s3_deprecated, mock_config
import moto.s3.models as s3model
from moto.core.exceptions import InvalidNextTokenException
from moto.core.utils import py2_strip_unicode_keys
@ -4686,142 +4684,3 @@ def test_presigned_put_url_with_custom_headers():
s3.delete_object(Bucket=bucket, Key=key)
s3.delete_bucket(Bucket=bucket)
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_basic():
s3 = boto3.client("s3", region_name="us-east-1")
cf = boto3.client("cloudformation", region_name="us-east-1")
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}},
"Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
}
template_json = json.dumps(template)
stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[
"StackId"
]
stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_with_properties():
s3 = boto3.client("s3", region_name="us-east-1")
cf = boto3.client("cloudformation", region_name="us-east-1")
bucket_name = "MyBucket"
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"testInstance": {
"Type": "AWS::S3::Bucket",
"Properties": {
"BucketName": bucket_name,
"BucketEncryption": {
"ServerSideEncryptionConfiguration": [
{
"ServerSideEncryptionByDefault": {
"SSEAlgorithm": "AES256"
}
}
]
},
},
}
},
"Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
}
template_json = json.dumps(template)
stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[
"StackId"
]
stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
s3.head_bucket(Bucket=bucket_name)
encryption = s3.get_bucket_encryption(Bucket=bucket_name)
encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
"ApplyServerSideEncryptionByDefault"
]["SSEAlgorithm"].should.equal("AES256")
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_no_interruption():
s3 = boto3.client("s3", region_name="us-east-1")
cf = boto3.client("cloudformation", region_name="us-east-1")
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
"Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
}
template_json = json.dumps(template)
cf.create_stack(StackName="test_stack", TemplateBody=template_json)
stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"testInstance": {
"Type": "AWS::S3::Bucket",
"Properties": {
"BucketEncryption": {
"ServerSideEncryptionConfiguration": [
{
"ServerSideEncryptionByDefault": {
"SSEAlgorithm": "AES256"
}
}
]
}
},
}
},
"Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
}
template_json = json.dumps(template)
cf.update_stack(StackName="test_stack", TemplateBody=template_json)
encryption = s3.get_bucket_encryption(
Bucket=stack_description["Outputs"][0]["OutputValue"]
)
encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
"ApplyServerSideEncryptionByDefault"
]["SSEAlgorithm"].should.equal("AES256")
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_replacement():
s3 = boto3.client("s3", region_name="us-east-1")
cf = boto3.client("cloudformation", region_name="us-east-1")
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
"Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
}
template_json = json.dumps(template)
cf.create_stack(StackName="test_stack", TemplateBody=template_json)
stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"testInstance": {
"Type": "AWS::S3::Bucket",
"Properties": {"BucketName": "MyNewBucketName"},
}
},
"Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
}
template_json = json.dumps(template)
cf.update_stack(StackName="test_stack", TemplateBody=template_json)
stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])

View File

@ -0,0 +1,145 @@
import json
import boto3
import sure # noqa
from moto import mock_s3, mock_cloudformation
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_basic():
    """A bucket resource with empty Properties is created; Ref output names it."""
    s3_client = boto3.client("s3", region_name="us-east-1")
    cf_client = boto3.client("cloudformation", region_name="us-east-1")

    template_body = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                "testInstance": {"Type": "AWS::S3::Bucket", "Properties": {}}
            },
            "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
        }
    )
    cf_client.create_stack(StackName="test_stack", TemplateBody=template_body)

    stack = cf_client.describe_stacks(StackName="test_stack")["Stacks"][0]
    # head_bucket raises if the bucket named by the stack output does not exist.
    s3_client.head_bucket(Bucket=stack["Outputs"][0]["OutputValue"])
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_with_properties():
    """BucketName and BucketEncryption properties are honored at create time."""
    s3_client = boto3.client("s3", region_name="us-east-1")
    cf_client = boto3.client("cloudformation", region_name="us-east-1")

    bucket_name = "MyBucket"
    sse_rule = {"ServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}
    template_body = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                "testInstance": {
                    "Type": "AWS::S3::Bucket",
                    "Properties": {
                        "BucketName": bucket_name,
                        "BucketEncryption": {
                            "ServerSideEncryptionConfiguration": [sse_rule]
                        },
                    },
                }
            },
            "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
        }
    )
    cf_client.create_stack(StackName="test_stack", TemplateBody=template_body)
    cf_client.describe_stacks(StackName="test_stack")

    # The bucket must exist under the explicitly requested name.
    s3_client.head_bucket(Bucket=bucket_name)
    encryption = s3_client.get_bucket_encryption(Bucket=bucket_name)
    applied = encryption["ServerSideEncryptionConfiguration"]["Rules"][0]
    applied["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"].should.equal(
        "AES256"
    )
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_no_interruption():
    """Adding BucketEncryption updates the bucket in place (no replacement)."""
    s3_client = boto3.client("s3", region_name="us-east-1")
    cf_client = boto3.client("cloudformation", region_name="us-east-1")

    base_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf_client.create_stack(
        StackName="test_stack", TemplateBody=json.dumps(base_template)
    )
    stack = cf_client.describe_stacks(StackName="test_stack")["Stacks"][0]
    created_bucket = stack["Outputs"][0]["OutputValue"]
    s3_client.head_bucket(Bucket=created_bucket)

    # Same logical resource, now with encryption — a No Interruption update.
    encrypted_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {
                    "BucketEncryption": {
                        "ServerSideEncryptionConfiguration": [
                            {
                                "ServerSideEncryptionByDefault": {
                                    "SSEAlgorithm": "AES256"
                                }
                            }
                        ]
                    }
                },
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf_client.update_stack(
        StackName="test_stack", TemplateBody=json.dumps(encrypted_template)
    )

    # The ORIGINAL bucket (same name) must now report the encryption config.
    encryption = s3_client.get_bucket_encryption(Bucket=created_bucket)
    applied = encryption["ServerSideEncryptionConfiguration"]["Rules"][0]
    applied["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"].should.equal(
        "AES256"
    )
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_replacement():
    """Changing BucketName forces replacement; Ref output tracks the new bucket."""
    s3_client = boto3.client("s3", region_name="us-east-1")
    cf_client = boto3.client("cloudformation", region_name="us-east-1")

    initial_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf_client.create_stack(
        StackName="test_stack", TemplateBody=json.dumps(initial_template)
    )
    stack = cf_client.describe_stacks(StackName="test_stack")["Stacks"][0]
    s3_client.head_bucket(Bucket=stack["Outputs"][0]["OutputValue"])

    # Renaming the bucket is a Replacement-class update per CloudFormation.
    renamed_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {"BucketName": "MyNewBucketName"},
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf_client.update_stack(
        StackName="test_stack", TemplateBody=json.dumps(renamed_template)
    )

    # After the update the stack output must point at an existing bucket
    # (the replacement one).
    stack = cf_client.describe_stacks(StackName="test_stack")["Stacks"][0]
    s3_client.head_bucket(Bucket=stack["Outputs"][0]["OutputValue"])