Merge pull request #55 from spulec/master

Merge upstream
This commit is contained in:
Bert Blommers 2020-08-07 13:42:09 +01:00 committed by GitHub
commit 0f6e5a1b0e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 1143 additions and 89 deletions

View File

@ -21,6 +21,7 @@ from moto.batch import models as batch_models # noqa
from moto.cloudwatch import models as cloudwatch_models # noqa
from moto.datapipeline import models as datapipeline_models # noqa
from moto.dynamodb2 import models as dynamodb2_models # noqa
from moto.ec2 import models as ec2_models
from moto.ecr import models as ecr_models # noqa
from moto.ecs import models as ecs_models # noqa
from moto.elb import models as elb_models # noqa
@ -33,15 +34,13 @@ from moto.rds import models as rds_models # noqa
from moto.rds2 import models as rds2_models # noqa
from moto.redshift import models as redshift_models # noqa
from moto.route53 import models as route53_models # noqa
from moto.s3 import models as s3_models # noqa
from moto.s3 import models as s3_models, s3_backend # noqa
from moto.s3.utils import bucket_and_name_from_url
from moto.sns import models as sns_models # noqa
from moto.sqs import models as sqs_models # noqa
# End ugly list of imports
from moto.ec2 import models as ec2_models
from moto.s3 import models as _, s3_backend # noqa
from moto.s3.utils import bucket_and_name_from_url
from moto.core import ACCOUNT_ID, CloudFormationModel
from .utils import random_suffix
from .exceptions import (
@ -212,7 +211,6 @@ def clean_json(resource_json, resources_map):
def resource_class_from_type(resource_type):
if resource_type in NULL_MODELS:
return None
if resource_type not in MODEL_MAP:
logger.warning("No Moto CloudFormation support for %s", resource_type)
return None
@ -221,6 +219,9 @@ def resource_class_from_type(resource_type):
def resource_name_property_from_type(resource_type):
    """Return the template property holding the resource's name, if known.

    Prefers the registered model's own answer; falls back to the static
    NAME_TYPE_MAP for types without a registered model.
    """
    matching_model = next(
        (m for m in MODEL_LIST if m.cloudformation_type() == resource_type),
        None,
    )
    if matching_model is not None:
        return matching_model.cloudformation_name_type()
    return NAME_TYPE_MAP.get(resource_type)
@ -249,7 +250,9 @@ def generate_resource_name(resource_type, stack_name, logical_id):
return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix())
def parse_resource(logical_id, resource_json, resources_map):
def parse_resource(
logical_id, resource_json, resources_map, add_name_to_resource_json=True
):
resource_type = resource_json["Type"]
resource_class = resource_class_from_type(resource_type)
if not resource_class:
@ -261,21 +264,20 @@ def parse_resource(logical_id, resource_json, resources_map):
return None
resource_json = clean_json(resource_json, resources_map)
resource_name = generate_resource_name(
resource_type, resources_map.get("AWS::StackName"), logical_id
)
resource_name_property = resource_name_property_from_type(resource_type)
if resource_name_property:
if "Properties" not in resource_json:
resource_json["Properties"] = dict()
if resource_name_property not in resource_json["Properties"]:
resource_json["Properties"][
resource_name_property
] = generate_resource_name(
resource_type, resources_map.get("AWS::StackName"), logical_id
)
resource_name = resource_json["Properties"][resource_name_property]
else:
resource_name = generate_resource_name(
resource_type, resources_map.get("AWS::StackName"), logical_id
)
if (
add_name_to_resource_json
and resource_name_property not in resource_json["Properties"]
):
resource_json["Properties"][resource_name_property] = resource_name
if resource_name_property in resource_json["Properties"]:
resource_name = resource_json["Properties"][resource_name_property]
return resource_class, resource_json, resource_name
@ -301,7 +303,7 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n
def parse_and_update_resource(logical_id, resource_json, resources_map, region_name):
resource_class, new_resource_json, new_resource_name = parse_resource(
logical_id, resource_json, resources_map
logical_id, resource_json, resources_map, False
)
original_resource = resources_map[logical_id]
new_resource = resource_class.update_from_cloudformation_json(
@ -647,6 +649,23 @@ class ResourceMap(collections_abc.Mapping):
try:
if parsed_resource and hasattr(parsed_resource, "delete"):
parsed_resource.delete(self._region_name)
else:
resource_name_attribute = (
parsed_resource.cloudformation_name_type()
if hasattr(parsed_resource, "cloudformation_name_type")
else resource_name_property_from_type(parsed_resource.type)
)
if resource_name_attribute:
resource_json = self._resource_json_map[
parsed_resource.logical_resource_id
]
resource_name = resource_json["Properties"][
resource_name_attribute
]
parse_and_delete_resource(
resource_name, resource_json, self, self._region_name
)
self._parsed_resources.pop(parsed_resource.logical_resource_id)
except Exception as e:
# skip over dependency violations, and try again in a
# second pass

View File

@ -538,20 +538,25 @@ class BaseModel(object):
# Parent class for every Model that can be instantiated by CloudFormation
# On subclasses, implement the two methods as @staticmethod to ensure correct behaviour of the CF parser
class CloudFormationModel(BaseModel):
@staticmethod
@abstractmethod
def cloudformation_name_type(self):
def cloudformation_name_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html
# This must be implemented as a staticmethod with no parameters
# Return None for resources that do not have a name property
pass
@staticmethod
@abstractmethod
def cloudformation_type(self):
def cloudformation_type():
# This must be implemented as a staticmethod with no parameters
# See for example https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html
return "AWS::SERVICE::RESOURCE"
@abstractmethod
def create_from_cloudformation_json(self):
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# This must be implemented as a classmethod with parameters:
# cls, resource_name, cloudformation_json, region_name
# Extract the resource parameters from the cloudformation json
@ -559,7 +564,9 @@ class CloudFormationModel(BaseModel):
pass
@abstractmethod
def update_from_cloudformation_json(self):
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
# This must be implemented as a classmethod with parameters:
# cls, original_resource, new_resource_name, cloudformation_json, region_name
# Extract the resource parameters from the cloudformation json,
@ -568,7 +575,9 @@ class CloudFormationModel(BaseModel):
pass
@abstractmethod
def delete_from_cloudformation_json(self):
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# This must be implemented as a classmethod with parameters:
# cls, resource_name, cloudformation_json, region_name
# Extract the resource parameters from the cloudformation json

View File

@ -160,7 +160,6 @@ AMIS = _load_resource(
or resource_filename(__name__, "resources/amis.json"),
)
OWNER_ID = ACCOUNT_ID
@ -1405,7 +1404,6 @@ class Ami(TaggedEC2Resource):
class AmiBackend(object):
AMI_REGEX = re.compile("ami-[a-z0-9]+")
def __init__(self):
@ -2118,11 +2116,16 @@ class SecurityGroupBackend(object):
vpc_id=None,
):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
if ip_ranges and not isinstance(ip_ranges, list):
ip_ranges = [ip_ranges]
if ip_ranges:
if isinstance(ip_ranges, str) or (
six.PY2 and isinstance(ip_ranges, unicode) # noqa
):
ip_ranges = [{"CidrIp": str(ip_ranges)}]
elif not isinstance(ip_ranges, list):
ip_ranges = [json.loads(ip_ranges)]
if ip_ranges:
for cidr in ip_ranges:
if not is_valid_cidr(cidr):
if not is_valid_cidr(cidr["CidrIp"]):
raise InvalidCIDRSubnetError(cidr=cidr)
self._verify_group_will_respect_rule_count_limit(
@ -2200,10 +2203,14 @@ class SecurityGroupBackend(object):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
if ip_ranges and not isinstance(ip_ranges, list):
ip_ranges = [ip_ranges]
if isinstance(ip_ranges, str) and "CidrIp" not in ip_ranges:
ip_ranges = [{"CidrIp": ip_ranges}]
else:
ip_ranges = [json.loads(ip_ranges)]
if ip_ranges:
for cidr in ip_ranges:
if not is_valid_cidr(cidr):
if not is_valid_cidr(cidr["CidrIp"]):
raise InvalidCIDRSubnetError(cidr=cidr)
self._verify_group_will_respect_rule_count_limit(
@ -2259,9 +2266,13 @@ class SecurityGroupBackend(object):
if source_group:
source_groups.append(source_group)
for ip in ip_ranges:
ip_ranges = [ip.get("CidrIp") if ip.get("CidrIp") == "0.0.0.0/0" else ip]
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups
)
if security_rule in group.egress_rules:
group.egress_rules.remove(security_rule)
return security_rule
@ -3063,6 +3074,21 @@ class VPCBackend(object):
return vpc_end_point
def get_vpc_end_point_services(self):
    """Describe the VPC endpoint services known to this backend.

    Returns a dict with the endpoint detail objects, their service names,
    and this region's availability zones.
    """
    services_details = self.vpc_end_points.values()
    # Idiom: build the name list with a comprehension instead of a
    # manual append loop (ruff PERF401).
    services = [endpoint.service_name for endpoint in services_details]
    # NOTE(review): calls the method unbound with `self` explicitly —
    # presumably this backend is mixed into EC2Backend; confirm class layout.
    availability_zones = EC2Backend.describe_availability_zones(self)
    return {
        "servicesDetails": services_details,
        "services": services,
        "availability_zones": availability_zones,
    }
class VPCPeeringConnectionStatus(object):
def __init__(self, code="initiating-request", message=""):
@ -3737,7 +3763,6 @@ class VPCEndPoint(TaggedEC2Resource):
tag_specifications=None,
private_dns_enabled=None,
):
self.id = id
self.vpc_id = vpc_id
self.service_name = service_name

View File

@ -20,7 +20,11 @@ def parse_sg_attributes_from_dict(sg_attributes):
ip_ranges = []
ip_ranges_tree = sg_attributes.get("IpRanges") or {}
for ip_range_idx in sorted(ip_ranges_tree.keys()):
ip_ranges.append(ip_ranges_tree[ip_range_idx]["CidrIp"][0])
ip_range = {"CidrIp": ip_ranges_tree[ip_range_idx]["CidrIp"][0]}
if ip_ranges_tree[ip_range_idx].get("Description"):
ip_range["Description"] = ip_ranges_tree[ip_range_idx].get("Description")[0]
ip_ranges.append(ip_range)
source_groups = []
source_group_ids = []
@ -61,6 +65,7 @@ class SecurityGroups(BaseResponse):
source_groups,
source_group_ids,
) = parse_sg_attributes_from_dict(querytree)
yield (
group_name_or_id,
ip_protocol,
@ -211,7 +216,10 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = (
<ipRanges>
{% for ip_range in rule.ip_ranges %}
<item>
<cidrIp>{{ ip_range }}</cidrIp>
<cidrIp>{{ ip_range['CidrIp'] }}</cidrIp>
{% if ip_range['Description'] %}
<description>{{ ip_range['Description'] }}</description>
{% endif %}
</item>
{% endfor %}
</ipRanges>

View File

@ -191,6 +191,11 @@ class VPCs(BaseResponse):
template = self.response_template(CREATE_VPC_END_POINT)
return template.render(vpc_end_point=vpc_end_point)
def describe_vpc_endpoint_services(self):
    """Render the DescribeVpcEndpointServices XML response."""
    endpoint_services = self.ec2_backend.get_vpc_end_point_services()
    template = self.response_template(DESCRIBE_VPC_ENDPOINT_RESPONSE)
    return template.render(vpc_end_points=endpoint_services)
CREATE_VPC_RESPONSE = """
<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/{{doc_date}}/">
@ -449,3 +454,35 @@ CREATE_VPC_END_POINT = """ <CreateVpcEndpointResponse xmlns="http://monitoring.a
<creationTimestamp>{{ vpc_end_point.created_at }}</creationTimestamp>
</vpcEndpoint>
</CreateVpcEndpointResponse>"""
DESCRIBE_VPC_ENDPOINT_RESPONSE = """<DescribeVpcEndpointServicesResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>19a9ff46-7df6-49b8-9726-3df27527089d</requestId>
<serviceNameSet>
{% for serviceName in vpc_end_points.services %}
<item>{{ serviceName }}</item>
{% endfor %}
</serviceNameSet>
<serviceDetailSet>
<item>
{% for service in vpc_end_points.servicesDetails %}
<owner>amazon</owner>
<serviceType>
<item>
<serviceType>{{ service.type }}</serviceType>
</item>
</serviceType>
<baseEndpointDnsNameSet>
<item>{{ ".".join((service.service_name.split(".")[::-1])) }}</item>
</baseEndpointDnsNameSet>
<acceptanceRequired>false</acceptanceRequired>
<availabilityZoneSet>
{% for zone in vpc_end_points.availability_zones %}
<item>{{ zone.name }}</item>
{% endfor %}
</availabilityZoneSet>
<serviceName>{{ service.service_name }}</serviceName>
<vpcEndpointPolicySupported>true</vpcEndpointPolicySupported>
{% endfor %}
</item>
</serviceDetailSet>
</DescribeVpcEndpointServicesResponse>"""

View File

@ -53,6 +53,7 @@ class Shard(BaseModel):
self.starting_hash = starting_hash
self.ending_hash = ending_hash
self.records = OrderedDict()
self.is_open = True
@property
def shard_id(self):
@ -116,29 +117,41 @@ class Shard(BaseModel):
return r.sequence_number
def to_json(self):
return {
response = {
"HashKeyRange": {
"EndingHashKey": str(self.ending_hash),
"StartingHashKey": str(self.starting_hash),
},
"SequenceNumberRange": {
"EndingSequenceNumber": self.get_max_sequence_number(),
"StartingSequenceNumber": self.get_min_sequence_number(),
},
"ShardId": self.shard_id,
}
if not self.is_open:
response["SequenceNumberRange"][
"EndingSequenceNumber"
] = self.get_max_sequence_number()
return response
class Stream(CloudFormationModel):
def __init__(self, stream_name, shard_count, region):
def __init__(self, stream_name, shard_count, region_name):
self.stream_name = stream_name
self.shard_count = shard_count
self.creation_datetime = datetime.datetime.now()
self.region = region
self.region = region_name
self.account_number = ACCOUNT_ID
self.shards = {}
self.tags = {}
self.status = "ACTIVE"
self.shard_count = None
self.update_shard_count(shard_count)
def update_shard_count(self, shard_count):
# ToDo: This was extracted from init. It's only accurate for new streams.
# It doesn't (yet) try to accurately mimic the more complex re-sharding behavior.
# It makes the stream as if it had been created with this number of shards.
# Logically consistent, but not what AWS does.
self.shard_count = shard_count
step = 2 ** 128 // shard_count
hash_ranges = itertools.chain(
@ -146,7 +159,6 @@ class Stream(CloudFormationModel):
[(shard_count - 1, (shard_count - 1) * step, 2 ** 128)],
)
for index, start, end in hash_ranges:
shard = Shard(index, start, end)
self.shards[shard.shard_id] = shard
@ -229,10 +241,65 @@ class Stream(CloudFormationModel):
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
region = properties.get("Region", "us-east-1")
properties = cloudformation_json.get("Properties", {})
shard_count = properties.get("ShardCount", 1)
return Stream(properties["Name"], shard_count, region)
name = properties.get("Name", resource_name)
backend = kinesis_backends[region_name]
return backend.create_stream(name, shard_count, region_name)
@classmethod
def update_from_cloudformation_json(
    cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
    """Apply a CloudFormation stack update to this stream.

    Mirrors CloudFormation semantics: a change to a replacement-requiring
    property creates a new stream and deletes the old one ("Replacement");
    otherwise the existing stream is updated in place ("No Interruption").
    """
    properties = cloudformation_json["Properties"]

    if Stream.is_replacement_update(properties):
        resource_name_property = cls.cloudformation_name_type()
        # Default the name property to the generated resource name when the
        # template did not set one explicitly.
        if resource_name_property not in properties:
            properties[resource_name_property] = new_resource_name
        new_resource = cls.create_from_cloudformation_json(
            properties[resource_name_property], cloudformation_json, region_name
        )
        # Restore the original name so the delete below targets the OLD stream,
        # not the one just created.
        properties[resource_name_property] = original_resource.name
        cls.delete_from_cloudformation_json(
            original_resource.name, cloudformation_json, region_name
        )
        return new_resource
    else:  # No Interruption
        # Only the shard count can change without replacing the stream.
        if "ShardCount" in properties:
            original_resource.update_shard_count(properties["ShardCount"])
        return original_resource
@classmethod
def delete_from_cloudformation_json(
    cls, resource_name, cloudformation_json, region_name
):
    """Delete the stream backing this CloudFormation resource.

    Prefers the template's name property; falls back to the physical
    resource name when the template does not set one.
    """
    backend = kinesis_backends[region_name]
    stream_properties = cloudformation_json.get("Properties", {})
    name_key = cls.cloudformation_name_type()
    stream_name = stream_properties.get(name_key, resource_name)
    backend.delete_stream(stream_name)
@staticmethod
def is_replacement_update(properties):
    """Return True when a changed property forces CloudFormation to replace
    the stream instead of updating it in place.

    Bug fix: the original list ["BucketName", "ObjectLockEnabled"] was
    copy-pasted from the AWS::S3::Bucket model; for AWS::Kinesis::Stream
    only the "Name" property requires replacement.
    """
    properties_requiring_replacement_update = ["Name"]
    return any(
        property_requiring_replacement in properties
        for property_requiring_replacement in properties_requiring_replacement_update
    )
def get_cfn_attribute(self, attribute_name):
    """Resolve a CloudFormation Fn::GetAtt on this stream; only "Arn" is supported."""
    from moto.cloudformation.exceptions import UnformattedGetAttTemplateException

    # Guard clause: anything but "Arn" is unsupported.
    if attribute_name != "Arn":
        raise UnformattedGetAttTemplateException()
    return self.arn
@property
def physical_resource_id(self):
    # CloudFormation identifies a Kinesis stream by its stream name.
    return self.stream_name
class FirehoseRecord(BaseModel):
@ -331,10 +398,10 @@ class KinesisBackend(BaseBackend):
self.streams = OrderedDict()
self.delivery_streams = {}
def create_stream(self, stream_name, shard_count, region):
def create_stream(self, stream_name, shard_count, region_name):
if stream_name in self.streams:
raise ResourceInUseError(stream_name)
stream = Stream(stream_name, shard_count, region)
stream = Stream(stream_name, shard_count, region_name)
self.streams[stream_name] = stream
return stream

View File

@ -74,3 +74,41 @@ class DuplicatePolicyException(JsonRESTError):
super(DuplicatePolicyException, self).__init__(
"DuplicatePolicyException", "A policy with the same name already exists."
)
class PolicyTypeAlreadyEnabledException(JsonRESTError):
    """Raised when enabling a policy type that is already enabled on the root."""

    code = 400

    def __init__(self):
        error_type = "PolicyTypeAlreadyEnabledException"
        message = "The specified policy type is already enabled."
        super(PolicyTypeAlreadyEnabledException, self).__init__(error_type, message)
class PolicyTypeNotEnabledException(JsonRESTError):
    """Raised when disabling (or otherwise using) a policy type that is not enabled."""

    code = 400

    def __init__(self):
        error_type = "PolicyTypeNotEnabledException"
        message = "This operation can be performed only for enabled policy types."
        super(PolicyTypeNotEnabledException, self).__init__(error_type, message)
class RootNotFoundException(JsonRESTError):
    """Raised when the given RootId does not match any organization root."""

    code = 400

    def __init__(self):
        error_type = "RootNotFoundException"
        message = "You specified a root that doesn't exist."
        super(RootNotFoundException, self).__init__(error_type, message)
class TargetNotFoundException(JsonRESTError):
    """Raised when the given TargetId does not match any root, OU, or account."""

    code = 400

    def __init__(self):
        error_type = "TargetNotFoundException"
        message = "You specified a target that doesn't exist."
        super(TargetNotFoundException, self).__init__(error_type, message)

View File

@ -17,6 +17,10 @@ from moto.organizations.exceptions import (
AccountAlreadyRegisteredException,
AWSOrganizationsNotInUseException,
AccountNotRegisteredException,
RootNotFoundException,
PolicyTypeAlreadyEnabledException,
PolicyTypeNotEnabledException,
TargetNotFoundException,
)
@ -124,6 +128,13 @@ class FakeOrganizationalUnit(BaseModel):
class FakeRoot(FakeOrganizationalUnit):
SUPPORTED_POLICY_TYPES = [
"AISERVICES_OPT_OUT_POLICY",
"BACKUP_POLICY",
"SERVICE_CONTROL_POLICY",
"TAG_POLICY",
]
def __init__(self, organization, **kwargs):
super(FakeRoot, self).__init__(organization, **kwargs)
self.type = "ROOT"
@ -141,20 +152,55 @@ class FakeRoot(FakeOrganizationalUnit):
"PolicyTypes": self.policy_types,
}
def add_policy_type(self, policy_type):
    """Enable *policy_type* on this root.

    Raises InvalidInputException for unknown types and
    PolicyTypeAlreadyEnabledException when it is already enabled.
    """
    if policy_type not in self.SUPPORTED_POLICY_TYPES:
        raise InvalidInputException("You specified an invalid value.")
    already_enabled = any(
        entry["Type"] == policy_type for entry in self.policy_types
    )
    if already_enabled:
        raise PolicyTypeAlreadyEnabledException
    self.policy_types.append({"Type": policy_type, "Status": "ENABLED"})
def remove_policy_type(self, policy_type):
    """Disable *policy_type* on this root.

    Raises InvalidInputException for unknown types and
    PolicyTypeNotEnabledException when the type is not currently enabled.
    """
    if not FakePolicy.supported_policy_type(policy_type):
        raise InvalidInputException("You specified an invalid value.")
    # De Morgan of the original all(!=) check: no entry carries this type.
    if not any(entry["Type"] == policy_type for entry in self.policy_types):
        raise PolicyTypeNotEnabledException
    self.policy_types.remove({"Type": policy_type, "Status": "ENABLED"})
class FakePolicy(BaseModel):
SUPPORTED_POLICY_TYPES = [
"AISERVICES_OPT_OUT_POLICY",
"BACKUP_POLICY",
"SERVICE_CONTROL_POLICY",
"TAG_POLICY",
]
class FakeServiceControlPolicy(BaseModel):
def __init__(self, organization, **kwargs):
self.content = kwargs.get("Content")
self.description = kwargs.get("Description")
self.name = kwargs.get("Name")
self.type = kwargs.get("Type")
self.id = utils.make_random_service_control_policy_id()
self.id = utils.make_random_policy_id()
self.aws_managed = False
self.organization_id = organization.id
self.master_account_id = organization.master_account_id
self._arn_format = utils.SCP_ARN_FORMAT
self.attachments = []
if not FakePolicy.supported_policy_type(self.type):
raise InvalidInputException("You specified an invalid value.")
elif self.type == "AISERVICES_OPT_OUT_POLICY":
self._arn_format = utils.AI_POLICY_ARN_FORMAT
elif self.type == "SERVICE_CONTROL_POLICY":
self._arn_format = utils.SCP_ARN_FORMAT
else:
raise NotImplementedError(
"The {0} policy type has not been implemented".format(self.type)
)
@property
def arn(self):
return self._arn_format.format(
@ -176,6 +222,10 @@ class FakeServiceControlPolicy(BaseModel):
}
}
@staticmethod
def supported_policy_type(policy_type):
    """Return True when *policy_type* is one of the policy types moto supports."""
    return any(
        policy_type == candidate for candidate in FakePolicy.SUPPORTED_POLICY_TYPES
    )
class FakeServiceAccess(BaseModel):
# List of trusted services, which support trusted access with Organizations
@ -283,6 +333,13 @@ class OrganizationsBackend(BaseBackend):
self.services = []
self.admins = []
def _get_root_by_id(self, root_id):
    """Return the organizational root with *root_id*, or raise RootNotFoundException."""
    for unit in self.ou:
        if unit.id == root_id:
            return unit
    raise RootNotFoundException
def create_organization(self, **kwargs):
self.org = FakeOrganization(kwargs["FeatureSet"])
root_ou = FakeRoot(self.org)
@ -292,7 +349,7 @@ class OrganizationsBackend(BaseBackend):
)
master_account.id = self.org.master_account_id
self.accounts.append(master_account)
default_policy = FakeServiceControlPolicy(
default_policy = FakePolicy(
self.org,
Name="FullAWSAccess",
Description="Allows access to every operation",
@ -452,7 +509,7 @@ class OrganizationsBackend(BaseBackend):
)
def create_policy(self, **kwargs):
new_policy = FakeServiceControlPolicy(self.org, **kwargs)
new_policy = FakePolicy(self.org, **kwargs)
for policy in self.policies:
if kwargs["Name"] == policy.name:
raise DuplicatePolicyException
@ -460,7 +517,7 @@ class OrganizationsBackend(BaseBackend):
return new_policy.describe()
def describe_policy(self, **kwargs):
if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]):
if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]):
policy = next(
(p for p in self.policies if p.id == kwargs["PolicyId"]), None
)
@ -540,7 +597,13 @@ class OrganizationsBackend(BaseBackend):
)
def list_policies_for_target(self, **kwargs):
if re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]):
filter = kwargs["Filter"]
if re.match(utils.ROOT_ID_REGEX, kwargs["TargetId"]):
obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None)
if obj is None:
raise TargetNotFoundException
elif re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]):
obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None)
if obj is None:
raise RESTError(
@ -553,14 +616,25 @@ class OrganizationsBackend(BaseBackend):
raise AccountNotFoundException
else:
raise InvalidInputException("You specified an invalid value.")
if not FakePolicy.supported_policy_type(filter):
raise InvalidInputException("You specified an invalid value.")
if filter not in ["AISERVICES_OPT_OUT_POLICY", "SERVICE_CONTROL_POLICY"]:
raise NotImplementedError(
"The {0} policy type has not been implemented".format(filter)
)
return dict(
Policies=[
p.describe()["Policy"]["PolicySummary"] for p in obj.attached_policies
p.describe()["Policy"]["PolicySummary"]
for p in obj.attached_policies
if p.type == filter
]
)
def list_targets_for_policy(self, **kwargs):
if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]):
if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]):
policy = next(
(p for p in self.policies if p.id == kwargs["PolicyId"]), None
)
@ -733,5 +807,19 @@ class OrganizationsBackend(BaseBackend):
if not admin.services:
self.admins.remove(admin)
def enable_policy_type(self, **kwargs):
    """Enable kwargs["PolicyType"] on kwargs["RootId"] and return the root description."""
    target_root = self._get_root_by_id(kwargs["RootId"])
    target_root.add_policy_type(kwargs["PolicyType"])
    return {"Root": target_root.describe()}
def disable_policy_type(self, **kwargs):
    """Disable kwargs["PolicyType"] on kwargs["RootId"] and return the root description."""
    target_root = self._get_root_by_id(kwargs["RootId"])
    target_root.remove_policy_type(kwargs["PolicyType"])
    return {"Root": target_root.describe()}
organizations_backend = OrganizationsBackend()

View File

@ -191,3 +191,13 @@ class OrganizationsResponse(BaseResponse):
**self.request_params
)
)
def enable_policy_type(self):
    """Handle the EnablePolicyType action and serialize the result as JSON."""
    result = self.organizations_backend.enable_policy_type(**self.request_params)
    return json.dumps(result)
def disable_policy_type(self):
    """Handle the DisablePolicyType action and serialize the result as JSON."""
    result = self.organizations_backend.disable_policy_type(**self.request_params)
    return json.dumps(result)

View File

@ -14,6 +14,9 @@ ACCOUNT_ARN_FORMAT = "arn:aws:organizations::{0}:account/{1}/{2}"
ROOT_ARN_FORMAT = "arn:aws:organizations::{0}:root/{1}/{2}"
OU_ARN_FORMAT = "arn:aws:organizations::{0}:ou/{1}/{2}"
SCP_ARN_FORMAT = "arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}"
AI_POLICY_ARN_FORMAT = (
"arn:aws:organizations::{0}:policy/{1}/aiservices_opt_out_policy/{2}"
)
CHARSET = string.ascii_lowercase + string.digits
ORG_ID_SIZE = 10
@ -21,7 +24,7 @@ ROOT_ID_SIZE = 4
ACCOUNT_ID_SIZE = 12
OU_ID_SUFFIX_SIZE = 8
CREATE_ACCOUNT_STATUS_ID_SIZE = 8
SCP_ID_SIZE = 8
POLICY_ID_SIZE = 8
EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$"
ORG_ID_REGEX = r"o-[a-z0-9]{%s}" % ORG_ID_SIZE
@ -29,7 +32,7 @@ ROOT_ID_REGEX = r"r-[a-z0-9]{%s}" % ROOT_ID_SIZE
OU_ID_REGEX = r"ou-[a-z0-9]{%s}-[a-z0-9]{%s}" % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE)
ACCOUNT_ID_REGEX = r"[0-9]{%s}" % ACCOUNT_ID_SIZE
CREATE_ACCOUNT_STATUS_ID_REGEX = r"car-[a-z0-9]{%s}" % CREATE_ACCOUNT_STATUS_ID_SIZE
SCP_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, SCP_ID_SIZE)
POLICY_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, POLICY_ID_SIZE)
def make_random_org_id():
@ -76,8 +79,8 @@ def make_random_create_account_status_id():
)
def make_random_service_control_policy_id():
def make_random_policy_id():
    """Generate a random policy ID.

    The ID format is "p-" followed by lower-case letters or digits
    (AWS allows 8 to 128 characters; moto always emits POLICY_ID_SIZE),
    e.g. 'p-k2av4a8a'.
    """
    suffix_chars = [random.choice(CHARSET) for _ in range(POLICY_ID_SIZE)]
    return "p-" + "".join(suffix_chars)

View File

@ -0,0 +1,33 @@
from collections import OrderedDict
def cfn_to_api_encryption(bucket_encryption_properties):
    """Translate a CloudFormation BucketEncryption property block into the
    shape the S3 PutBucketEncryption API expects.

    Only the first ServerSideEncryptionConfiguration rule is read; the
    optional KMSMasterKeyID is included only when present.
    """
    default_rule = bucket_encryption_properties["ServerSideEncryptionConfiguration"][0][
        "ServerSideEncryptionByDefault"
    ]
    by_default = OrderedDict()
    by_default["SSEAlgorithm"] = default_rule["SSEAlgorithm"]
    kms_key_id = default_rule.get("KMSMasterKeyID")
    if kms_key_id:
        by_default["KMSMasterKeyID"] = kms_key_id

    bucket_encryption = OrderedDict()
    bucket_encryption["@xmlns"] = "http://s3.amazonaws.com/doc/2006-03-01/"
    bucket_encryption["Rule"] = OrderedDict(
        [("ApplyServerSideEncryptionByDefault", by_default)]
    )
    return bucket_encryption
def is_replacement_update(properties):
    """Return True when any changed property forces CloudFormation to replace
    the bucket ("Replacement") instead of updating it in place.

    Per the AWS::S3::Bucket resource reference, BucketName and
    ObjectLockEnabled both have "Update requires: Replacement".
    """
    properties_requiring_replacement_update = ["BucketName", "ObjectLockEnabled"]
    # any() takes a generator directly; no need to materialize a list (C419).
    return any(
        property_requiring_replacement in properties
        for property_requiring_replacement in properties_requiring_replacement_update
    )

View File

@ -43,6 +43,7 @@ from .exceptions import (
WrongPublicAccessBlockAccountIdError,
NoSuchUpload,
)
from .cloud_formation import cfn_to_api_encryption, is_replacement_update
from .utils import clean_key_name, _VersionedKeyStore
MAX_BUCKET_NAME_LENGTH = 63
@ -1084,8 +1085,54 @@ class FakeBucket(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name
):
bucket = s3_backend.create_bucket(resource_name, region_name)
properties = cloudformation_json["Properties"]
if "BucketEncryption" in properties:
bucket_encryption = cfn_to_api_encryption(properties["BucketEncryption"])
s3_backend.put_bucket_encryption(
bucket_name=resource_name, encryption=[bucket_encryption]
)
return bucket
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
properties = cloudformation_json["Properties"]
if is_replacement_update(properties):
resource_name_property = cls.cloudformation_name_type()
if resource_name_property not in properties:
properties[resource_name_property] = new_resource_name
new_resource = cls.create_from_cloudformation_json(
properties[resource_name_property], cloudformation_json, region_name
)
properties[resource_name_property] = original_resource.name
cls.delete_from_cloudformation_json(
original_resource.name, cloudformation_json, region_name
)
return new_resource
else: # No Interruption
if "BucketEncryption" in properties:
bucket_encryption = cfn_to_api_encryption(
properties["BucketEncryption"]
)
s3_backend.put_bucket_encryption(
bucket_name=original_resource.name, encryption=[bucket_encryption]
)
return original_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
bucket_name = properties[cls.cloudformation_name_type()]
s3_backend.delete_bucket(bucket_name)
def to_config_dict(self):
"""Return the AWS Config JSON format of this S3 bucket.

View File

@ -51,6 +51,8 @@ def random_password(
if include_space:
password += " "
required_characters += " "
if exclude_characters:
password = _exclude_characters(password, exclude_characters)
password = "".join(
six.text_type(random.choice(password)) for x in range(password_length)
@ -61,7 +63,6 @@ def random_password(
password, required_characters
)
password = _exclude_characters(password, exclude_characters)
return password

View File

@ -844,6 +844,7 @@ class SQSBackend(BaseBackend):
def purge_queue(self, queue_name):
    """Drop every message from the named queue, including in-flight ones."""
    target = self.get_queue(queue_name)
    target._messages = []
    target._pending_messages = set()
def list_dead_letter_source_queues(self, queue_name):
dlq = self.get_queue(queue_name)

View File

@ -1,6 +1,7 @@
from __future__ import unicode_literals
import copy
import json
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
@ -272,9 +273,10 @@ def test_authorize_ip_range_and_revoke():
# There are two egress rules associated with the security group:
# the default outbound rule and the new one
int(egress_security_group.rules_egress[1].to_port).should.equal(2222)
egress_security_group.rules_egress[1].grants[0].cidr_ip.should.equal(
"123.123.123.123/32"
)
actual_cidr = egress_security_group.rules_egress[1].grants[0].cidr_ip
# Deal with Python2 dict->unicode, instead of dict->string
actual_cidr = json.loads(actual_cidr.replace("u'", "'").replace("'", '"'))
actual_cidr.should.equal({"CidrIp": "123.123.123.123/32"})
# Wrong Cidr should throw error
egress_security_group.revoke.when.called_with(
@ -690,6 +692,68 @@ def test_add_same_rule_twice_throws_error():
sg.authorize_ingress(IpPermissions=ip_permissions)
@mock_ec2
def test_description_in_ip_permissions():
    """Security-group IpRanges may carry an optional Description; verify it is
    stored and echoed back by describe_security_groups, and absent when not set.
    """
    # NOTE(review): resource is us-west-1 but client is us-east-1 — the VPC and
    # the security groups end up in different mocked regions; confirm intended.
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    conn = boto3.client("ec2", region_name="us-east-1")
    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
    sg = conn.create_security_group(
        GroupName="sg1", Description="Test security group sg1", VpcId=vpc.id
    )
    # Rule WITH a description on its CIDR range.
    ip_permissions = [
        {
            "IpProtocol": "tcp",
            "FromPort": 27017,
            "ToPort": 27017,
            "IpRanges": [{"CidrIp": "1.2.3.4/32", "Description": "testDescription"}],
        }
    ]
    conn.authorize_security_group_ingress(
        GroupId=sg["GroupId"], IpPermissions=ip_permissions
    )
    result = conn.describe_security_groups(GroupIds=[sg["GroupId"]])
    # Description must round-trip alongside the CIDR.
    assert (
        result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["Description"]
        == "testDescription"
    )
    assert (
        result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"]
        == "1.2.3.4/32"
    )
    # Second group: rule WITHOUT a description.
    sg = conn.create_security_group(
        GroupName="sg2", Description="Test security group sg1", VpcId=vpc.id
    )
    ip_permissions = [
        {
            "IpProtocol": "tcp",
            "FromPort": 27017,
            "ToPort": 27017,
            "IpRanges": [{"CidrIp": "1.2.3.4/32"}],
        }
    ]
    conn.authorize_security_group_ingress(
        GroupId=sg["GroupId"], IpPermissions=ip_permissions
    )
    result = conn.describe_security_groups(GroupIds=[sg["GroupId"]])
    # No Description key should be invented when none was supplied.
    assert (
        result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0].get(
            "Description"
        )
        is None
    )
    assert (
        result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"]
        == "1.2.3.4/32"
    )
@mock_ec2
def test_security_group_tagging_boto3():
conn = boto3.client("ec2", region_name="us-east-1")
@ -868,7 +932,7 @@ def test_revoke_security_group_egress():
{
"FromPort": 0,
"IpProtocol": "-1",
"IpRanges": [{"CidrIp": "0.0.0.0/0"},],
"IpRanges": [{"CidrIp": "0.0.0.0/0"}],
"ToPort": 123,
},
]

View File

@ -825,3 +825,34 @@ def test_describe_classic_link_dns_support_multiple():
assert response.get("Vpcs").sort(key=lambda x: x["VpcId"]) == expected.sort(
key=lambda x: x["VpcId"]
)
@mock_ec2
def test_describe_vpc_end_point_services():
    """describe_vpc_endpoint_services reports the gateway endpoint created
    against the VPC, including its service type and the region's AZs."""
    ec2 = boto3.client("ec2", region_name="us-west-1")
    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16")
    route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"])
    ec2.create_vpc_endpoint(
        VpcId=vpc["Vpc"]["VpcId"],
        ServiceName="com.amazonaws.us-east-1.s3",
        RouteTableIds=[route_table["RouteTable"]["RouteTableId"]],
        VpcEndpointType="gateway",
    )

    services = ec2.describe_vpc_endpoint_services()
    # Plain truthiness asserts: the original wrapped sure's `.should.be.true`
    # chain inside `assert`, which is redundant (the chain raises by itself).
    assert services.get("ServiceDetails")
    assert services.get("ServiceNames")
    assert services.get("ServiceNames") == ["com.amazonaws.us-east-1.s3"]
    detail = services["ServiceDetails"][0]
    assert detail.get("ServiceType", [])[0].get("ServiceType") == "gateway"
    assert detail.get("AvailabilityZones") == ["us-west-1a", "us-west-1b"]

View File

@ -10,6 +10,8 @@ from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentEx
from moto import mock_kinesis, mock_kinesis_deprecated
from moto.core import ACCOUNT_ID
import sure # noqa
@mock_kinesis_deprecated
def test_create_cluster():
@ -601,9 +603,6 @@ def test_split_shard():
stream = stream_response["StreamDescription"]
shards = stream["Shards"]
shards.should.have.length_of(2)
sum(
[shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards]
).should.equal(99)
shard_range = shards[0]["HashKeyRange"]
new_starting_hash = (
@ -616,9 +615,6 @@ def test_split_shard():
stream = stream_response["StreamDescription"]
shards = stream["Shards"]
shards.should.have.length_of(3)
sum(
[shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards]
).should.equal(99)
shard_range = shards[2]["HashKeyRange"]
new_starting_hash = (
@ -631,9 +627,6 @@ def test_split_shard():
stream = stream_response["StreamDescription"]
shards = stream["Shards"]
shards.should.have.length_of(4)
sum(
[shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards]
).should.equal(99)
@mock_kinesis_deprecated
@ -662,9 +655,6 @@ def test_merge_shards():
stream = stream_response["StreamDescription"]
shards = stream["Shards"]
shards.should.have.length_of(4)
sum(
[shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards]
).should.equal(99)
conn.merge_shards(stream_name, "shardId-000000000000", "shardId-000000000001")
@ -672,17 +662,23 @@ def test_merge_shards():
stream = stream_response["StreamDescription"]
shards = stream["Shards"]
shards.should.have.length_of(3)
sum(
[shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards]
).should.equal(99)
active_shards = [
shard
for shard in shards
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
active_shards.should.have.length_of(3)
conn.merge_shards(stream_name, "shardId-000000000002", "shardId-000000000000")
stream_response = conn.describe_stream(stream_name)
stream = stream_response["StreamDescription"]
shards = stream["Shards"]
shards.should.have.length_of(2)
sum(
[shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards]
).should.equal(99)
active_shards = [
shard
for shard in shards
if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
]
active_shards.should.have.length_of(2)

View File

@ -0,0 +1,144 @@
import boto3
import sure # noqa
from moto import mock_kinesis, mock_cloudformation
@mock_cloudformation
def test_kinesis_cloudformation_create_stream():
    """A bare AWS::Kinesis::Stream resource is provisioned with a generated
    (non-empty) physical resource id."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(
        StackName="MyStack",
        TemplateBody='{"Resources":{"MyStream":{"Type":"AWS::Kinesis::Stream"}}}',
    )
    resource = client.list_stack_resources(StackName="MyStack")[
        "StackResourceSummaries"
    ][0]
    resource["LogicalResourceId"].should.equal("MyStream")
    len(resource["PhysicalResourceId"]).should.be.greater_than(0)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_get_attr():
    """!Ref on a Kinesis stream yields its name; !GetAtt .Arn yields its ARN."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    stack_name = "MyStack"

    template = """
Resources:
  TheStream:
    Type: AWS::Kinesis::Stream
Outputs:
  StreamName:
    Value: !Ref TheStream
  StreamArn:
    Value: !GetAtt TheStream.Arn
""".strip()
    cf_conn.create_stack(StackName=stack_name, TemplateBody=template)

    stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
    # Map OutputKey -> OutputValue instead of scanning the list once per key.
    outputs = {o["OutputKey"]: o["OutputValue"] for o in stack_description["Outputs"]}

    kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
    stream_description = kinesis_conn.describe_stream(
        StreamName=outputs["StreamName"]
    )["StreamDescription"]
    outputs["StreamArn"].should.equal(stream_description["StreamARN"])
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_update():
    """Updating ShardCount via CloudFormation re-shards the existing stream."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    stack_name = "MyStack"

    def open_shard_count(description):
        # Shards without an EndingSequenceNumber are the currently-open ones.
        return len(
            [
                shard
                for shard in description["Shards"]
                if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
            ]
        )

    template = """
Resources:
  TheStream:
    Type: AWS::Kinesis::Stream
    Properties:
      Name: MyStream
      ShardCount: 4
""".strip()
    cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
    cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0][
        "StackName"
    ].should.equal(stack_name)

    kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
    description = kinesis_conn.describe_stream(StreamName="MyStream")[
        "StreamDescription"
    ]
    open_shard_count(description).should.equal(4)

    template = """
Resources:
  TheStream:
    Type: AWS::Kinesis::Stream
    Properties:
      ShardCount: 6
""".strip()
    cf_conn.update_stack(StackName=stack_name, TemplateBody=template)
    description = kinesis_conn.describe_stream(StreamName="MyStream")[
        "StreamDescription"
    ]
    open_shard_count(description).should.equal(6)
@mock_cloudformation
@mock_kinesis
def test_kinesis_cloudformation_delete():
    """Deleting the stack removes the Kinesis stream it provisioned."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    stack_name = "MyStack"
    template = """
Resources:
  TheStream:
    Type: AWS::Kinesis::Stream
    Properties:
      Name: MyStream
""".strip()
    cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
    described_stack = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0]
    described_stack["StackName"].should.equal(stack_name)

    kinesis_conn = boto3.client("kinesis", region_name="us-east-1")
    kinesis_conn.describe_stream(StreamName="MyStream")["StreamDescription"][
        "StreamName"
    ].should.equal("MyStream")

    cf_conn.delete_stack(StackName=stack_name)
    remaining = kinesis_conn.list_streams()["StreamNames"]
    len(remaining).should.equal(0)

View File

@ -31,9 +31,9 @@ def test_make_random_create_account_status_id():
create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX)
def test_make_random_service_control_policy_id():
    """Generated SCP ids match the module's SCP id pattern."""
    scp_id = utils.make_random_service_control_policy_id()
    scp_id.should.match(utils.SCP_ID_REGEX)
def test_make_random_policy_id():
    """Generated generic policy ids match the module's policy id pattern."""
    generated_id = utils.make_random_policy_id()
    generated_id.should.match(utils.POLICY_ID_REGEX)
def validate_organization(response):
@ -128,7 +128,7 @@ def validate_create_account_status(create_status):
def validate_policy_summary(org, summary):
summary.should.be.a(dict)
summary.should.have.key("Id").should.match(utils.SCP_ID_REGEX)
summary.should.have.key("Id").should.match(utils.POLICY_ID_REGEX)
summary.should.have.key("Arn").should.equal(
utils.SCP_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], summary["Id"])
)

View File

@ -379,6 +379,30 @@ def test_create_policy():
policy["Content"].should.equal(json.dumps(policy_doc01))
@mock_organizations
def test_create_policy_errors():
    """create_policy rejects an unknown policy Type with InvalidInputException."""
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")

    # An unsupported Type value must be refused.
    with assert_raises(ClientError) as err:
        client.create_policy(
            Content=json.dumps(policy_doc01),
            Description="moto",
            Name="moto",
            Type="MOTO",
        )
    exc = err.exception
    exc.operation_name.should.equal("CreatePolicy")
    exc.response["Error"]["Code"].should.contain("InvalidInputException")
    exc.response["Error"]["Message"].should.equal("You specified an invalid value.")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
@mock_organizations
def test_describe_policy():
client = boto3.client("organizations", region_name="us-east-1")
@ -468,7 +492,7 @@ def test_delete_policy():
def test_delete_policy_exception():
client = boto3.client("organizations", region_name="us-east-1")
org = client.create_organization(FeatureSet="ALL")["Organization"]
non_existent_policy_id = utils.make_random_service_control_policy_id()
non_existent_policy_id = utils.make_random_policy_id()
with assert_raises(ClientError) as e:
response = client.delete_policy(PolicyId=non_existent_policy_id)
ex = e.exception
@ -571,7 +595,7 @@ def test_update_policy():
def test_update_policy_exception():
client = boto3.client("organizations", region_name="us-east-1")
org = client.create_organization(FeatureSet="ALL")["Organization"]
non_existent_policy_id = utils.make_random_service_control_policy_id()
non_existent_policy_id = utils.make_random_policy_id()
with assert_raises(ClientError) as e:
response = client.update_policy(PolicyId=non_existent_policy_id)
ex = e.exception
@ -631,6 +655,7 @@ def test_list_policies_for_target():
def test_list_policies_for_target_exception():
client = boto3.client("organizations", region_name="us-east-1")
client.create_organization(FeatureSet="ALL")["Organization"]
root_id = client.list_roots()["Roots"][0]["Id"]
ou_id = "ou-gi99-i7r8eh2i2"
account_id = "126644886543"
with assert_raises(ClientError) as e:
@ -664,6 +689,34 @@ def test_list_policies_for_target_exception():
ex.response["Error"]["Code"].should.contain("InvalidInputException")
ex.response["Error"]["Message"].should.equal("You specified an invalid value.")
# not existing root
# when
with assert_raises(ClientError) as e:
client.list_policies_for_target(
TargetId="r-0000", Filter="SERVICE_CONTROL_POLICY"
)
# then
ex = e.exception
ex.operation_name.should.equal("ListPoliciesForTarget")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("TargetNotFoundException")
ex.response["Error"]["Message"].should.equal(
"You specified a target that doesn't exist."
)
# invalid policy type
# when
with assert_raises(ClientError) as e:
client.list_policies_for_target(TargetId=root_id, Filter="MOTO")
# then
ex = e.exception
ex.operation_name.should.equal("ListPoliciesForTarget")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("InvalidInputException")
ex.response["Error"]["Message"].should.equal("You specified an invalid value.")
@mock_organizations
def test_list_targets_for_policy():
@ -1305,3 +1358,211 @@ def test_deregister_delegated_administrator_erros():
ex.response["Error"]["Message"].should.equal(
"You specified an unrecognized service principal."
)
@mock_organizations
def test_enable_policy_type():
    """Enabling AISERVICES_OPT_OUT_POLICY adds it to the root's policy types."""
    client = boto3.client("organizations", region_name="us-east-1")
    org = client.create_organization(FeatureSet="ALL")["Organization"]
    root_id = client.list_roots()["Roots"][0]["Id"]

    root = client.enable_policy_type(
        RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY"
    )["Root"]

    root["Id"].should.equal(root_id)
    root["Arn"].should.equal(
        utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id)
    )
    root["Name"].should.equal("Root")
    # SERVICE_CONTROL_POLICY is enabled by default; the new type joins it.
    sorted(root["PolicyTypes"], key=lambda pt: pt["Type"]).should.equal(
        [
            {"Type": "AISERVICES_OPT_OUT_POLICY", "Status": "ENABLED"},
            {"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"},
        ]
    )
@mock_organizations
def test_enable_policy_type_errors():
    """enable_policy_type error paths: unknown root, already-enabled type,
    and invalid type."""
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    root_id = client.list_roots()["Roots"][0]["Id"]

    # A root id that does not exist.
    with assert_raises(ClientError) as err:
        client.enable_policy_type(
            RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY"
        )
    exc = err.exception
    exc.operation_name.should.equal("EnablePolicyType")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    exc.response["Error"]["Code"].should.contain("RootNotFoundException")
    exc.response["Error"]["Message"].should.equal(
        "You specified a root that doesn't exist."
    )

    # SERVICE_CONTROL_POLICY is already enabled by default.
    with assert_raises(ClientError) as err:
        client.enable_policy_type(RootId=root_id, PolicyType="SERVICE_CONTROL_POLICY")
    exc = err.exception
    exc.operation_name.should.equal("EnablePolicyType")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    exc.response["Error"]["Code"].should.contain("PolicyTypeAlreadyEnabledException")
    exc.response["Error"]["Message"].should.equal(
        "The specified policy type is already enabled."
    )

    # An unsupported policy type.
    with assert_raises(ClientError) as err:
        client.enable_policy_type(RootId=root_id, PolicyType="MOTO")
    exc = err.exception
    exc.operation_name.should.equal("EnablePolicyType")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    exc.response["Error"]["Code"].should.contain("InvalidInputException")
    exc.response["Error"]["Message"].should.equal("You specified an invalid value.")
@mock_organizations
def test_disable_policy_type():
    """Disabling a previously enabled policy type removes it from the root,
    leaving only the default SERVICE_CONTROL_POLICY."""
    client = boto3.client("organizations", region_name="us-east-1")
    org = client.create_organization(FeatureSet="ALL")["Organization"]
    root_id = client.list_roots()["Roots"][0]["Id"]
    client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY")

    root = client.disable_policy_type(
        RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY"
    )["Root"]

    root["Id"].should.equal(root_id)
    root["Arn"].should.equal(
        utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id)
    )
    root["Name"].should.equal("Root")
    root["PolicyTypes"].should.equal(
        [{"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}]
    )
@mock_organizations
def test_disable_policy_type_errors():
    """disable_policy_type error paths: unknown root, not-enabled type, and
    invalid type."""
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    root_id = client.list_roots()["Roots"][0]["Id"]

    # A root id that does not exist.
    with assert_raises(ClientError) as err:
        client.disable_policy_type(
            RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY"
        )
    exc = err.exception
    exc.operation_name.should.equal("DisablePolicyType")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    exc.response["Error"]["Code"].should.contain("RootNotFoundException")
    exc.response["Error"]["Message"].should.equal(
        "You specified a root that doesn't exist."
    )

    # Disabling a type that was never enabled on this root.
    with assert_raises(ClientError) as err:
        client.disable_policy_type(
            RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY"
        )
    exc = err.exception
    exc.operation_name.should.equal("DisablePolicyType")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    exc.response["Error"]["Code"].should.contain("PolicyTypeNotEnabledException")
    exc.response["Error"]["Message"].should.equal(
        "This operation can be performed only for enabled policy types."
    )

    # An unsupported policy type.
    with assert_raises(ClientError) as err:
        client.disable_policy_type(RootId=root_id, PolicyType="MOTO")
    exc = err.exception
    exc.operation_name.should.equal("DisablePolicyType")
    exc.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    exc.response["Error"]["Code"].should.contain("InvalidInputException")
    exc.response["Error"]["Message"].should.equal("You specified an invalid value.")
@mock_organizations
def test_aiservices_opt_out_policy():
    """End-to-end: enable the AI-services opt-out type, create a policy of
    that type, attach it to the root, and find it via list_policies_for_target."""
    client = boto3.client("organizations", region_name="us-east-1")
    org = client.create_organization(FeatureSet="ALL")["Organization"]
    root_id = client.list_roots()["Roots"][0]["Id"]
    client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY")
    ai_policy = {
        "services": {
            "@@operators_allowed_for_child_policies": ["@@none"],
            "default": {
                "@@operators_allowed_for_child_policies": ["@@none"],
                "opt_out_policy": {
                    "@@operators_allowed_for_child_policies": ["@@none"],
                    "@@assign": "optOut",
                },
            },
        }
    }

    created = client.create_policy(
        Content=json.dumps(ai_policy),
        Description="Opt out of all AI services",
        Name="ai-opt-out",
        Type="AISERVICES_OPT_OUT_POLICY",
    )

    policy_summary = created["Policy"]["PolicySummary"]
    policy_id = policy_summary["Id"]
    policy_summary["Id"].should.match(utils.POLICY_ID_REGEX)
    policy_summary["Arn"].should.equal(
        utils.AI_POLICY_ARN_FORMAT.format(
            org["MasterAccountId"], org["Id"], policy_summary["Id"]
        )
    )
    policy_summary["Name"].should.equal("ai-opt-out")
    policy_summary["Description"].should.equal("Opt out of all AI services")
    policy_summary["Type"].should.equal("AISERVICES_OPT_OUT_POLICY")
    policy_summary["AwsManaged"].should_not.be.ok
    json.loads(created["Policy"]["Content"]).should.equal(ai_policy)

    # Once attached, the policy is discoverable for the root target.
    client.attach_policy(PolicyId=policy_id, TargetId=root_id)
    listed = client.list_policies_for_target(
        TargetId=root_id, Filter="AISERVICES_OPT_OUT_POLICY"
    )
    listed["Policies"].should.have.length_of(1)
    listed["Policies"][0]["Id"].should.equal(policy_id)

View File

@ -36,7 +36,7 @@ from nose.tools import assert_raises
import sure # noqa
from moto import settings, mock_s3, mock_s3_deprecated, mock_config
from moto import settings, mock_s3, mock_s3_deprecated, mock_config, mock_cloudformation
import moto.s3.models as s3model
from moto.core.exceptions import InvalidNextTokenException
from moto.core.utils import py2_strip_unicode_keys
@ -4686,3 +4686,142 @@ def test_presigned_put_url_with_custom_headers():
s3.delete_object(Bucket=bucket, Key=key)
s3.delete_bucket(Bucket=bucket)
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_basic():
    """A bare AWS::S3::Bucket resource provisions a bucket whose generated
    name is exposed through the stack output."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {}}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    # The returned StackId was previously bound to an unused local; only the
    # stack-creation side effect matters here.
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    # head_bucket raises if the bucket named by the output does not exist.
    s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"])
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_with_properties():
    """BucketName and BucketEncryption properties are honoured on creation."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")
    bucket_name = "MyBucket"
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {
                    "BucketName": bucket_name,
                    "BucketEncryption": {
                        "ServerSideEncryptionConfiguration": [
                            {
                                "ServerSideEncryptionByDefault": {
                                    "SSEAlgorithm": "AES256"
                                }
                            }
                        ]
                    },
                },
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    # The StackId and the stack description were previously bound to unused
    # locals; only the stack-creation side effect matters here.
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
    s3.head_bucket(Bucket=bucket_name)
    encryption = s3.get_bucket_encryption(Bucket=bucket_name)
    encryption["ServerSideEncryptionConfiguration"]["Rules"][0][
        "ApplyServerSideEncryptionByDefault"
    ]["SSEAlgorithm"].should.equal("AES256")
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_no_interruption():
    """Adding BucketEncryption to an existing bucket updates it in place;
    the generated bucket name is kept."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")

    base_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(base_template))
    stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0]
    bucket_name = stack_description["Outputs"][0]["OutputValue"]
    s3.head_bucket(Bucket=bucket_name)

    encrypted_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {
                    "BucketEncryption": {
                        "ServerSideEncryptionConfiguration": [
                            {
                                "ServerSideEncryptionByDefault": {
                                    "SSEAlgorithm": "AES256"
                                }
                            }
                        ]
                    }
                },
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf.update_stack(
        StackName="test_stack", TemplateBody=json.dumps(encrypted_template)
    )

    rules = s3.get_bucket_encryption(Bucket=bucket_name)[
        "ServerSideEncryptionConfiguration"
    ]["Rules"]
    rules[0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"].should.equal(
        "AES256"
    )
@mock_s3
@mock_cloudformation
def test_s3_bucket_cloudformation_update_replacement():
    """Setting an explicit BucketName forces a replacement; the stack output
    then points at the newly named bucket."""
    s3 = boto3.client("s3", region_name="us-east-1")
    cf = boto3.client("cloudformation", region_name="us-east-1")

    initial_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}},
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf.create_stack(StackName="test_stack", TemplateBody=json.dumps(initial_template))
    outputs = cf.describe_stacks(StackName="test_stack")["Stacks"][0]["Outputs"]
    s3.head_bucket(Bucket=outputs[0]["OutputValue"])

    renamed_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Resources": {
            "testInstance": {
                "Type": "AWS::S3::Bucket",
                "Properties": {"BucketName": "MyNewBucketName"},
            }
        },
        "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}},
    }
    cf.update_stack(StackName="test_stack", TemplateBody=json.dumps(renamed_template))
    outputs = cf.describe_stacks(StackName="test_stack")["Stacks"][0]["Outputs"]
    s3.head_bucket(Bucket=outputs[0]["OutputValue"])

View File

@ -338,6 +338,7 @@ def test_get_random_exclude_characters_and_symbols():
PasswordLength=20, ExcludeCharacters="xyzDje@?!."
)
assert any(c in "xyzDje@?!." for c in random_password["RandomPassword"]) == False
assert len(random_password["RandomPassword"]) == 20
@mock_secretsmanager

View File

@ -1098,6 +1098,38 @@ def test_purge_action():
queue.count().should.equal(0)
@mock_sqs
def test_purge_queue_before_delete_message():
    """Purging a FIFO queue while a received-but-not-deleted message is in
    flight must not let that message reappear afterwards."""
    client = boto3.client("sqs", region_name="us-east-1")
    queue_url = client.create_queue(
        QueueName="test-dlr-queue.fifo", Attributes={"FifoQueue": "true"}
    )["QueueUrl"]

    client.send_message(
        QueueUrl=queue_url,
        MessageGroupId="test",
        MessageDeduplicationId="first_message",
        MessageBody="first_message",
    )
    # Receive (but do not delete) the first message, then purge while it is
    # still in flight.  Only the receive side effect matters, so the
    # previously unused response binding is dropped.
    client.receive_message(QueueUrl=queue_url)
    client.purge_queue(QueueUrl=queue_url)

    client.send_message(
        QueueUrl=queue_url,
        MessageGroupId="test",
        MessageDeduplicationId="second_message",
        MessageBody="second_message",
    )
    received = client.receive_message(QueueUrl=queue_url)
    len(received.get("Messages", [])).should.equal(1)
    received["Messages"][0]["Body"].should.equal("second_message")
@mock_sqs_deprecated
def test_delete_message_after_visibility_timeout():
VISIBILITY_TIMEOUT = 1