From 3342d49a43c260ad8016968b7d36cb54c382270e Mon Sep 17 00:00:00 2001 From: jweite Date: Sat, 1 Aug 2020 17:43:03 -0400 Subject: [PATCH 1/7] S3 cloudformation update (#3199) * First cut of S3 Cloudformation Update support: encryption property. * Update type support for S3. Abstract base class for CloudFormation-aware models, as designed by @bblommers, introduced to decentralize CloudFormation resource and name property values to model objects. * Blackened... * Un-renamed param in s3.models.update_from_cloudformation_json() and its call to stay compatible with other modules. Co-authored-by: Bert Blommers Co-authored-by: Joseph Weitekamp Co-authored-by: Bert Blommers --- moto/cloudformation/parsing.py | 38 ++++----- moto/core/models.py | 1 + moto/s3/cloud_formation.py | 33 ++++++++ moto/s3/models.py | 47 +++++++++++ tests/test_s3/test_s3.py | 141 ++++++++++++++++++++++++++++++++- 5 files changed, 241 insertions(+), 19 deletions(-) create mode 100644 moto/s3/cloud_formation.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 2c212a148..a1e1bb18b 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -21,6 +21,7 @@ from moto.batch import models as batch_models # noqa from moto.cloudwatch import models as cloudwatch_models # noqa from moto.datapipeline import models as datapipeline_models # noqa from moto.dynamodb2 import models as dynamodb2_models # noqa +from moto.ec2 import models as ec2_models from moto.ecr import models as ecr_models # noqa from moto.ecs import models as ecs_models # noqa from moto.elb import models as elb_models # noqa @@ -33,15 +34,13 @@ from moto.rds import models as rds_models # noqa from moto.rds2 import models as rds2_models # noqa from moto.redshift import models as redshift_models # noqa from moto.route53 import models as route53_models # noqa -from moto.s3 import models as s3_models # noqa +from moto.s3 import models as s3_models, s3_backend # noqa +from moto.s3.utils import bucket_and_name_from_url from moto.sns import models as sns_models # noqa from moto.sqs import models as sqs_models # noqa # End ugly list of imports -from moto.ec2 import models as ec2_models -from moto.s3 import models as _, s3_backend # noqa -from moto.s3.utils import bucket_and_name_from_url from moto.core import ACCOUNT_ID, CloudFormationModel from .utils import random_suffix from .exceptions import ( @@ -212,7 +211,6 @@ def clean_json(resource_json, resources_map): def resource_class_from_type(resource_type): if resource_type in NULL_MODELS: return None - if resource_type not in MODEL_MAP: logger.warning("No Moto CloudFormation support for %s", resource_type) return None @@ -221,6 +219,9 @@ def resource_class_from_type(resource_type): def resource_name_property_from_type(resource_type): + for model in MODEL_LIST: + if model.cloudformation_type() == resource_type: + return model.cloudformation_name_type() return NAME_TYPE_MAP.get(resource_type) @@ -249,7 +250,9 @@ def generate_resource_name(resource_type, stack_name, logical_id): return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix()) -def parse_resource(logical_id, resource_json, resources_map): +def parse_resource( + logical_id, resource_json, resources_map, add_name_to_resource_json=True +): resource_type = resource_json["Type"] resource_class = resource_class_from_type(resource_type) if not resource_class: @@ -261,21 +264,20 @@ def parse_resource(logical_id, resource_json, resources_map): return None resource_json = clean_json(resource_json, resources_map) + 
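+        # Generate a default physical name up front; it is only written into the
+        # template's Properties when add_name_to_resource_json is True (the update
+        # path calls parse_resource with False, so a generated name is not injected).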
resource_name = generate_resource_name( + resource_type, resources_map.get("AWS::StackName"), logical_id + ) resource_name_property = resource_name_property_from_type(resource_type) if resource_name_property: if "Properties" not in resource_json: resource_json["Properties"] = dict() - if resource_name_property not in resource_json["Properties"]: - resource_json["Properties"][ - resource_name_property - ] = generate_resource_name( - resource_type, resources_map.get("AWS::StackName"), logical_id - ) - resource_name = resource_json["Properties"][resource_name_property] - else: - resource_name = generate_resource_name( - resource_type, resources_map.get("AWS::StackName"), logical_id - ) + if ( + add_name_to_resource_json + and resource_name_property not in resource_json["Properties"] + ): + resource_json["Properties"][resource_name_property] = resource_name + if resource_name_property in resource_json["Properties"]: + resource_name = resource_json["Properties"][resource_name_property] return resource_class, resource_json, resource_name @@ -301,7 +303,7 @@ def parse_and_create_resource(logical_id, resource_json, resources_map, region_n def parse_and_update_resource(logical_id, resource_json, resources_map, region_name): resource_class, new_resource_json, new_resource_name = parse_resource( - logical_id, resource_json, resources_map + logical_id, resource_json, resources_map, False ) original_resource = resources_map[logical_id] new_resource = resource_class.update_from_cloudformation_json( diff --git a/moto/core/models.py b/moto/core/models.py index ded6a4fc1..cf78be3f8 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -540,6 +540,7 @@ class BaseModel(object): class CloudFormationModel(BaseModel): @abstractmethod def cloudformation_name_type(self): + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html # This must be implemented as a staticmethod with no parameters # Return None for resources that do not have a name property pass diff --git a/moto/s3/cloud_formation.py b/moto/s3/cloud_formation.py new file mode 100644 index 000000000..0bf6022ef --- /dev/null +++ b/moto/s3/cloud_formation.py @@ -0,0 +1,33 @@ +from collections import OrderedDict + + +def cfn_to_api_encryption(bucket_encryption_properties): + + sse_algorithm = bucket_encryption_properties["ServerSideEncryptionConfiguration"][ + 0 + ]["ServerSideEncryptionByDefault"]["SSEAlgorithm"] + kms_master_key_id = bucket_encryption_properties[ + "ServerSideEncryptionConfiguration" + ][0]["ServerSideEncryptionByDefault"].get("KMSMasterKeyID") + apply_server_side_encryption_by_default = OrderedDict() + apply_server_side_encryption_by_default["SSEAlgorithm"] = sse_algorithm + if kms_master_key_id: + apply_server_side_encryption_by_default["KMSMasterKeyID"] = kms_master_key_id + rule = OrderedDict( + {"ApplyServerSideEncryptionByDefault": apply_server_side_encryption_by_default} + ) + bucket_encryption = OrderedDict( + {"@xmlns": "http://s3.amazonaws.com/doc/2006-03-01/"} + ) + bucket_encryption["Rule"] = rule + return bucket_encryption + + +def is_replacement_update(properties): + properties_requiring_replacement_update = ["BucketName", "ObjectLockEnabled"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index 800601690..70e33fdfb 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -43,6 +43,7 @@ from .exceptions import ( 
WrongPublicAccessBlockAccountIdError, NoSuchUpload, ) +from .cloud_formation import cfn_to_api_encryption, is_replacement_update from .utils import clean_key_name, _VersionedKeyStore MAX_BUCKET_NAME_LENGTH = 63 @@ -1084,8 +1085,54 @@ class FakeBucket(CloudFormationModel): cls, resource_name, cloudformation_json, region_name ): bucket = s3_backend.create_bucket(resource_name, region_name) + + properties = cloudformation_json["Properties"] + + if "BucketEncryption" in properties: + bucket_encryption = cfn_to_api_encryption(properties["BucketEncryption"]) + s3_backend.put_bucket_encryption( + bucket_name=resource_name, encryption=[bucket_encryption] + ) + return bucket + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "BucketEncryption" in properties: + bucket_encryption = cfn_to_api_encryption( + properties["BucketEncryption"] + ) + s3_backend.put_bucket_encryption( + bucket_name=original_resource.name, encryption=[bucket_encryption] + ) + return original_resource + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + bucket_name = properties[cls.cloudformation_name_type()] + s3_backend.delete_bucket(bucket_name) + def to_config_dict(self): """Return the AWS Config JSON format of this S3 bucket. 
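# A minimal usage sketch of the new cfn_to_api_encryption helper; the algorithm and
# key id values below are placeholders. FakeBucket passes the result on to
# s3_backend.put_bucket_encryption as a one-element list.
from moto.s3.cloud_formation import cfn_to_api_encryption

cfn_bucket_encryption = {
    "ServerSideEncryptionConfiguration": [
        {
            "ServerSideEncryptionByDefault": {
                "SSEAlgorithm": "aws:kms",
                "KMSMasterKeyID": "my-key-id",
            }
        }
    ]
}
api_encryption = cfn_to_api_encryption(cfn_bucket_encryption)
# api_encryption == {
#     "@xmlns": "http://s3.amazonaws.com/doc/2006-03-01/",
#     "Rule": {
#         "ApplyServerSideEncryptionByDefault": {
#             "SSEAlgorithm": "aws:kms",
#             "KMSMasterKeyID": "my-key-id",
#         }
#     },
# }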
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 57f745437..c8e3ed4de 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -36,7 +36,7 @@ from nose.tools import assert_raises import sure # noqa -from moto import settings, mock_s3, mock_s3_deprecated, mock_config +from moto import settings, mock_s3, mock_s3_deprecated, mock_config, mock_cloudformation import moto.s3.models as s3model from moto.core.exceptions import InvalidNextTokenException from moto.core.utils import py2_strip_unicode_keys @@ -4686,3 +4686,142 @@ def test_presigned_put_url_with_custom_headers(): s3.delete_object(Bucket=bucket, Key=key) s3.delete_bucket(Bucket=bucket) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_basic(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket", "Properties": {},}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_with_properties(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + bucket_name = "MyBucket" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": bucket_name, + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + }, + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + stack_id = cf.create_stack(StackName="test_stack", TemplateBody=template_json)[ + "StackId" + ] + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=bucket_name) + + encryption = s3.get_bucket_encryption(Bucket=bucket_name) + encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ + "ApplyServerSideEncryptionByDefault" + ]["SSEAlgorithm"].should.equal("AES256") + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_update_no_interruption(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "ServerSideEncryptionByDefault": { + "SSEAlgorithm": "AES256" + } + } + ] + } + }, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + 
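+        # The template change above only touches BucketEncryption, so the update is
+        # applied in place (no replacement) and encryption is added to the existing bucket.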
cf.update_stack(StackName="test_stack", TemplateBody=template_json) + encryption = s3.get_bucket_encryption( + Bucket=stack_description["Outputs"][0]["OutputValue"] + ) + encryption["ServerSideEncryptionConfiguration"]["Rules"][0][ + "ApplyServerSideEncryptionByDefault" + ]["SSEAlgorithm"].should.equal("AES256") + + +@mock_s3 +@mock_cloudformation +def test_s3_bucket_cloudformation_update_replacement(): + s3 = boto3.client("s3", region_name="us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {"testInstance": {"Type": "AWS::S3::Bucket"}}, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.create_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testInstance": { + "Type": "AWS::S3::Bucket", + "Properties": {"BucketName": "MyNewBucketName"}, + } + }, + "Outputs": {"Bucket": {"Value": {"Ref": "testInstance"}}}, + } + template_json = json.dumps(template) + cf.update_stack(StackName="test_stack", TemplateBody=template_json) + stack_description = cf.describe_stacks(StackName="test_stack")["Stacks"][0] + s3.head_bucket(Bucket=stack_description["Outputs"][0]["OutputValue"]) From 252d679b275d840311dec3b337563764c970a966 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= Date: Sun, 2 Aug 2020 11:56:19 +0200 Subject: [PATCH 2/7] Organizations - implement Policy Type functionality (#3207) * Add organizations.enable_policy_type * Add organizations.disable_policy_type * Add support for AISERVICES_OPT_OUT_POLICY --- moto/organizations/exceptions.py | 38 +++ moto/organizations/models.py | 106 ++++++- moto/organizations/responses.py | 10 + moto/organizations/utils.py | 11 +- .../organizations_test_utils.py | 8 +- .../test_organizations_boto3.py | 265 +++++++++++++++++- 6 files changed, 419 insertions(+), 19 deletions(-) diff --git a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py index 2d1ee7328..ca64b9931 100644 --- a/moto/organizations/exceptions.py +++ b/moto/organizations/exceptions.py @@ -74,3 +74,41 @@ class DuplicatePolicyException(JsonRESTError): super(DuplicatePolicyException, self).__init__( "DuplicatePolicyException", "A policy with the same name already exists." ) + + +class PolicyTypeAlreadyEnabledException(JsonRESTError): + code = 400 + + def __init__(self): + super(PolicyTypeAlreadyEnabledException, self).__init__( + "PolicyTypeAlreadyEnabledException", + "The specified policy type is already enabled.", + ) + + +class PolicyTypeNotEnabledException(JsonRESTError): + code = 400 + + def __init__(self): + super(PolicyTypeNotEnabledException, self).__init__( + "PolicyTypeNotEnabledException", + "This operation can be performed only for enabled policy types.", + ) + + +class RootNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(RootNotFoundException, self).__init__( + "RootNotFoundException", "You specified a root that doesn't exist." + ) + + +class TargetNotFoundException(JsonRESTError): + code = 400 + + def __init__(self): + super(TargetNotFoundException, self).__init__( + "TargetNotFoundException", "You specified a target that doesn't exist." 
+ ) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 6c8029e3d..09bd62b79 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -17,6 +17,10 @@ from moto.organizations.exceptions import ( AccountAlreadyRegisteredException, AWSOrganizationsNotInUseException, AccountNotRegisteredException, + RootNotFoundException, + PolicyTypeAlreadyEnabledException, + PolicyTypeNotEnabledException, + TargetNotFoundException, ) @@ -124,6 +128,13 @@ class FakeOrganizationalUnit(BaseModel): class FakeRoot(FakeOrganizationalUnit): + SUPPORTED_POLICY_TYPES = [ + "AISERVICES_OPT_OUT_POLICY", + "BACKUP_POLICY", + "SERVICE_CONTROL_POLICY", + "TAG_POLICY", + ] + def __init__(self, organization, **kwargs): super(FakeRoot, self).__init__(organization, **kwargs) self.type = "ROOT" @@ -141,20 +152,55 @@ class FakeRoot(FakeOrganizationalUnit): "PolicyTypes": self.policy_types, } + def add_policy_type(self, policy_type): + if policy_type not in self.SUPPORTED_POLICY_TYPES: + raise InvalidInputException("You specified an invalid value.") + + if any(type["Type"] == policy_type for type in self.policy_types): + raise PolicyTypeAlreadyEnabledException + + self.policy_types.append({"Type": policy_type, "Status": "ENABLED"}) + + def remove_policy_type(self, policy_type): + if not FakePolicy.supported_policy_type(policy_type): + raise InvalidInputException("You specified an invalid value.") + + if all(type["Type"] != policy_type for type in self.policy_types): + raise PolicyTypeNotEnabledException + + self.policy_types.remove({"Type": policy_type, "Status": "ENABLED"}) + + +class FakePolicy(BaseModel): + SUPPORTED_POLICY_TYPES = [ + "AISERVICES_OPT_OUT_POLICY", + "BACKUP_POLICY", + "SERVICE_CONTROL_POLICY", + "TAG_POLICY", + ] -class FakeServiceControlPolicy(BaseModel): def __init__(self, organization, **kwargs): self.content = kwargs.get("Content") self.description = kwargs.get("Description") self.name = kwargs.get("Name") self.type = kwargs.get("Type") - self.id = utils.make_random_service_control_policy_id() + self.id = utils.make_random_policy_id() self.aws_managed = False self.organization_id = organization.id self.master_account_id = organization.master_account_id - self._arn_format = utils.SCP_ARN_FORMAT self.attachments = [] + if not FakePolicy.supported_policy_type(self.type): + raise InvalidInputException("You specified an invalid value.") + elif self.type == "AISERVICES_OPT_OUT_POLICY": + self._arn_format = utils.AI_POLICY_ARN_FORMAT + elif self.type == "SERVICE_CONTROL_POLICY": + self._arn_format = utils.SCP_ARN_FORMAT + else: + raise NotImplementedError( + "The {0} policy type has not been implemented".format(self.type) + ) + @property def arn(self): return self._arn_format.format( @@ -176,6 +222,10 @@ class FakeServiceControlPolicy(BaseModel): } } + @staticmethod + def supported_policy_type(policy_type): + return policy_type in FakePolicy.SUPPORTED_POLICY_TYPES + class FakeServiceAccess(BaseModel): # List of trusted services, which support trusted access with Organizations @@ -283,6 +333,13 @@ class OrganizationsBackend(BaseBackend): self.services = [] self.admins = [] + def _get_root_by_id(self, root_id): + root = next((ou for ou in self.ou if ou.id == root_id), None) + if not root: + raise RootNotFoundException + + return root + def create_organization(self, **kwargs): self.org = FakeOrganization(kwargs["FeatureSet"]) root_ou = FakeRoot(self.org) @@ -292,7 +349,7 @@ class OrganizationsBackend(BaseBackend): ) master_account.id = self.org.master_account_id 
self.accounts.append(master_account) - default_policy = FakeServiceControlPolicy( + default_policy = FakePolicy( self.org, Name="FullAWSAccess", Description="Allows access to every operation", @@ -452,7 +509,7 @@ class OrganizationsBackend(BaseBackend): ) def create_policy(self, **kwargs): - new_policy = FakeServiceControlPolicy(self.org, **kwargs) + new_policy = FakePolicy(self.org, **kwargs) for policy in self.policies: if kwargs["Name"] == policy.name: raise DuplicatePolicyException @@ -460,7 +517,7 @@ class OrganizationsBackend(BaseBackend): return new_policy.describe() def describe_policy(self, **kwargs): - if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]): + if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]): policy = next( (p for p in self.policies if p.id == kwargs["PolicyId"]), None ) @@ -540,7 +597,13 @@ class OrganizationsBackend(BaseBackend): ) def list_policies_for_target(self, **kwargs): - if re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): + filter = kwargs["Filter"] + + if re.match(utils.ROOT_ID_REGEX, kwargs["TargetId"]): + obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) + if obj is None: + raise TargetNotFoundException + elif re.compile(utils.OU_ID_REGEX).match(kwargs["TargetId"]): obj = next((ou for ou in self.ou if ou.id == kwargs["TargetId"]), None) if obj is None: raise RESTError( @@ -553,14 +616,25 @@ class OrganizationsBackend(BaseBackend): raise AccountNotFoundException else: raise InvalidInputException("You specified an invalid value.") + + if not FakePolicy.supported_policy_type(filter): + raise InvalidInputException("You specified an invalid value.") + + if filter not in ["AISERVICES_OPT_OUT_POLICY", "SERVICE_CONTROL_POLICY"]: + raise NotImplementedError( + "The {0} policy type has not been implemented".format(filter) + ) + return dict( Policies=[ - p.describe()["Policy"]["PolicySummary"] for p in obj.attached_policies + p.describe()["Policy"]["PolicySummary"] + for p in obj.attached_policies + if p.type == filter ] ) def list_targets_for_policy(self, **kwargs): - if re.compile(utils.SCP_ID_REGEX).match(kwargs["PolicyId"]): + if re.compile(utils.POLICY_ID_REGEX).match(kwargs["PolicyId"]): policy = next( (p for p in self.policies if p.id == kwargs["PolicyId"]), None ) @@ -733,5 +807,19 @@ class OrganizationsBackend(BaseBackend): if not admin.services: self.admins.remove(admin) + def enable_policy_type(self, **kwargs): + root = self._get_root_by_id(kwargs["RootId"]) + + root.add_policy_type(kwargs["PolicyType"]) + + return dict(Root=root.describe()) + + def disable_policy_type(self, **kwargs): + root = self._get_root_by_id(kwargs["RootId"]) + + root.remove_policy_type(kwargs["PolicyType"]) + + return dict(Root=root.describe()) + organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index 4689db5d7..ae0bb731b 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -191,3 +191,13 @@ class OrganizationsResponse(BaseResponse): **self.request_params ) ) + + def enable_policy_type(self): + return json.dumps( + self.organizations_backend.enable_policy_type(**self.request_params) + ) + + def disable_policy_type(self): + return json.dumps( + self.organizations_backend.disable_policy_type(**self.request_params) + ) diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py index e71357ce6..cec34834c 100644 --- a/moto/organizations/utils.py +++ b/moto/organizations/utils.py @@ -14,6 +14,9 @@ ACCOUNT_ARN_FORMAT 
= "arn:aws:organizations::{0}:account/{1}/{2}" ROOT_ARN_FORMAT = "arn:aws:organizations::{0}:root/{1}/{2}" OU_ARN_FORMAT = "arn:aws:organizations::{0}:ou/{1}/{2}" SCP_ARN_FORMAT = "arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}" +AI_POLICY_ARN_FORMAT = ( + "arn:aws:organizations::{0}:policy/{1}/aiservices_opt_out_policy/{2}" +) CHARSET = string.ascii_lowercase + string.digits ORG_ID_SIZE = 10 @@ -21,7 +24,7 @@ ROOT_ID_SIZE = 4 ACCOUNT_ID_SIZE = 12 OU_ID_SUFFIX_SIZE = 8 CREATE_ACCOUNT_STATUS_ID_SIZE = 8 -SCP_ID_SIZE = 8 +POLICY_ID_SIZE = 8 EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" ORG_ID_REGEX = r"o-[a-z0-9]{%s}" % ORG_ID_SIZE @@ -29,7 +32,7 @@ ROOT_ID_REGEX = r"r-[a-z0-9]{%s}" % ROOT_ID_SIZE OU_ID_REGEX = r"ou-[a-z0-9]{%s}-[a-z0-9]{%s}" % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE) ACCOUNT_ID_REGEX = r"[0-9]{%s}" % ACCOUNT_ID_SIZE CREATE_ACCOUNT_STATUS_ID_REGEX = r"car-[a-z0-9]{%s}" % CREATE_ACCOUNT_STATUS_ID_SIZE -SCP_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, SCP_ID_SIZE) +POLICY_ID_REGEX = r"%s|p-[a-z0-9]{%s}" % (DEFAULT_POLICY_ID, POLICY_ID_SIZE) def make_random_org_id(): @@ -76,8 +79,8 @@ def make_random_create_account_status_id(): ) -def make_random_service_control_policy_id(): +def make_random_policy_id(): # The regex pattern for a policy ID string requires "p-" followed by # from 8 to 128 lower-case letters or digits. # e.g. 'p-k2av4a8a' - return "p-" + "".join(random.choice(CHARSET) for x in range(SCP_ID_SIZE)) + return "p-" + "".join(random.choice(CHARSET) for x in range(POLICY_ID_SIZE)) diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 12189c530..4c26d788d 100644 --- a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -31,9 +31,9 @@ def test_make_random_create_account_status_id(): create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) -def test_make_random_service_control_policy_id(): - service_control_policy_id = utils.make_random_service_control_policy_id() - service_control_policy_id.should.match(utils.SCP_ID_REGEX) +def test_make_random_policy_id(): + policy_id = utils.make_random_policy_id() + policy_id.should.match(utils.POLICY_ID_REGEX) def validate_organization(response): @@ -128,7 +128,7 @@ def validate_create_account_status(create_status): def validate_policy_summary(org, summary): summary.should.be.a(dict) - summary.should.have.key("Id").should.match(utils.SCP_ID_REGEX) + summary.should.have.key("Id").should.match(utils.POLICY_ID_REGEX) summary.should.have.key("Arn").should.equal( utils.SCP_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], summary["Id"]) ) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 90bee1edb..647236118 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -379,6 +379,30 @@ def test_create_policy(): policy["Content"].should.equal(json.dumps(policy_doc01)) +@mock_organizations +def test_create_policy_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.create_policy( + Content=json.dumps(policy_doc01), + Description="moto", + Name="moto", + Type="MOTO", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("CreatePolicy") + 
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + @mock_organizations def test_describe_policy(): client = boto3.client("organizations", region_name="us-east-1") @@ -468,7 +492,7 @@ def test_delete_policy(): def test_delete_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] - non_existent_policy_id = utils.make_random_service_control_policy_id() + non_existent_policy_id = utils.make_random_policy_id() with assert_raises(ClientError) as e: response = client.delete_policy(PolicyId=non_existent_policy_id) ex = e.exception @@ -571,7 +595,7 @@ def test_update_policy(): def test_update_policy_exception(): client = boto3.client("organizations", region_name="us-east-1") org = client.create_organization(FeatureSet="ALL")["Organization"] - non_existent_policy_id = utils.make_random_service_control_policy_id() + non_existent_policy_id = utils.make_random_policy_id() with assert_raises(ClientError) as e: response = client.update_policy(PolicyId=non_existent_policy_id) ex = e.exception @@ -631,6 +655,7 @@ def test_list_policies_for_target(): def test_list_policies_for_target_exception(): client = boto3.client("organizations", region_name="us-east-1") client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] ou_id = "ou-gi99-i7r8eh2i2" account_id = "126644886543" with assert_raises(ClientError) as e: @@ -664,6 +689,34 @@ def test_list_policies_for_target_exception(): ex.response["Error"]["Code"].should.contain("InvalidInputException") ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + # not existing root + # when + with assert_raises(ClientError) as e: + client.list_policies_for_target( + TargetId="r-0000", Filter="SERVICE_CONTROL_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("ListPoliciesForTarget") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TargetNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a target that doesn't exist." + ) + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.list_policies_for_target(TargetId=root_id, Filter="MOTO") + + # then + ex = e.exception + ex.operation_name.should.equal("ListPoliciesForTarget") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + @mock_organizations def test_list_targets_for_policy(): @@ -1305,3 +1358,211 @@ def test_deregister_delegated_administrator_erros(): ex.response["Error"]["Message"].should.equal( "You specified an unrecognized service principal." 
) + + +@mock_organizations +def test_enable_policy_type(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + + # when + response = client.enable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + root = response["Root"] + root["Id"].should.equal(root_id) + root["Arn"].should.equal( + utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id) + ) + root["Name"].should.equal("Root") + sorted(root["PolicyTypes"], key=lambda x: x["Type"]).should.equal( + [ + {"Type": "AISERVICES_OPT_OUT_POLICY", "Status": "ENABLED"}, + {"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}, + ] + ) + + +@mock_organizations +def test_enable_policy_type_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + + # not existing root + # when + with assert_raises(ClientError) as e: + client.enable_policy_type( + RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("RootNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a root that doesn't exist." + ) + + # enable policy again ('SERVICE_CONTROL_POLICY' is enabled by default) + # when + with assert_raises(ClientError) as e: + client.enable_policy_type(RootId=root_id, PolicyType="SERVICE_CONTROL_POLICY") + + # then + ex = e.exception + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("PolicyTypeAlreadyEnabledException") + ex.response["Error"]["Message"].should.equal( + "The specified policy type is already enabled." 
+ ) + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.enable_policy_type(RootId=root_id, PolicyType="MOTO") + + # then + ex = e.exception + ex.operation_name.should.equal("EnablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_disable_policy_type(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY") + + # when + response = client.disable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + root = response["Root"] + root["Id"].should.equal(root_id) + root["Arn"].should.equal( + utils.ROOT_ARN_FORMAT.format(org["MasterAccountId"], org["Id"], root_id) + ) + root["Name"].should.equal("Root") + root["PolicyTypes"].should.equal( + [{"Type": "SERVICE_CONTROL_POLICY", "Status": "ENABLED"}] + ) + + +@mock_organizations +def test_disable_policy_type_errors(): + # given + client = boto3.client("organizations", region_name="us-east-1") + client.create_organization(FeatureSet="ALL") + root_id = client.list_roots()["Roots"][0]["Id"] + + # not existing root + # when + with assert_raises(ClientError) as e: + client.disable_policy_type( + RootId="r-0000", PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("RootNotFoundException") + ex.response["Error"]["Message"].should.equal( + "You specified a root that doesn't exist." + ) + + # disable not enabled policy + # when + with assert_raises(ClientError) as e: + client.disable_policy_type( + RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY" + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("PolicyTypeNotEnabledException") + ex.response["Error"]["Message"].should.equal( + "This operation can be performed only for enabled policy types." 
+ ) + + # invalid policy type + # when + with assert_raises(ClientError) as e: + client.disable_policy_type(RootId=root_id, PolicyType="MOTO") + + # then + ex = e.exception + ex.operation_name.should.equal("DisablePolicyType") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidInputException") + ex.response["Error"]["Message"].should.equal("You specified an invalid value.") + + +@mock_organizations +def test_aiservices_opt_out_policy(): + # given + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + client.enable_policy_type(RootId=root_id, PolicyType="AISERVICES_OPT_OUT_POLICY") + ai_policy = { + "services": { + "@@operators_allowed_for_child_policies": ["@@none"], + "default": { + "@@operators_allowed_for_child_policies": ["@@none"], + "opt_out_policy": { + "@@operators_allowed_for_child_policies": ["@@none"], + "@@assign": "optOut", + }, + }, + } + } + + # when + response = client.create_policy( + Content=json.dumps(ai_policy), + Description="Opt out of all AI services", + Name="ai-opt-out", + Type="AISERVICES_OPT_OUT_POLICY", + ) + + # then + summary = response["Policy"]["PolicySummary"] + policy_id = summary["Id"] + summary["Id"].should.match(utils.POLICY_ID_REGEX) + summary["Arn"].should.equal( + utils.AI_POLICY_ARN_FORMAT.format( + org["MasterAccountId"], org["Id"], summary["Id"] + ) + ) + summary["Name"].should.equal("ai-opt-out") + summary["Description"].should.equal("Opt out of all AI services") + summary["Type"].should.equal("AISERVICES_OPT_OUT_POLICY") + summary["AwsManaged"].should_not.be.ok + json.loads(response["Policy"]["Content"]).should.equal(ai_policy) + + # when + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + + # then + response = client.list_policies_for_target( + TargetId=root_id, Filter="AISERVICES_OPT_OUT_POLICY" + ) + response["Policies"].should.have.length_of(1) + response["Policies"][0]["Id"].should.equal(policy_id) From 061c609a8f24bd84d0d0892a78074aad5c35b42a Mon Sep 17 00:00:00 2001 From: Ninh Khong Date: Mon, 3 Aug 2020 19:42:42 +0700 Subject: [PATCH 3/7] Fix secretsmanager random password wrong length (#3213) * Enhance function get_parameter by parameter name, version or labels * Fix random password with exclude characters return wrong length --- moto/secretsmanager/utils.py | 3 ++- tests/test_secretsmanager/test_secretsmanager.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/secretsmanager/utils.py b/moto/secretsmanager/utils.py index 6033db613..ab0f584f0 100644 --- a/moto/secretsmanager/utils.py +++ b/moto/secretsmanager/utils.py @@ -51,6 +51,8 @@ def random_password( if include_space: password += " " required_characters += " " + if exclude_characters: + password = _exclude_characters(password, exclude_characters) password = "".join( six.text_type(random.choice(password)) for x in range(password_length) @@ -61,7 +63,6 @@ def random_password( password, required_characters ) - password = _exclude_characters(password, exclude_characters) return password diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 0bd66b128..69e055bb2 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -338,6 +338,7 @@ def test_get_random_exclude_characters_and_symbols(): PasswordLength=20, 
ExcludeCharacters="xyzDje@?!." ) assert any(c in "xyzDje@?!." for c in random_password["RandomPassword"]) == False + assert len(random_password["RandomPassword"]) == 20 @mock_secretsmanager From 99736c3101a4cdc896053a119b0bb45ea921023b Mon Sep 17 00:00:00 2001 From: Yuuki Takahashi <20282867+yktakaha4@users.noreply.github.com> Date: Mon, 3 Aug 2020 23:09:25 +0900 Subject: [PATCH 4/7] fix clear pending messages when call purge_queue (#3208) --- moto/sqs/models.py | 1 + tests/test_sqs/test_sqs.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index a3642c17e..a34e95c4f 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -844,6 +844,7 @@ class SQSBackend(BaseBackend): def purge_queue(self, queue_name): queue = self.get_queue(queue_name) queue._messages = [] + queue._pending_messages = set() def list_dead_letter_source_queues(self, queue_name): dlq = self.get_queue(queue_name) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 61edcaa9b..4de5b9018 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1098,6 +1098,38 @@ def test_purge_action(): queue.count().should.equal(0) +@mock_sqs +def test_purge_queue_before_delete_message(): + client = boto3.client("sqs", region_name="us-east-1") + + create_resp = client.create_queue( + QueueName="test-dlr-queue.fifo", Attributes={"FifoQueue": "true"} + ) + queue_url = create_resp["QueueUrl"] + + client.send_message( + QueueUrl=queue_url, + MessageGroupId="test", + MessageDeduplicationId="first_message", + MessageBody="first_message", + ) + receive_resp1 = client.receive_message(QueueUrl=queue_url) + + # purge before call delete_message + client.purge_queue(QueueUrl=queue_url) + + client.send_message( + QueueUrl=queue_url, + MessageGroupId="test", + MessageDeduplicationId="second_message", + MessageBody="second_message", + ) + receive_resp2 = client.receive_message(QueueUrl=queue_url) + + len(receive_resp2.get("Messages", [])).should.equal(1) + receive_resp2["Messages"][0]["Body"].should.equal("second_message") + + @mock_sqs_deprecated def test_delete_message_after_visibility_timeout(): VISIBILITY_TIMEOUT = 1 From da07adae525a51849b5b18b6f840e979e2d364fc Mon Sep 17 00:00:00 2001 From: jweite Date: Mon, 3 Aug 2020 11:04:05 -0400 Subject: [PATCH 5/7] * Support for CloudFormation update and delete of Kinesis Streams (#3212) * Support for CloudFormation stack resource deletion via backend resource method delete_from_cloudformation_json() via parse_and_delete_resource(). * Correction to the inappropriate inclusion of EndingSequenceNumber in open shards. This attribute should only appear in closed shards. This regretfully prevents confirmation of consistent record counts after split/merge in unit tests. * Added parameters/decorator to CloudFormationModel method declarations to calm-down Pycharm. 
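In practice, open shards can now be told apart from closed ones by the absence of
EndingSequenceNumber in their SequenceNumberRange. A minimal sketch, assuming "shards"
holds the list from describe_stream()["StreamDescription"]["Shards"]:

    active_shards = [
        shard
        for shard in shards
        if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
    ]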
Co-authored-by: Joseph Weitekamp --- moto/cloudformation/parsing.py | 17 +++ moto/core/models.py | 18 ++- moto/kinesis/models.py | 89 +++++++++-- tests/test_kinesis/test_kinesis.py | 36 ++--- .../test_kinesis_cloudformation.py | 144 ++++++++++++++++++ 5 files changed, 268 insertions(+), 36 deletions(-) create mode 100644 tests/test_kinesis/test_kinesis_cloudformation.py diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index a1e1bb18b..272856367 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -649,6 +649,23 @@ class ResourceMap(collections_abc.Mapping): try: if parsed_resource and hasattr(parsed_resource, "delete"): parsed_resource.delete(self._region_name) + else: + resource_name_attribute = ( + parsed_resource.cloudformation_name_type() + if hasattr(parsed_resource, "cloudformation_name_type") + else resource_name_property_from_type(parsed_resource.type) + ) + if resource_name_attribute: + resource_json = self._resource_json_map[ + parsed_resource.logical_resource_id + ] + resource_name = resource_json["Properties"][ + resource_name_attribute + ] + parse_and_delete_resource( + resource_name, resource_json, self, self._region_name + ) + self._parsed_resources.pop(parsed_resource.logical_resource_id) except Exception as e: # skip over dependency violations, and try again in a # second pass diff --git a/moto/core/models.py b/moto/core/models.py index cf78be3f8..ae241322c 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -538,21 +538,25 @@ class BaseModel(object): # Parent class for every Model that can be instantiated by CloudFormation # On subclasses, implement the two methods as @staticmethod to ensure correct behaviour of the CF parser class CloudFormationModel(BaseModel): + @staticmethod @abstractmethod - def cloudformation_name_type(self): + def cloudformation_name_type(): # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html # This must be implemented as a staticmethod with no parameters # Return None for resources that do not have a name property pass + @staticmethod @abstractmethod - def cloudformation_type(self): + def cloudformation_type(): # This must be implemented as a staticmethod with no parameters # See for example https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html return "AWS::SERVICE::RESOURCE" @abstractmethod - def create_from_cloudformation_json(self): + def create_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): # This must be implemented as a classmethod with parameters: # cls, resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json @@ -560,7 +564,9 @@ class CloudFormationModel(BaseModel): pass @abstractmethod - def update_from_cloudformation_json(self): + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name + ): # This must be implemented as a classmethod with parameters: # cls, original_resource, new_resource_name, cloudformation_json, region_name # Extract the resource parameters from the cloudformation json, @@ -569,7 +575,9 @@ class CloudFormationModel(BaseModel): pass @abstractmethod - def delete_from_cloudformation_json(self): + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): # This must be implemented as a classmethod with parameters: # cls, resource_name, cloudformation_json, region_name # Extract the 
resource parameters from the cloudformation json diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index c4b04d924..a9c4f5476 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -53,6 +53,7 @@ class Shard(BaseModel): self.starting_hash = starting_hash self.ending_hash = ending_hash self.records = OrderedDict() + self.is_open = True @property def shard_id(self): @@ -116,29 +117,41 @@ class Shard(BaseModel): return r.sequence_number def to_json(self): - return { + response = { "HashKeyRange": { "EndingHashKey": str(self.ending_hash), "StartingHashKey": str(self.starting_hash), }, "SequenceNumberRange": { - "EndingSequenceNumber": self.get_max_sequence_number(), "StartingSequenceNumber": self.get_min_sequence_number(), }, "ShardId": self.shard_id, } + if not self.is_open: + response["SequenceNumberRange"][ + "EndingSequenceNumber" + ] = self.get_max_sequence_number() + return response class Stream(CloudFormationModel): - def __init__(self, stream_name, shard_count, region): + def __init__(self, stream_name, shard_count, region_name): self.stream_name = stream_name - self.shard_count = shard_count self.creation_datetime = datetime.datetime.now() - self.region = region + self.region = region_name self.account_number = ACCOUNT_ID self.shards = {} self.tags = {} self.status = "ACTIVE" + self.shard_count = None + self.update_shard_count(shard_count) + + def update_shard_count(self, shard_count): + # ToDo: This was extracted from init. It's only accurate for new streams. + # It doesn't (yet) try to accurately mimic the more complex re-sharding behavior. + # It makes the stream as if it had been created with this number of shards. + # Logically consistent, but not what AWS does. + self.shard_count = shard_count step = 2 ** 128 // shard_count hash_ranges = itertools.chain( @@ -146,7 +159,6 @@ class Stream(CloudFormationModel): [(shard_count - 1, (shard_count - 1) * step, 2 ** 128)], ) for index, start, end in hash_ranges: - shard = Shard(index, start, end) self.shards[shard.shard_id] = shard @@ -229,10 +241,65 @@ class Stream(CloudFormationModel): def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): - properties = cloudformation_json["Properties"] - region = properties.get("Region", "us-east-1") + properties = cloudformation_json.get("Properties", {}) shard_count = properties.get("ShardCount", 1) - return Stream(properties["Name"], shard_count, region) + name = properties.get("Name", resource_name) + backend = kinesis_backends[region_name] + return backend.create_stream(name, shard_count, region_name) + + @classmethod + def update_from_cloudformation_json( + cls, original_resource, new_resource_name, cloudformation_json, region_name, + ): + properties = cloudformation_json["Properties"] + + if Stream.is_replacement_update(properties): + resource_name_property = cls.cloudformation_name_type() + if resource_name_property not in properties: + properties[resource_name_property] = new_resource_name + new_resource = cls.create_from_cloudformation_json( + properties[resource_name_property], cloudformation_json, region_name + ) + properties[resource_name_property] = original_resource.name + cls.delete_from_cloudformation_json( + original_resource.name, cloudformation_json, region_name + ) + return new_resource + + else: # No Interruption + if "ShardCount" in properties: + original_resource.update_shard_count(properties["ShardCount"]) + return original_resource + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, 
cloudformation_json, region_name + ): + backend = kinesis_backends[region_name] + properties = cloudformation_json.get("Properties", {}) + stream_name = properties.get(cls.cloudformation_name_type(), resource_name) + backend.delete_stream(stream_name) + + @staticmethod + def is_replacement_update(properties): + properties_requiring_replacement_update = ["BucketName", "ObjectLockEnabled"] + return any( + [ + property_requiring_replacement in properties + for property_requiring_replacement in properties_requiring_replacement_update + ] + ) + + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.arn + raise UnformattedGetAttTemplateException() + + @property + def physical_resource_id(self): + return self.stream_name class FirehoseRecord(BaseModel): @@ -331,10 +398,10 @@ class KinesisBackend(BaseBackend): self.streams = OrderedDict() self.delivery_streams = {} - def create_stream(self, stream_name, shard_count, region): + def create_stream(self, stream_name, shard_count, region_name): if stream_name in self.streams: raise ResourceInUseError(stream_name) - stream = Stream(stream_name, shard_count, region) + stream = Stream(stream_name, shard_count, region_name) self.streams[stream_name] = stream return stream diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index b3251bb0f..85f248572 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -10,6 +10,8 @@ from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentEx from moto import mock_kinesis, mock_kinesis_deprecated from moto.core import ACCOUNT_ID +import sure # noqa + @mock_kinesis_deprecated def test_create_cluster(): @@ -601,9 +603,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(2) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) shard_range = shards[0]["HashKeyRange"] new_starting_hash = ( @@ -616,9 +615,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(3) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) shard_range = shards[2]["HashKeyRange"] new_starting_hash = ( @@ -631,9 +627,6 @@ def test_split_shard(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(4) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) @mock_kinesis_deprecated @@ -662,9 +655,6 @@ def test_merge_shards(): stream = stream_response["StreamDescription"] shards = stream["Shards"] shards.should.have.length_of(4) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) conn.merge_shards(stream_name, "shardId-000000000000", "shardId-000000000001") @@ -672,17 +662,23 @@ def test_merge_shards(): stream = stream_response["StreamDescription"] shards = stream["Shards"] - shards.should.have.length_of(3) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) + active_shards = [ + shard + for shard in shards + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + active_shards.should.have.length_of(3) + conn.merge_shards(stream_name, "shardId-000000000002", "shardId-000000000000") 
stream_response = conn.describe_stream(stream_name) stream = stream_response["StreamDescription"] shards = stream["Shards"] - shards.should.have.length_of(2) - sum( - [shard["SequenceNumberRange"]["EndingSequenceNumber"] for shard in shards] - ).should.equal(99) + active_shards = [ + shard + for shard in shards + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + + active_shards.should.have.length_of(2) diff --git a/tests/test_kinesis/test_kinesis_cloudformation.py b/tests/test_kinesis/test_kinesis_cloudformation.py new file mode 100644 index 000000000..7f3aef0de --- /dev/null +++ b/tests/test_kinesis/test_kinesis_cloudformation.py @@ -0,0 +1,144 @@ +import boto3 +import sure # noqa + +from moto import mock_kinesis, mock_cloudformation + + +@mock_cloudformation +def test_kinesis_cloudformation_create_stream(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = '{"Resources":{"MyStream":{"Type":"AWS::Kinesis::Stream"}}}' + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + + provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[ + "StackResourceSummaries" + ][0] + provisioned_resource["LogicalResourceId"].should.equal("MyStream") + len(provisioned_resource["PhysicalResourceId"]).should.be.greater_than(0) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_get_attr(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream +Outputs: + StreamName: + Value: !Ref TheStream + StreamArn: + Value: !GetAtt TheStream.Arn +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + output_stream_name = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "StreamName" + ][0] + output_stream_arn = [ + output["OutputValue"] + for output in stack_description["Outputs"] + if output["OutputKey"] == "StreamArn" + ][0] + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName=output_stream_name)[ + "StreamDescription" + ] + output_stream_arn.should.equal(stream_description["StreamARN"]) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_update(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + Name: MyStream + ShardCount: 4 +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + stack_description["StackName"].should.equal(stack_name) + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + shards_provisioned = len( + [ + shard + for shard in stream_description["Shards"] + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + ) + shards_provisioned.should.equal(4) + + template = """ + Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + ShardCount: 6 + """.strip() + cf_conn.update_stack(StackName=stack_name, TemplateBody=template) + + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + 
shards_provisioned = len( + [ + shard + for shard in stream_description["Shards"] + if "EndingSequenceNumber" not in shard["SequenceNumberRange"] + ] + ) + shards_provisioned.should.equal(6) + + +@mock_cloudformation +@mock_kinesis +def test_kinesis_cloudformation_delete(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + + stack_name = "MyStack" + + template = """ +Resources: + TheStream: + Type: AWS::Kinesis::Stream + Properties: + Name: MyStream +""".strip() + + cf_conn.create_stack(StackName=stack_name, TemplateBody=template) + stack_description = cf_conn.describe_stacks(StackName=stack_name)["Stacks"][0] + stack_description["StackName"].should.equal(stack_name) + + kinesis_conn = boto3.client("kinesis", region_name="us-east-1") + stream_description = kinesis_conn.describe_stream(StreamName="MyStream")[ + "StreamDescription" + ] + stream_description["StreamName"].should.equal("MyStream") + + cf_conn.delete_stack(StackName=stack_name) + streams = kinesis_conn.list_streams()["StreamNames"] + len(streams).should.equal(0) From a7ddcd7da314507975246a256a5ebc4aaca1f4be Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 4 Aug 2020 11:20:57 +0530 Subject: [PATCH 6/7] Fix:EC2-authorize_security_group_ingress- add description to IP-Ranges (#3196) * Fix:EC2-authorize_security_group_ingress- add description to IP-Ranges * Fix:EC2-authorize_security_group_ingress- add test when description is not present. * part commit * Fix:fixed build errors * Linting * Allow for Python2 string/unicodes Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 26 +++++++--- moto/ec2/responses/security_groups.py | 12 ++++- tests/test_ec2/test_security_groups.py | 72 ++++++++++++++++++++++++-- 3 files changed, 96 insertions(+), 14 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e6c57dcdd..2498726b8 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -160,7 +160,6 @@ AMIS = _load_resource( or resource_filename(__name__, "resources/amis.json"), ) - OWNER_ID = ACCOUNT_ID @@ -1405,7 +1404,6 @@ class Ami(TaggedEC2Resource): class AmiBackend(object): - AMI_REGEX = re.compile("ami-[a-z0-9]+") def __init__(self): @@ -2118,11 +2116,16 @@ class SecurityGroupBackend(object): vpc_id=None, ): group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id) - if ip_ranges and not isinstance(ip_ranges, list): - ip_ranges = [ip_ranges] + if ip_ranges: + if isinstance(ip_ranges, str) or ( + six.PY2 and isinstance(ip_ranges, unicode) # noqa + ): + ip_ranges = [{"CidrIp": str(ip_ranges)}] + elif not isinstance(ip_ranges, list): + ip_ranges = [json.loads(ip_ranges)] if ip_ranges: for cidr in ip_ranges: - if not is_valid_cidr(cidr): + if not is_valid_cidr(cidr["CidrIp"]): raise InvalidCIDRSubnetError(cidr=cidr) self._verify_group_will_respect_rule_count_limit( @@ -2200,10 +2203,14 @@ class SecurityGroupBackend(object): group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id) if ip_ranges and not isinstance(ip_ranges, list): - ip_ranges = [ip_ranges] + + if isinstance(ip_ranges, str) and "CidrIp" not in ip_ranges: + ip_ranges = [{"CidrIp": ip_ranges}] + else: + ip_ranges = [json.loads(ip_ranges)] if ip_ranges: for cidr in ip_ranges: - if not is_valid_cidr(cidr): + if not is_valid_cidr(cidr["CidrIp"]): raise InvalidCIDRSubnetError(cidr=cidr) self._verify_group_will_respect_rule_count_limit( @@ -2259,9 +2266,13 @@ class SecurityGroupBackend(object): if source_group: source_groups.append(source_group) + for ip in ip_ranges: + ip_ranges = 
[ip.get("CidrIp") if ip.get("CidrIp") == "0.0.0.0/0" else ip] + security_rule = SecurityRule( ip_protocol, from_port, to_port, ip_ranges, source_groups ) + if security_rule in group.egress_rules: group.egress_rules.remove(security_rule) return security_rule @@ -3737,7 +3748,6 @@ class VPCEndPoint(TaggedEC2Resource): tag_specifications=None, private_dns_enabled=None, ): - self.id = id self.vpc_id = vpc_id self.service_name = service_name diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index f0002d5bd..af84b7738 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -20,7 +20,11 @@ def parse_sg_attributes_from_dict(sg_attributes): ip_ranges = [] ip_ranges_tree = sg_attributes.get("IpRanges") or {} for ip_range_idx in sorted(ip_ranges_tree.keys()): - ip_ranges.append(ip_ranges_tree[ip_range_idx]["CidrIp"][0]) + ip_range = {"CidrIp": ip_ranges_tree[ip_range_idx]["CidrIp"][0]} + if ip_ranges_tree[ip_range_idx].get("Description"): + ip_range["Description"] = ip_ranges_tree[ip_range_idx].get("Description")[0] + + ip_ranges.append(ip_range) source_groups = [] source_group_ids = [] @@ -61,6 +65,7 @@ class SecurityGroups(BaseResponse): source_groups, source_group_ids, ) = parse_sg_attributes_from_dict(querytree) + yield ( group_name_or_id, ip_protocol, @@ -211,7 +216,10 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = ( {% for ip_range in rule.ip_ranges %} - {{ ip_range }} + {{ ip_range['CidrIp'] }} + {% if ip_range['Description'] %} + {{ ip_range['Description'] }} + {% endif %} {% endfor %} diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 7e936b7a5..90f395507 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import copy +import json # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises # noqa @@ -272,9 +273,10 @@ def test_authorize_ip_range_and_revoke(): # There are two egress rules associated with the security group: # the default outbound rule and the new one int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[0].cidr_ip.should.equal( - "123.123.123.123/32" - ) + actual_cidr = egress_security_group.rules_egress[1].grants[0].cidr_ip + # Deal with Python2 dict->unicode, instead of dict->string + actual_cidr = json.loads(actual_cidr.replace("u'", "'").replace("'", '"')) + actual_cidr.should.equal({"CidrIp": "123.123.123.123/32"}) # Wrong Cidr should throw error egress_security_group.revoke.when.called_with( @@ -690,6 +692,68 @@ def test_add_same_rule_twice_throws_error(): sg.authorize_ingress(IpPermissions=ip_permissions) +@mock_ec2 +def test_description_in_ip_permissions(): + ec2 = boto3.resource("ec2", region_name="us-west-1") + conn = boto3.client("ec2", region_name="us-east-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + sg = conn.create_security_group( + GroupName="sg1", Description="Test security group sg1", VpcId=vpc.id + ) + + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": 27017, + "ToPort": 27017, + "IpRanges": [{"CidrIp": "1.2.3.4/32", "Description": "testDescription"}], + } + ] + conn.authorize_security_group_ingress( + GroupId=sg["GroupId"], IpPermissions=ip_permissions + ) + + result = conn.describe_security_groups(GroupIds=[sg["GroupId"]]) + + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["Description"] + 
== "testDescription" + ) + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"] + == "1.2.3.4/32" + ) + + sg = conn.create_security_group( + GroupName="sg2", Description="Test security group sg1", VpcId=vpc.id + ) + + ip_permissions = [ + { + "IpProtocol": "tcp", + "FromPort": 27017, + "ToPort": 27017, + "IpRanges": [{"CidrIp": "1.2.3.4/32"}], + } + ] + conn.authorize_security_group_ingress( + GroupId=sg["GroupId"], IpPermissions=ip_permissions + ) + + result = conn.describe_security_groups(GroupIds=[sg["GroupId"]]) + + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0].get( + "Description" + ) + is None + ) + assert ( + result["SecurityGroups"][0]["IpPermissions"][0]["IpRanges"][0]["CidrIp"] + == "1.2.3.4/32" + ) + + @mock_ec2 def test_security_group_tagging_boto3(): conn = boto3.client("ec2", region_name="us-east-1") @@ -868,7 +932,7 @@ def test_revoke_security_group_egress(): { "FromPort": 0, "IpProtocol": "-1", - "IpRanges": [{"CidrIp": "0.0.0.0/0"},], + "IpRanges": [{"CidrIp": "0.0.0.0/0"}], "ToPort": 123, }, ] From 9894e1785a610f1dd8c9bfce1fa416692b3e0c2f Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Thu, 6 Aug 2020 10:56:44 +0530 Subject: [PATCH 7/7] Enhancement : Ec2 - Add describe-vpc-endpoint-services method support. (#3108) * Enhancement : Ec2 - Add describe-vpc-endpoint-services method support. * Fix:EC2-describe_vpc_endPoint_services changed the template * Fixed comments * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 15 +++++++++++++++ moto/ec2/responses/vpcs.py | 37 +++++++++++++++++++++++++++++++++++++ tests/test_ec2/test_vpcs.py | 31 +++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 2498726b8..63ebd1738 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3074,6 +3074,21 @@ class VPCBackend(object): return vpc_end_point + def get_vpc_end_point_services(self): + vpc_end_point_services = self.vpc_end_points.values() + + services = [] + for value in vpc_end_point_services: + services.append(value.service_name) + + availability_zones = EC2Backend.describe_availability_zones(self) + + return { + "servicesDetails": vpc_end_point_services, + "services": services, + "availability_zones": availability_zones, + } + class VPCPeeringConnectionStatus(object): def __init__(self, code="initiating-request", message=""): diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 59222207d..fc752fa7d 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -191,6 +191,11 @@ class VPCs(BaseResponse): template = self.response_template(CREATE_VPC_END_POINT) return template.render(vpc_end_point=vpc_end_point) + def describe_vpc_endpoint_services(self): + vpc_end_point_services = self.ec2_backend.get_vpc_end_point_services() + template = self.response_template(DESCRIBE_VPC_ENDPOINT_RESPONSE) + return template.render(vpc_end_points=vpc_end_point_services) + CREATE_VPC_RESPONSE = """ @@ -449,3 +454,35 @@ CREATE_VPC_END_POINT = """ + 19a9ff46-7df6-49b8-9726-3df27527089d + + {% for serviceName in vpc_end_points.services %} + {{ serviceName }} + {% endfor %} + + + + {% for service in vpc_end_points.servicesDetails %} + amazon + + + {{ service.type }} + + + + {{ ".".join((service.service_name.split(".")[::-1])) }} + + false + + {% for zone in vpc_end_points.availability_zones %} + {{ zone.name }} + {% endfor %} + + {{ service.service_name }} + true + {% endfor %} + + +""" diff --git 
a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 1bc3ddd98..35705e482 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -825,3 +825,34 @@ def test_describe_classic_link_dns_support_multiple(): assert response.get("Vpcs").sort(key=lambda x: x["VpcId"]) == expected.sort( key=lambda x: x["VpcId"] ) + + +@mock_ec2 +def test_describe_vpc_end_point_services(): + ec2 = boto3.client("ec2", region_name="us-west-1") + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + + route_table = ec2.create_route_table(VpcId=vpc["Vpc"]["VpcId"]) + + ec2.create_vpc_endpoint( + VpcId=vpc["Vpc"]["VpcId"], + ServiceName="com.amazonaws.us-east-1.s3", + RouteTableIds=[route_table["RouteTable"]["RouteTableId"]], + VpcEndpointType="gateway", + ) + + vpc_end_point_services = ec2.describe_vpc_endpoint_services() + + assert vpc_end_point_services.get("ServiceDetails").should.be.true + assert vpc_end_point_services.get("ServiceNames").should.be.true + assert vpc_end_point_services.get("ServiceNames") == ["com.amazonaws.us-east-1.s3"] + assert ( + vpc_end_point_services.get("ServiceDetails")[0] + .get("ServiceType", [])[0] + .get("ServiceType") + == "gateway" + ) + assert vpc_end_point_services.get("ServiceDetails")[0].get("AvailabilityZones") == [ + "us-west-1a", + "us-west-1b", + ]
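
A note on the Kinesis test changes above: after a reshard, DescribeStream still returns the closed parent shards, but only those carry an EndingSequenceNumber in their SequenceNumberRange, so filtering on that key is what counts the currently open shards. The same filter appears three times across these tests; a small helper they could share is sketched below (the function name is illustrative, not part of the patch).

def count_open_shards(stream_description):
    # Closed (parent) shards keep an EndingSequenceNumber after a reshard;
    # open shards have no such key in their SequenceNumberRange.
    return len(
        [
            shard
            for shard in stream_description["Shards"]
            if "EndingSequenceNumber" not in shard["SequenceNumberRange"]
        ]
    )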
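
The string handling added to authorize_security_group_ingress above special-cases Python 2 unicode by hand; six.string_types is (basestring,) on Python 2 and (str,) on Python 3, so it covers both str and unicode without the # noqa escape. A minimal sketch of an equivalent normalization, assuming the same inputs the backend accepts (a bare CIDR string, a JSON-encoded dict, or an already-parsed list of dicts):

import json

import six


def normalize_ip_ranges(ip_ranges):
    # A bare CIDR string (str on Python 3, str/unicode on Python 2)
    # becomes a single-entry list of dicts.
    if isinstance(ip_ranges, six.string_types):
        return [{"CidrIp": str(ip_ranges)}]
    # Anything else that is not already a list is assumed to be a
    # JSON-encoded dict, mirroring the behaviour in the patch.
    if not isinstance(ip_ranges, list):
        return [json.loads(ip_ranges)]
    return ip_ranges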
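
On the revoke_security_group_egress hunk above: the for loop rebinds ip_ranges to a one-element list on every pass, so when more than one range is supplied only the last one survives the normalization. If the intent is simply to store the default route as a bare CIDR string (the form in which the default egress rule is kept) while leaving every other entry untouched, an equivalent that preserves all entries could look like this sketch:

def normalize_egress_ip_ranges(ip_ranges):
    # Keep "0.0.0.0/0" as a bare string so it compares equal to the
    # default egress rule; leave all other entries as dicts.
    return [
        ip["CidrIp"] if ip.get("CidrIp") == "0.0.0.0/0" else ip
        for ip in ip_ranges
    ]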
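
One observation on get_vpc_end_point_services above: the services list gains one entry per endpoint, so two endpoints pointing at the same service would surface that service name twice in the ServiceNames list of the response. If a unique listing is wanted, the aggregation could be written as in the sketch below (same backend attributes as the patch; only the set comprehension is new):

def get_vpc_end_point_services(self):
    vpc_end_point_services = self.vpc_end_points.values()

    # A sorted set keeps each service name unique, however many
    # endpoints reference it.
    services = sorted({value.service_name for value in vpc_end_point_services})

    availability_zones = EC2Backend.describe_availability_zones(self)

    return {
        "servicesDetails": vpc_end_point_services,
        "services": services,
        "availability_zones": availability_zones,
    }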