From 3ae6841b48491d357b38905a2b9c61431a216605 Mon Sep 17 00:00:00 2001
From: Dmytro Kazanzhy
Date: Fri, 28 Jan 2022 01:22:51 +0200
Subject: [PATCH] Local: Add RDS methods: copy_db_snapshot, copy_db_cluster_snapshot, create_db_cluster_snapshot, delete_db_cluster_snapshot, describe_db_cluster_snapshots (#4790)

---
 IMPLEMENTATION_COVERAGE.md              |  10 +-
 moto/rds2/exceptions.py                 |  18 ++
 moto/rds2/models.py                     | 359 ++++++++++++++++++++++--
 moto/rds2/responses.py                  | 138 ++++++++-
 moto/resourcegroupstaggingapi/models.py |  17 +-
 tests/test_rds2/test_rds2.py            |  31 ++
 tests/test_rds2/test_rds2_clusters.py   | 295 ++++++++++++++++++-
 7 files changed, 822 insertions(+), 46 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 4762a4cbf..6ce58cc03 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -3800,7 +3800,7 @@
 ## rds
-14% implemented +17% implemented - [ ] add_role_to_db_cluster - [ ] add_role_to_db_instance @@ -3820,7 +3820,7 @@ - [X] create_db_cluster - [ ] create_db_cluster_endpoint - [ ] create_db_cluster_parameter_group -- [ ] create_db_cluster_snapshot +- [X] create_db_cluster_snapshot - [ ] create_db_instance - [ ] create_db_instance_read_replica - [X] create_db_parameter_group @@ -3837,7 +3837,7 @@ - [X] delete_db_cluster - [ ] delete_db_cluster_endpoint - [ ] delete_db_cluster_parameter_group -- [ ] delete_db_cluster_snapshot +- [X] delete_db_cluster_snapshot - [ ] delete_db_instance - [ ] delete_db_instance_automated_backup - [X] delete_db_parameter_group @@ -3859,7 +3859,7 @@ - [ ] describe_db_cluster_parameter_groups - [ ] describe_db_cluster_parameters - [ ] describe_db_cluster_snapshot_attributes -- [ ] describe_db_cluster_snapshots +- [X] describe_db_cluster_snapshots - [X] describe_db_clusters - [ ] describe_db_engine_versions - [ ] describe_db_instance_automated_backups @@ -3928,7 +3928,7 @@ - [ ] reset_db_cluster_parameter_group - [ ] reset_db_parameter_group - [ ] restore_db_cluster_from_s3 -- [ ] restore_db_cluster_from_snapshot +- [X] restore_db_cluster_from_snapshot - [ ] restore_db_cluster_to_point_in_time - [X] restore_db_instance_from_db_snapshot - [ ] restore_db_instance_from_s3 diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 7615dce86..89b92c0de 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -129,3 +129,21 @@ class DBClusterNotFoundError(RDSClientError): "DBClusterNotFoundFault", "DBCluster {} not found.".format(cluster_identifier), ) + + +class DBClusterSnapshotNotFoundError(RDSClientError): + def __init__(self, snapshot_identifier): + super().__init__( + "DBClusterSnapshotNotFoundFault", + "DBClusterSnapshot {} not found.".format(snapshot_identifier), + ) + + +class DBClusterSnapshotAlreadyExistsError(RDSClientError): + def __init__(self, database_snapshot_identifier): + super().__init__( + "DBClusterSnapshotAlreadyExistsFault", + "Cannot create the snapshot because a snapshot with the identifier {} already exists.".format( + database_snapshot_identifier + ), + ) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 34841b3a5..f6b71e4fa 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -15,6 +15,8 @@ from moto.ec2.models import ec2_backends from .exceptions import ( RDSClientError, DBClusterNotFoundError, + DBClusterSnapshotAlreadyExistsError, + DBClusterSnapshotNotFoundError, DBInstanceNotFoundError, DBSnapshotNotFoundError, DBSecurityGroupNotFoundError, @@ -36,18 +38,30 @@ class Cluster: def __init__(self, **kwargs): self.db_name = kwargs.get("db_name") self.db_cluster_identifier = kwargs.get("db_cluster_identifier") + self.db_cluster_instance_class = kwargs.get("db_cluster_instance_class") self.deletion_protection = kwargs.get("deletion_protection") self.engine = kwargs.get("engine") self.engine_version = kwargs.get("engine_version") if not self.engine_version: - # Set default - self.engine_version = "5.6.mysql_aurora.1.22.5" # TODO: depends on engine + self.engine_version = Cluster.default_engine_version(self.engine) self.engine_mode = kwargs.get("engine_mode") or "provisioned" + self.iops = kwargs.get("iops") self.status = "active" self.region = kwargs.get("region") self.cluster_create_time = iso_8601_datetime_with_milliseconds( datetime.datetime.now() ) + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = True + 
self.storage_type = kwargs.get("storage_type") + if self.storage_type is None: + self.storage_type = Cluster.default_storage_type(iops=self.iops) + self.allocated_storage = kwargs.get("allocated_storage") + if self.allocated_storage is None: + self.allocated_storage = Cluster.default_allocated_storage( + engine=self.engine, storage_type=self.storage_type + ) self.master_username = kwargs.get("master_username") if not self.master_username: raise InvalidParameterValue( @@ -69,7 +83,7 @@ class Cluster: f"{self.region}b", f"{self.region}c", ] - self.parameter_group = kwargs.get("parameter_group") or "default.aurora5.6" + self.parameter_group = kwargs.get("parameter_group") or "default.aurora8.0" self.subnet_group = "default" self.status = "creating" self.url_identifier = "".join( @@ -77,7 +91,9 @@ class Cluster: ) self.endpoint = f"{self.db_cluster_identifier}.cluster-{self.url_identifier}.{self.region}.rds.amazonaws.com" self.reader_endpoint = f"{self.db_cluster_identifier}.cluster-ro-{self.url_identifier}.{self.region}.rds.amazonaws.com" - self.port = kwargs.get("port") or 3306 + self.port = kwargs.get("port") + if self.port is None: + self.port = Cluster.default_port(self.engine) self.preferred_backup_window = "01:37-02:07" self.preferred_maintenance_window = "wed:02:40-wed:03:10" # This should default to the default security group @@ -88,7 +104,13 @@ class Cluster: self.resource_id = "cluster-" + "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(26) ) - self.arn = f"arn:aws:rds:{self.region}:{ACCOUNT_ID}:cluster:{self.db_cluster_identifier}" + self.tags = kwargs.get("tags", []) + + @property + def db_cluster_arn(self): + return "arn:aws:rds:{0}:{1}:cluster:{2}".format( + self.region, ACCOUNT_ID, self.db_cluster_identifier + ) def to_xml(self): template = Template( @@ -113,6 +135,13 @@ class Cluster: false {{ cluster.engine_version }} {{ cluster.port }} + {% if cluster.iops %} + {{ cluster.iops }} + io1 + {% else %} + {{ cluster.storage_type }} + {% endif %} + {{ cluster.db_cluster_instance_class }} {{ cluster.master_username }} {{ cluster.preferred_backup_window }} {{ cluster.preferred_maintenance_window }} @@ -129,20 +158,167 @@ class Cluster: {{ cluster.hosted_zone_id }} false {{ cluster.resource_id }} - {{ cluster.arn }} + {{ cluster.db_cluster_arn }} false {{ cluster.engine_mode }} {{ 'true' if cluster.deletion_protection else 'false' }} false - false + {{ cluster.copy_tags_to_snapshot }} false - + + {%- for tag in cluster.tags -%} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {%- endfor -%} + """ ) return template.render(cluster=self) + @staticmethod + def default_engine_version(engine): + return { + "aurora": "8.0.mysql_aurora.3.01.0", + "mysql": "8.0", + "mariadb": "10.5", + "postgres": "13.5", + "oracle-ee": "19c", + "oracle-se2": "19c", + "oracle-se1": "19c", + "oracle-se": "19c", + "sqlserver-ee": "15.00", + "sqlserver-ex": "15.00", + "sqlserver-se": "15.00", + "sqlserver-web": "15.00", + }[engine] + + @staticmethod + def default_port(engine): + return { + "aurora": 3306, + "mysql": 3306, + "mariadb": 3306, + "postgres": 5432, + "oracle-ee": 1521, + "oracle-se2": 1521, + "oracle-se1": 1521, + "oracle-se": 1521, + "sqlserver-ee": 1433, + "sqlserver-ex": 1433, + "sqlserver-se": 1433, + "sqlserver-web": 1433, + }[engine] + + @staticmethod + def default_storage_type(iops): + if iops is None: + return "gp2" + else: + return "io1" + + @staticmethod + def default_allocated_storage(engine, storage_type): + return { + "aurora": {"gp2": 0, "io1": 0, "standard": 
0}, + "mysql": {"gp2": 20, "io1": 100, "standard": 5}, + "mariadb": {"gp2": 20, "io1": 100, "standard": 5}, + "postgres": {"gp2": 20, "io1": 100, "standard": 5}, + "oracle-ee": {"gp2": 20, "io1": 100, "standard": 10}, + "oracle-se2": {"gp2": 20, "io1": 100, "standard": 10}, + "oracle-se1": {"gp2": 20, "io1": 100, "standard": 10}, + "oracle-se": {"gp2": 20, "io1": 100, "standard": 10}, + "sqlserver-ee": {"gp2": 200, "io1": 200, "standard": 200}, + "sqlserver-ex": {"gp2": 20, "io1": 100, "standard": 20}, + "sqlserver-se": {"gp2": 200, "io1": 200, "standard": 200}, + "sqlserver-web": {"gp2": 20, "io1": 100, "standard": 20}, + }[engine][storage_type] + + def get_tags(self): + return self.tags + + def add_tags(self, tags): + new_keys = [tag_set["Key"] for tag_set in tags] + self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys] + self.tags.extend(tags) + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys] + + +class ClusterSnapshot(BaseModel): + + SUPPORTED_FILTERS = { + "db-cluster-id": FilterDef( + ["cluster.db_cluster_arn", "cluster.db_cluster_identifier"], + "DB Cluster Identifiers", + ), + "db-cluster-snapshot-id": FilterDef( + ["snapshot_id"], "DB Cluster Snapshot Identifiers" + ), + "dbi-resource-id": FilterDef(["database.dbi_resource_id"], "Dbi Resource Ids"), + "snapshot-type": FilterDef(None, "Snapshot Types"), + "engine": FilterDef(["database.engine"], "Engine Names"), + } + + def __init__(self, cluster, snapshot_id, tags): + self.cluster = cluster + self.snapshot_id = snapshot_id + self.tags = tags + self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) + + @property + def snapshot_arn(self): + return "arn:aws:rds:{0}:{1}:snapshot:{2}".format( + self.cluster.region, ACCOUNT_ID, self.snapshot_id + ) + + def to_xml(self): + template = Template( + """ + + {{ snapshot.snapshot_id }} + {{ snapshot.created_at }} + {{ cluster.db_cluster_identifier }} + {{ snapshot.created_at }} + {{ 100 }} + {{ cluster.allocated_storage }} + {{ cluster.master_username }} + {{ cluster.port }} + {{ cluster.engine }} + available + manual + {{ snapshot.snapshot_arn }} + {{ cluster.region }} + {% if cluster.iops %} + {{ cluster.iops }} + io1 + {% else %} + {{ cluster.storage_type }} + {% endif %} + + {{ cluster.license_model }} + + """ + ) + return template.render(snapshot=self, cluster=self.cluster) + + def get_tags(self): + return self.tags + + def add_tags(self, tags): + new_keys = [tag_set["Key"] for tag_set in tags] + self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys] + self.tags.extend(tags) + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys] + class Database(CloudFormationModel): @@ -657,7 +833,7 @@ class Database(CloudFormationModel): backend.delete_database(self.db_instance_identifier) -class Snapshot(BaseModel): +class DatabaseSnapshot(BaseModel): SUPPORTED_FILTERS = { "db-instance-id": FilterDef( @@ -952,11 +1128,12 @@ class RDS2Backend(BaseBackend): def __init__(self, region): self.region = region self.arn_regex = re_compile( - r"^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$" + r"^arn:aws:rds:.*:[0-9]*:(db|cluster|es|og|pg|ri|secgrp|snapshot|subgrp):.*$" ) self.clusters = OrderedDict() self.databases = OrderedDict() - self.snapshots = OrderedDict() + self.database_snapshots = OrderedDict() + self.cluster_snapshots = OrderedDict() 
self.db_parameter_groups = {} self.option_groups = {} self.security_groups = {} @@ -983,29 +1160,55 @@ class RDS2Backend(BaseBackend): self.databases[database_id] = database return database - def create_snapshot( + def create_database_snapshot( self, db_instance_identifier, db_snapshot_identifier, tags=None ): database = self.databases.get(db_instance_identifier) if not database: raise DBInstanceNotFoundError(db_instance_identifier) - if db_snapshot_identifier in self.snapshots: + if db_snapshot_identifier in self.database_snapshots: raise DBSnapshotAlreadyExistsError(db_snapshot_identifier) - if len(self.snapshots) >= int(os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")): + if len(self.database_snapshots) >= int( + os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100") + ): raise SnapshotQuotaExceededError() if tags is None: tags = list() if database.copy_tags_to_snapshot and not tags: tags = database.get_tags() - snapshot = Snapshot(database, db_snapshot_identifier, tags) - self.snapshots[db_snapshot_identifier] = snapshot + snapshot = DatabaseSnapshot(database, db_snapshot_identifier, tags) + self.database_snapshots[db_snapshot_identifier] = snapshot return snapshot - def delete_snapshot(self, db_snapshot_identifier): - if db_snapshot_identifier not in self.snapshots: + def copy_database_snapshot( + self, source_snapshot_identifier, target_snapshot_identifier, tags=None, + ): + if source_snapshot_identifier not in self.database_snapshots: + raise DBSnapshotNotFoundError(source_snapshot_identifier) + if target_snapshot_identifier in self.database_snapshots: + raise DBSnapshotAlreadyExistsError(target_snapshot_identifier) + if len(self.database_snapshots) >= int( + os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100") + ): + raise SnapshotQuotaExceededError() + + source_snapshot = self.database_snapshots[source_snapshot_identifier] + if tags is None: + tags = source_snapshot.tags + else: + tags = self._merge_tags(source_snapshot.tags, tags) + target_snapshot = DatabaseSnapshot( + source_snapshot.database, target_snapshot_identifier, tags + ) + self.database_snapshots[target_snapshot_identifier] = target_snapshot + + return target_snapshot + + def delete_database_snapshot(self, db_snapshot_identifier): + if db_snapshot_identifier not in self.database_snapshots: raise DBSnapshotNotFoundError(db_snapshot_identifier) - return self.snapshots.pop(db_snapshot_identifier) + return self.database_snapshots.pop(db_snapshot_identifier) def create_database_replica(self, db_kwargs): database_id = db_kwargs["db_instance_identifier"] @@ -1034,10 +1237,10 @@ class RDS2Backend(BaseBackend): raise DBInstanceNotFoundError(db_instance_identifier) return list(databases.values()) - def describe_snapshots( + def describe_database_snapshots( self, db_instance_identifier, db_snapshot_identifier, filters=None ): - snapshots = self.snapshots + snapshots = self.database_snapshots if db_instance_identifier: filters = merge_filters( filters, {"db-instance-id": [db_instance_identifier]} @@ -1047,7 +1250,7 @@ class RDS2Backend(BaseBackend): filters, {"db-snapshot-id": [db_snapshot_identifier]} ) if filters: - snapshots = self._filter_resources(snapshots, filters, Snapshot) + snapshots = self._filter_resources(snapshots, filters, DatabaseSnapshot) if db_snapshot_identifier and not snapshots and not db_instance_identifier: raise DBSnapshotNotFoundError(db_snapshot_identifier) return list(snapshots.values()) @@ -1068,7 +1271,7 @@ class RDS2Backend(BaseBackend): return database def restore_db_instance_from_db_snapshot(self, 
from_snapshot_id, overrides): - snapshot = self.describe_snapshots( + snapshot = self.describe_database_snapshots( db_instance_identifier=None, db_snapshot_identifier=from_snapshot_id )[0] original_database = snapshot.database @@ -1096,7 +1299,9 @@ class RDS2Backend(BaseBackend): if database.status != "available": raise InvalidDBInstanceStateError(db_instance_identifier, "stop") if db_snapshot_identifier: - self.create_snapshot(db_instance_identifier, db_snapshot_identifier) + self.create_database_snapshot( + db_instance_identifier, db_snapshot_identifier + ) database.status = "stopped" return database @@ -1127,7 +1332,7 @@ class RDS2Backend(BaseBackend): "Can't delete Instance with protection enabled" ) if db_snapshot_name: - self.create_snapshot(db_instance_identifier, db_snapshot_name) + self.create_database_snapshot(db_instance_identifier, db_snapshot_name) database = self.databases.pop(db_instance_identifier) if database.is_replica: primary = self.find_db_from_id(database.source_db_identifier) @@ -1430,11 +1635,75 @@ class RDS2Backend(BaseBackend): cluster.status = "available" # Already set the final status in the background return initial_state + def create_cluster_snapshot( + self, db_cluster_identifier, db_snapshot_identifier, tags=None + ): + cluster = self.clusters.get(db_cluster_identifier) + if cluster is None: + raise DBClusterNotFoundError(db_cluster_identifier) + if db_snapshot_identifier in self.cluster_snapshots: + raise DBClusterSnapshotAlreadyExistsError(db_snapshot_identifier) + if len(self.cluster_snapshots) >= int( + os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100") + ): + raise SnapshotQuotaExceededError() + if tags is None: + tags = list() + if cluster.copy_tags_to_snapshot: + tags += cluster.get_tags() + snapshot = ClusterSnapshot(cluster, db_snapshot_identifier, tags) + self.cluster_snapshots[db_snapshot_identifier] = snapshot + return snapshot + + def copy_cluster_snapshot( + self, source_snapshot_identifier, target_snapshot_identifier, tags=None + ): + if source_snapshot_identifier not in self.cluster_snapshots: + raise DBClusterSnapshotNotFoundError(source_snapshot_identifier) + if target_snapshot_identifier in self.cluster_snapshots: + raise DBClusterSnapshotAlreadyExistsError(target_snapshot_identifier) + if len(self.cluster_snapshots) >= int( + os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100") + ): + raise SnapshotQuotaExceededError() + source_snapshot = self.cluster_snapshots[source_snapshot_identifier] + if tags is None: + tags = source_snapshot.tags + else: + tags = self._merge_tags(source_snapshot.tags, tags) + target_snapshot = ClusterSnapshot( + source_snapshot.cluster, target_snapshot_identifier, tags + ) + self.cluster_snapshots[target_snapshot_identifier] = target_snapshot + return target_snapshot + + def delete_cluster_snapshot(self, db_snapshot_identifier): + if db_snapshot_identifier not in self.cluster_snapshots: + raise DBClusterSnapshotNotFoundError(db_snapshot_identifier) + + return self.cluster_snapshots.pop(db_snapshot_identifier) + def describe_db_clusters(self, cluster_identifier): if cluster_identifier: return [self.clusters[cluster_identifier]] return self.clusters.values() + def describe_cluster_snapshots( + self, db_cluster_identifier, db_snapshot_identifier, filters=None + ): + snapshots = self.cluster_snapshots + if db_cluster_identifier: + filters = merge_filters(filters, {"db-cluster-id": [db_cluster_identifier]}) + if db_snapshot_identifier: + filters = merge_filters( + filters, {"db-cluster-snapshot-id": [db_snapshot_identifier]} 
+ ) + if filters: + snapshots = self._filter_resources(snapshots, filters, ClusterSnapshot) + if db_snapshot_identifier and not snapshots and not db_cluster_identifier: + raise DBClusterSnapshotNotFoundError(db_snapshot_identifier) + return list(snapshots.values()) + def delete_db_cluster(self, cluster_identifier): if cluster_identifier in self.clusters: if self.clusters[cluster_identifier].deletion_protection: @@ -1457,6 +1726,18 @@ class RDS2Backend(BaseBackend): cluster.status = "available" # This is the final status - already setting it in the background return temp_state + def restore_db_cluster_from_snapshot(self, from_snapshot_id, overrides): + snapshot = self.describe_cluster_snapshots( + db_cluster_identifier=None, db_snapshot_identifier=from_snapshot_id + )[0] + original_cluster = snapshot.cluster + new_cluster_props = copy.deepcopy(original_cluster.__dict__) + for key, value in overrides.items(): + if value: + new_cluster_props[key] = value + + return self.create_db_cluster(new_cluster_props) + def stop_db_cluster(self, cluster_identifier): if cluster_identifier not in self.clusters: raise DBClusterNotFoundError(cluster_identifier) @@ -1477,6 +1758,9 @@ class RDS2Backend(BaseBackend): if resource_type == "db": # Database if resource_name in self.databases: return self.databases[resource_name].get_tags() + elif resource_type == "cluster": # Cluster + if resource_name in self.clusters: + return self.clusters[resource_name].get_tags() elif resource_type == "es": # Event Subscription # TODO: Complete call to tags on resource type Event # Subscription @@ -1495,8 +1779,10 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].get_tags() elif resource_type == "snapshot": # DB Snapshot - if resource_name in self.snapshots: - return self.snapshots[resource_name].get_tags() + if resource_name in self.database_snapshots: + return self.database_snapshots[resource_name].get_tags() + if resource_name in self.cluster_snapshots: + return self.cluster_snapshots[resource_name].get_tags() elif resource_type == "subgrp": # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].get_tags() @@ -1527,8 +1813,10 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].remove_tags(tag_keys) elif resource_type == "snapshot": # DB Snapshot - if resource_name in self.snapshots: - return self.snapshots[resource_name].remove_tags(tag_keys) + if resource_name in self.database_snapshots: + return self.database_snapshots[resource_name].remove_tags(tag_keys) + if resource_name in self.cluster_snapshots: + return self.cluster_snapshots[resource_name].remove_tags(tag_keys) elif resource_type == "subgrp": # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].remove_tags(tag_keys) @@ -1558,8 +1846,10 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].add_tags(tags) elif resource_type == "snapshot": # DB Snapshot - if resource_name in self.snapshots: - return self.snapshots[resource_name].add_tags(tags) + if resource_name in self.database_snapshots: + return self.database_snapshots[resource_name].add_tags(tags) + if resource_name in self.cluster_snapshots: + return self.cluster_snapshots[resource_name].add_tags(tags) elif resource_type == "subgrp": # DB subnet group if resource_name in self.subnet_groups: return 
self.subnet_groups[resource_name].add_tags(tags) @@ -1580,6 +1870,13 @@ class RDS2Backend(BaseBackend): except ValueError as e: raise InvalidParameterCombination(str(e)) + @staticmethod + def _merge_tags(old_tags: list, new_tags: list): + tags_dict = dict() + tags_dict.update({d["Key"]: d["Value"] for d in old_tags}) + tags_dict.update({d["Key"]: d["Value"] for d in new_tags}) + return [{"Key": k, "Value": v} for k, v in tags_dict.items()] + class OptionGroup(object): def __init__(self, name, engine_name, major_engine_version, description=None): diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index 7d075a342..710eb64c4 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -98,11 +98,17 @@ class RDS2Response(BaseResponse): "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), "engine_mode": self._get_param("EngineMode"), + "allocated_storage": self._get_param("AllocatedStorage"), + "iops": self._get_param("Iops"), + "storage_type": self._get_param("StorageType"), "master_username": self._get_param("MasterUsername"), "master_user_password": self._get_param("MasterUserPassword"), "port": self._get_param("Port"), "parameter_group": self._get_param("DBClusterParameterGroup"), "region": self.region, + "db_cluster_instance_class": self._get_param("DBClusterInstanceClass"), + "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"), + "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")), } def unpack_complex_list_params(self, label, names): @@ -191,17 +197,27 @@ class RDS2Response(BaseResponse): db_instance_identifier = self._get_param("DBInstanceIdentifier") db_snapshot_identifier = self._get_param("DBSnapshotIdentifier") tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")) - snapshot = self.backend.create_snapshot( + snapshot = self.backend.create_database_snapshot( db_instance_identifier, db_snapshot_identifier, tags ) template = self.response_template(CREATE_SNAPSHOT_TEMPLATE) return template.render(snapshot=snapshot) + def copy_db_snapshot(self): + source_snapshot_identifier = self._get_param("SourceDBSnapshotIdentifier") + target_snapshot_identifier = self._get_param("TargetDBSnapshotIdentifier") + tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")) + snapshot = self.backend.copy_database_snapshot( + source_snapshot_identifier, target_snapshot_identifier, tags, + ) + template = self.response_template(COPY_SNAPSHOT_TEMPLATE) + return template.render(snapshot=snapshot) + def describe_db_snapshots(self): db_instance_identifier = self._get_param("DBInstanceIdentifier") db_snapshot_identifier = self._get_param("DBSnapshotIdentifier") filters = filters_from_querystring(self.querystring) - snapshots = self.backend.describe_snapshots( + snapshots = self.backend.describe_database_snapshots( db_instance_identifier, db_snapshot_identifier, filters ) template = self.response_template(DESCRIBE_SNAPSHOTS_TEMPLATE) @@ -209,7 +225,7 @@ class RDS2Response(BaseResponse): def delete_db_snapshot(self): db_snapshot_identifier = self._get_param("DBSnapshotIdentifier") - snapshot = self.backend.delete_snapshot(db_snapshot_identifier) + snapshot = self.backend.delete_database_snapshot(db_snapshot_identifier) template = self.response_template(DELETE_SNAPSHOT_TEMPLATE) return template.render(snapshot=snapshot) @@ -483,6 +499,55 @@ class RDS2Response(BaseResponse): template = self.response_template(STOP_CLUSTER_TEMPLATE) return template.render(cluster=cluster) + def create_db_cluster_snapshot(self): + 
db_cluster_identifier = self._get_param("DBClusterIdentifier") + db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier") + tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")) + snapshot = self.backend.create_cluster_snapshot( + db_cluster_identifier, db_snapshot_identifier, tags + ) + template = self.response_template(CREATE_CLUSTER_SNAPSHOT_TEMPLATE) + return template.render(snapshot=snapshot) + + def copy_db_cluster_snapshot(self): + source_snapshot_identifier = self._get_param( + "SourceDBClusterSnapshotIdentifier" + ) + target_snapshot_identifier = self._get_param( + "TargetDBClusterSnapshotIdentifier" + ) + tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")) + snapshot = self.backend.copy_cluster_snapshot( + source_snapshot_identifier, target_snapshot_identifier, tags, + ) + template = self.response_template(COPY_CLUSTER_SNAPSHOT_TEMPLATE) + return template.render(snapshot=snapshot) + + def describe_db_cluster_snapshots(self): + db_cluster_identifier = self._get_param("DBClusterIdentifier") + db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier") + filters = filters_from_querystring(self.querystring) + snapshots = self.backend.describe_cluster_snapshots( + db_cluster_identifier, db_snapshot_identifier, filters + ) + template = self.response_template(DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE) + return template.render(snapshots=snapshots) + + def delete_db_cluster_snapshot(self): + db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier") + snapshot = self.backend.delete_cluster_snapshot(db_snapshot_identifier) + template = self.response_template(DELETE_CLUSTER_SNAPSHOT_TEMPLATE) + return template.render(snapshot=snapshot) + + def restore_db_cluster_from_snapshot(self): + db_snapshot_identifier = self._get_param("SnapshotIdentifier") + db_kwargs = self._get_db_cluster_kwargs() + new_cluster = self.backend.restore_db_cluster_from_snapshot( + db_snapshot_identifier, db_kwargs + ) + template = self.response_template(RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE) + return template.render(cluster=new_cluster) + CREATE_DATABASE_TEMPLATE = """ @@ -591,6 +656,16 @@ CREATE_SNAPSHOT_TEMPLATE = """ + + {{ snapshot.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" + DESCRIBE_SNAPSHOTS_TEMPLATE = """ @@ -824,7 +899,6 @@ REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = """ {{ cluster.to_xml() }} @@ -867,3 +941,59 @@ STOP_CLUSTER_TEMPLATE = """ + + {{ cluster.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" + +CREATE_CLUSTER_SNAPSHOT_TEMPLATE = """ + + {{ snapshot.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" + +COPY_CLUSTER_SNAPSHOT_TEMPLATE = """ + + {{ snapshot.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" + +DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE = """ + + + {%- for snapshot in snapshots -%} + {{ snapshot.to_xml() }} + {%- endfor -%} + + {% if marker %} + {{ marker }} + {% endif %} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + +""" + +DELETE_CLUSTER_SNAPSHOT_TEMPLATE = """ + + {{ snapshot.to_xml() }} + + + 523e3218-afc7-11c3-90f5-f90431260ab4 + + +""" diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index b4c43e41c..bd37ec0f0 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -383,7 +383,22 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): or "rds" in resource_type_filters or "rds:snapshot" in resource_type_filters ): - for snapshot in self.rds_backend.snapshots.values(): + for 
snapshot in self.rds_backend.database_snapshots.values(): + tags = snapshot.get_tags() + if not tags or not tag_filter(tags): + continue + yield { + "ResourceARN": snapshot.snapshot_arn, + "Tags": tags, + } + + # RDS Cluster Snapshot + if ( + not resource_type_filters + or "rds" in resource_type_filters + or "rds:cluster-snapshot" in resource_type_filters + ): + for snapshot in self.rds_backend.cluster_snapshots.values(): tags = snapshot.get_tags() if not tags or not tag_filter(tags): continue diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index bce663f97..ab792ff7b 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -556,6 +556,37 @@ def test_create_db_snapshots_copy_tags(): ) +@mock_rds2 +def test_copy_db_snapshots(): + conn = boto3.client("rds", region_name="us-west-2") + + conn.create_db_instance( + DBInstanceIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DBName="staging-postgres", + DBInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2", + Port=1234, + DBSecurityGroups=["my_sg"], + ) + + conn.create_db_snapshot( + DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1" + ).get("DBSnapshot") + + target_snapshot = conn.copy_db_snapshot( + SourceDBSnapshotIdentifier="snapshot-1", TargetDBSnapshotIdentifier="snapshot-2" + ).get("DBSnapshot") + + target_snapshot.get("Engine").should.equal("postgres") + target_snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1") + target_snapshot.get("DBSnapshotIdentifier").should.equal("snapshot-2") + result = conn.list_tags_for_resource(ResourceName=target_snapshot["DBSnapshotArn"]) + result["TagList"].should.equal([]) + + @mock_rds2 def test_describe_db_snapshots(): conn = boto3.client("rds", region_name="us-west-2") diff --git a/tests/test_rds2/test_rds2_clusters.py b/tests/test_rds2/test_rds2_clusters.py index 0321fc079..9049bd0b0 100644 --- a/tests/test_rds2/test_rds2_clusters.py +++ b/tests/test_rds2/test_rds2_clusters.py @@ -86,7 +86,7 @@ def test_create_db_cluster__verify_default_properties(): ) cluster.should.have.key("BackupRetentionPeriod").equal(1) cluster.should.have.key("DBClusterIdentifier").equal("cluster-id") - cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora5.6") + cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora8.0") cluster.should.have.key("DBSubnetGroup").equal("default") cluster.should.have.key("Status").equal("creating") cluster.should.have.key("Endpoint").match( @@ -99,7 +99,7 @@ def test_create_db_cluster__verify_default_properties(): cluster.should.have.key("ReaderEndpoint").equal(expected_readonly) cluster.should.have.key("MultiAZ").equal(False) cluster.should.have.key("Engine").equal("aurora") - cluster.should.have.key("EngineVersion").equal("5.6.mysql_aurora.1.22.5") + cluster.should.have.key("EngineVersion").equal("8.0.mysql_aurora.3.01.0") cluster.should.have.key("Port").equal(3306) cluster.should.have.key("MasterUsername").equal("root") cluster.should.have.key("PreferredBackupWindow").equal("01:37-02:07") @@ -140,7 +140,7 @@ def test_create_db_cluster_with_database_name(): cluster = resp["DBCluster"] cluster.should.have.key("DatabaseName").equal("users") cluster.should.have.key("DBClusterIdentifier").equal("cluster-id") - cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora5.6") + cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora8.0") @mock_rds2 @@ -151,7 +151,7 @@ def 
test_create_db_cluster_additional_parameters(): AvailabilityZones=["eu-north-1b"], DBClusterIdentifier="cluster-id", Engine="aurora", - EngineVersion="5.6.mysql_aurora.1.19.2", + EngineVersion="8.0.mysql_aurora.3.01.0", EngineMode="serverless", MasterUsername="root", MasterUserPassword="hunter2_", @@ -163,7 +163,7 @@ def test_create_db_cluster_additional_parameters(): cluster.should.have.key("AvailabilityZones").equal(["eu-north-1b"]) cluster.should.have.key("Engine").equal("aurora") - cluster.should.have.key("EngineVersion").equal("5.6.mysql_aurora.1.19.2") + cluster.should.have.key("EngineVersion").equal("8.0.mysql_aurora.3.01.0") cluster.should.have.key("EngineMode").equal("serverless") cluster.should.have.key("Port").equal(1234) cluster.should.have.key("DeletionProtection").equal(True) @@ -335,3 +335,288 @@ def test_stop_db_cluster_unknown_cluster(): err = ex.value.response["Error"] err["Code"].should.equal("DBClusterNotFoundFault") err["Message"].should.equal("DBCluster cluster-unknown not found.") + + +@mock_rds2 +def test_create_db_cluster_snapshot_fails_for_unknown_cluster(): + conn = boto3.client("rds", region_name="us-west-2") + with pytest.raises(ClientError) as exc: + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + err = exc.value.response["Error"] + err["Message"].should.equal("DBCluster db-primary-1 not found.") + + +@mock_rds2 +def test_create_db_cluster_snapshot(): + conn = boto3.client("rds", region_name="us-west-2") + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + + snapshot = conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="g-1", + ).get("DBClusterSnapshot") + + snapshot.get("Engine").should.equal("postgres") + snapshot.get("DBClusterIdentifier").should.equal("db-primary-1") + snapshot.get("DBClusterSnapshotIdentifier").should.equal("g-1") + result = conn.list_tags_for_resource(ResourceName=snapshot["DBClusterSnapshotArn"]) + result["TagList"].should.equal([]) + + +@mock_rds2 +def test_create_db_cluster_snapshot_copy_tags(): + conn = boto3.client("rds", region_name="us-west-2") + + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}], + CopyTagsToSnapshot=True, + ) + + snapshot = conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="g-1" + ).get("DBClusterSnapshot") + + snapshot.get("Engine").should.equal("postgres") + snapshot.get("DBClusterIdentifier").should.equal("db-primary-1") + snapshot.get("DBClusterSnapshotIdentifier").should.equal("g-1") + # breakpoint() + result = conn.list_tags_for_resource(ResourceName=snapshot["DBClusterSnapshotArn"]) + result["TagList"].should.equal( + [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}] + ) + + +@mock_rds2 +def test_copy_db_cluster_snapshot_fails_for_unknown_snapshot(): + conn = boto3.client("rds", region_name="us-west-2") + + with pytest.raises(ClientError) as exc: + conn.copy_db_cluster_snapshot( + SourceDBClusterSnapshotIdentifier="snapshot-1", + 
TargetDBClusterSnapshotIdentifier="snapshot-2", + ) + + err = exc.value.response["Error"] + err["Message"].should.equal("DBClusterSnapshot snapshot-1 not found.") + + +@mock_rds2 +def test_copy_db_cluster_snapshot(): + conn = boto3.client("rds", region_name="us-west-2") + + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1", + ).get("DBClusterSnapshot") + + target_snapshot = conn.copy_db_cluster_snapshot( + SourceDBClusterSnapshotIdentifier="snapshot-1", + TargetDBClusterSnapshotIdentifier="snapshot-2", + ).get("DBClusterSnapshot") + + target_snapshot.get("Engine").should.equal("postgres") + target_snapshot.get("DBClusterIdentifier").should.equal("db-primary-1") + target_snapshot.get("DBClusterSnapshotIdentifier").should.equal("snapshot-2") + result = conn.list_tags_for_resource( + ResourceName=target_snapshot["DBClusterSnapshotArn"] + ) + result["TagList"].should.equal([]) + + +@mock_rds2 +def test_copy_db_cluster_snapshot_fails_for_existed_target_snapshot(): + conn = boto3.client("rds", region_name="us-west-2") + + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1", + ).get("DBClusterSnapshot") + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-2", + ).get("DBClusterSnapshot") + + with pytest.raises(ClientError) as exc: + conn.copy_db_cluster_snapshot( + SourceDBClusterSnapshotIdentifier="snapshot-1", + TargetDBClusterSnapshotIdentifier="snapshot-2", + ) + + err = exc.value.response["Error"] + err["Message"].should.equal( + "Cannot create the snapshot because a snapshot with the identifier snapshot-2 already exists." 
+ ) + + +@mock_rds2 +def test_describe_db_cluster_snapshots(): + conn = boto3.client("rds", region_name="us-west-2") + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + + created = conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ).get("DBClusterSnapshot") + + created.get("Engine").should.equal("postgres") + + by_database_id = conn.describe_db_cluster_snapshots( + DBClusterIdentifier="db-primary-1" + ).get("DBClusterSnapshots") + by_snapshot_id = conn.describe_db_cluster_snapshots( + DBClusterSnapshotIdentifier="snapshot-1" + ).get("DBClusterSnapshots") + by_snapshot_id.should.equal(by_database_id) + + snapshot = by_snapshot_id[0] + snapshot.should.equal(created) + snapshot.get("Engine").should.equal("postgres") + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-2" + ) + snapshots = conn.describe_db_cluster_snapshots( + DBClusterIdentifier="db-primary-1" + ).get("DBClusterSnapshots") + snapshots.should.have.length_of(2) + + +@mock_rds2 +def test_delete_db_cluster_snapshot(): + conn = boto3.client("rds", region_name="us-west-2") + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + + conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier="snapshot-1") + conn.delete_db_cluster_snapshot(DBClusterSnapshotIdentifier="snapshot-1") + conn.describe_db_cluster_snapshots.when.called_with( + DBClusterSnapshotIdentifier="snapshot-1" + ).should.throw(ClientError) + + +@mock_rds2 +def test_restore_db_cluster_from_snapshot(): + conn = boto3.client("rds", region_name="us-west-2") + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + conn.describe_db_clusters()["DBClusters"].should.have.length_of(1) + + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + + # restore + new_cluster = conn.restore_db_cluster_from_snapshot( + DBClusterIdentifier="db-restore-1", + SnapshotIdentifier="snapshot-1", + Engine="postgres", + )["DBCluster"] + new_cluster["DBClusterIdentifier"].should.equal("db-restore-1") + new_cluster["DBClusterInstanceClass"].should.equal("db.m1.small") + new_cluster["StorageType"].should.equal("gp2") + new_cluster["Engine"].should.equal("postgres") + new_cluster["DatabaseName"].should.equal("staging-postgres") + new_cluster["Port"].should.equal(1234) + + # Verify it exists + conn.describe_db_clusters()["DBClusters"].should.have.length_of(2) + conn.describe_db_clusters(DBClusterIdentifier="db-restore-1")[ + "DBClusters" + ].should.have.length_of(1) + + +@mock_rds2 +def test_restore_db_cluster_from_snapshot_and_override_params(): + conn = boto3.client("rds", region_name="us-west-2") + conn.create_db_cluster( + DBClusterIdentifier="db-primary-1", + AllocatedStorage=10, + 
Engine="postgres", + DatabaseName="staging-postgres", + DBClusterInstanceClass="db.m1.small", + MasterUsername="root", + MasterUserPassword="hunter2000", + Port=1234, + ) + conn.describe_db_clusters()["DBClusters"].should.have.length_of(1) + conn.create_db_cluster_snapshot( + DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1" + ) + + # restore with some updated attributes + new_cluster = conn.restore_db_cluster_from_snapshot( + DBClusterIdentifier="db-restore-1", + SnapshotIdentifier="snapshot-1", + Engine="postgres", + Port=10000, + DBClusterInstanceClass="db.r6g.xlarge", + )["DBCluster"] + new_cluster["DBClusterIdentifier"].should.equal("db-restore-1") + new_cluster["DBClusterParameterGroup"].should.equal("default.aurora8.0") + new_cluster["DBClusterInstanceClass"].should.equal("db.r6g.xlarge") + new_cluster["Port"].should.equal(10000)