Local: Add RDS methods: copy_db_snapshot, copy_db_cluster_snapshot, create_db_cluster_snapshot, delete_db_cluster_snapshot, describe_db_cluster_snapshots (#4790)

Dmytro Kazanzhy 2022-01-28 01:22:51 +02:00 committed by GitHub
parent e9fada8ebd
commit 3ae6841b48
7 changed files with 822 additions and 46 deletions
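
For orientation, here is a minimal usage sketch of the cluster-snapshot calls this commit starts mocking, condensed from the new tests further down (copy_db_snapshot for instance snapshots follows the same pattern); the function name and cluster parameters are illustrative, not part of the change:

import boto3
from moto import mock_rds2


@mock_rds2
def exercise_cluster_snapshots():
    client = boto3.client("rds", region_name="us-west-2")
    client.create_db_cluster(
        DBClusterIdentifier="db-primary-1",
        Engine="postgres",
        MasterUsername="root",
        MasterUserPassword="hunter2000",
        Port=1234,
    )
    # Snapshot the cluster, copy the snapshot, then list and delete.
    client.create_db_cluster_snapshot(
        DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
    )
    client.copy_db_cluster_snapshot(
        SourceDBClusterSnapshotIdentifier="snapshot-1",
        TargetDBClusterSnapshotIdentifier="snapshot-2",
    )
    snapshots = client.describe_db_cluster_snapshots(
        DBClusterIdentifier="db-primary-1"
    )["DBClusterSnapshots"]
    assert len(snapshots) == 2
    client.delete_db_cluster_snapshot(DBClusterSnapshotIdentifier="snapshot-2")


exercise_cluster_snapshots()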

View File

@@ -3800,7 +3800,7 @@
## rds
<details>
<summary>14% implemented</summary>
<summary>17% implemented</summary>
- [ ] add_role_to_db_cluster
- [ ] add_role_to_db_instance
@@ -3820,7 +3820,7 @@
- [X] create_db_cluster
- [ ] create_db_cluster_endpoint
- [ ] create_db_cluster_parameter_group
- [ ] create_db_cluster_snapshot
- [X] create_db_cluster_snapshot
- [ ] create_db_instance
- [ ] create_db_instance_read_replica
- [X] create_db_parameter_group
@@ -3837,7 +3837,7 @@
- [X] delete_db_cluster
- [ ] delete_db_cluster_endpoint
- [ ] delete_db_cluster_parameter_group
- [ ] delete_db_cluster_snapshot
- [X] delete_db_cluster_snapshot
- [ ] delete_db_instance
- [ ] delete_db_instance_automated_backup
- [X] delete_db_parameter_group
@@ -3859,7 +3859,7 @@
- [ ] describe_db_cluster_parameter_groups
- [ ] describe_db_cluster_parameters
- [ ] describe_db_cluster_snapshot_attributes
- [ ] describe_db_cluster_snapshots
- [X] describe_db_cluster_snapshots
- [X] describe_db_clusters
- [ ] describe_db_engine_versions
- [ ] describe_db_instance_automated_backups
@@ -3928,7 +3928,7 @@
- [ ] reset_db_cluster_parameter_group
- [ ] reset_db_parameter_group
- [ ] restore_db_cluster_from_s3
- [ ] restore_db_cluster_from_snapshot
- [X] restore_db_cluster_from_snapshot
- [ ] restore_db_cluster_to_point_in_time
- [X] restore_db_instance_from_db_snapshot
- [ ] restore_db_instance_from_s3

View File

@@ -129,3 +129,21 @@ class DBClusterNotFoundError(RDSClientError):
"DBClusterNotFoundFault",
"DBCluster {} not found.".format(cluster_identifier),
)
class DBClusterSnapshotNotFoundError(RDSClientError):
def __init__(self, snapshot_identifier):
super().__init__(
"DBClusterSnapshotNotFoundFault",
"DBClusterSnapshot {} not found.".format(snapshot_identifier),
)
class DBClusterSnapshotAlreadyExistsError(RDSClientError):
def __init__(self, database_snapshot_identifier):
super().__init__(
"DBClusterSnapshotAlreadyExistsFault",
"Cannot create the snapshot because a snapshot with the identifier {} already exists.".format(
database_snapshot_identifier
),
)

View File

@@ -15,6 +15,8 @@ from moto.ec2.models import ec2_backends
from .exceptions import (
RDSClientError,
DBClusterNotFoundError,
DBClusterSnapshotAlreadyExistsError,
DBClusterSnapshotNotFoundError,
DBInstanceNotFoundError,
DBSnapshotNotFoundError,
DBSecurityGroupNotFoundError,
@@ -36,18 +38,30 @@ class Cluster:
def __init__(self, **kwargs):
self.db_name = kwargs.get("db_name")
self.db_cluster_identifier = kwargs.get("db_cluster_identifier")
self.db_cluster_instance_class = kwargs.get("db_cluster_instance_class")
self.deletion_protection = kwargs.get("deletion_protection")
self.engine = kwargs.get("engine")
self.engine_version = kwargs.get("engine_version")
if not self.engine_version:
# Set default
self.engine_version = "5.6.mysql_aurora.1.22.5" # TODO: depends on engine
self.engine_version = Cluster.default_engine_version(self.engine)
self.engine_mode = kwargs.get("engine_mode") or "provisioned"
self.iops = kwargs.get("iops")
self.status = "active"
self.region = kwargs.get("region")
self.cluster_create_time = iso_8601_datetime_with_milliseconds(
datetime.datetime.now()
)
self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
if self.copy_tags_to_snapshot is None:
self.copy_tags_to_snapshot = True
self.storage_type = kwargs.get("storage_type")
if self.storage_type is None:
self.storage_type = Cluster.default_storage_type(iops=self.iops)
self.allocated_storage = kwargs.get("allocated_storage")
if self.allocated_storage is None:
self.allocated_storage = Cluster.default_allocated_storage(
engine=self.engine, storage_type=self.storage_type
)
self.master_username = kwargs.get("master_username")
if not self.master_username:
raise InvalidParameterValue(
@@ -69,7 +83,7 @@ class Cluster:
f"{self.region}b",
f"{self.region}c",
]
self.parameter_group = kwargs.get("parameter_group") or "default.aurora5.6"
self.parameter_group = kwargs.get("parameter_group") or "default.aurora8.0"
self.subnet_group = "default"
self.status = "creating"
self.url_identifier = "".join(
@@ -77,7 +91,9 @@ class Cluster:
)
self.endpoint = f"{self.db_cluster_identifier}.cluster-{self.url_identifier}.{self.region}.rds.amazonaws.com"
self.reader_endpoint = f"{self.db_cluster_identifier}.cluster-ro-{self.url_identifier}.{self.region}.rds.amazonaws.com"
self.port = kwargs.get("port") or 3306
self.port = kwargs.get("port")
if self.port is None:
self.port = Cluster.default_port(self.engine)
self.preferred_backup_window = "01:37-02:07"
self.preferred_maintenance_window = "wed:02:40-wed:03:10"
# This should default to the default security group
@@ -88,7 +104,13 @@ class Cluster:
self.resource_id = "cluster-" + "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
)
self.arn = f"arn:aws:rds:{self.region}:{ACCOUNT_ID}:cluster:{self.db_cluster_identifier}"
self.tags = kwargs.get("tags", [])
@property
def db_cluster_arn(self):
return "arn:aws:rds:{0}:{1}:cluster:{2}".format(
self.region, ACCOUNT_ID, self.db_cluster_identifier
)
def to_xml(self):
template = Template(
@@ -113,6 +135,13 @@ class Cluster:
<MultiAZ>false</MultiAZ>
<EngineVersion>{{ cluster.engine_version }}</EngineVersion>
<Port>{{ cluster.port }}</Port>
{% if cluster.iops %}
<Iops>{{ cluster.iops }}</Iops>
<StorageType>io1</StorageType>
{% else %}
<StorageType>{{ cluster.storage_type }}</StorageType>
{% endif %}
<DBClusterInstanceClass>{{ cluster.db_cluster_instance_class }}</DBClusterInstanceClass>
<MasterUsername>{{ cluster.master_username }}</MasterUsername>
<PreferredBackupWindow>{{ cluster.preferred_backup_window }}</PreferredBackupWindow>
<PreferredMaintenanceWindow>{{ cluster.preferred_maintenance_window }}</PreferredMaintenanceWindow>
@@ -129,20 +158,167 @@ class Cluster:
<HostedZoneId>{{ cluster.hosted_zone_id }}</HostedZoneId>
<StorageEncrypted>false</StorageEncrypted>
<DbClusterResourceId>{{ cluster.resource_id }}</DbClusterResourceId>
<DBClusterArn>{{ cluster.arn }}</DBClusterArn>
<DBClusterArn>{{ cluster.db_cluster_arn }}</DBClusterArn>
<AssociatedRoles></AssociatedRoles>
<IAMDatabaseAuthenticationEnabled>false</IAMDatabaseAuthenticationEnabled>
<EngineMode>{{ cluster.engine_mode }}</EngineMode>
<DeletionProtection>{{ 'true' if cluster.deletion_protection else 'false' }}</DeletionProtection>
<HttpEndpointEnabled>false</HttpEndpointEnabled>
<CopyTagsToSnapshot>false</CopyTagsToSnapshot>
<CopyTagsToSnapshot>{{ 'true' if cluster.copy_tags_to_snapshot else 'false' }}</CopyTagsToSnapshot>
<CrossAccountClone>false</CrossAccountClone>
<DomainMemberships></DomainMemberships>
<TagList></TagList>
<TagList>
{%- for tag in cluster.tags -%}
<Tag>
<Key>{{ tag['Key'] }}</Key>
<Value>{{ tag['Value'] }}</Value>
</Tag>
{%- endfor -%}
</TagList>
</DBCluster>"""
)
return template.render(cluster=self)
@staticmethod
def default_engine_version(engine):
return {
"aurora": "8.0.mysql_aurora.3.01.0",
"mysql": "8.0",
"mariadb": "10.5",
"postgres": "13.5",
"oracle-ee": "19c",
"oracle-se2": "19c",
"oracle-se1": "19c",
"oracle-se": "19c",
"sqlserver-ee": "15.00",
"sqlserver-ex": "15.00",
"sqlserver-se": "15.00",
"sqlserver-web": "15.00",
}[engine]
@staticmethod
def default_port(engine):
return {
"aurora": 3306,
"mysql": 3306,
"mariadb": 3306,
"postgres": 5432,
"oracle-ee": 1521,
"oracle-se2": 1521,
"oracle-se1": 1521,
"oracle-se": 1521,
"sqlserver-ee": 1433,
"sqlserver-ex": 1433,
"sqlserver-se": 1433,
"sqlserver-web": 1433,
}[engine]
@staticmethod
def default_storage_type(iops):
if iops is None:
return "gp2"
else:
return "io1"
@staticmethod
def default_allocated_storage(engine, storage_type):
return {
"aurora": {"gp2": 0, "io1": 0, "standard": 0},
"mysql": {"gp2": 20, "io1": 100, "standard": 5},
"mariadb": {"gp2": 20, "io1": 100, "standard": 5},
"postgres": {"gp2": 20, "io1": 100, "standard": 5},
"oracle-ee": {"gp2": 20, "io1": 100, "standard": 10},
"oracle-se2": {"gp2": 20, "io1": 100, "standard": 10},
"oracle-se1": {"gp2": 20, "io1": 100, "standard": 10},
"oracle-se": {"gp2": 20, "io1": 100, "standard": 10},
"sqlserver-ee": {"gp2": 200, "io1": 200, "standard": 200},
"sqlserver-ex": {"gp2": 20, "io1": 100, "standard": 20},
"sqlserver-se": {"gp2": 200, "io1": 200, "standard": 200},
"sqlserver-web": {"gp2": 20, "io1": 100, "standard": 20},
}[engine][storage_type]
def get_tags(self):
return self.tags
def add_tags(self, tags):
new_keys = [tag_set["Key"] for tag_set in tags]
self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
self.tags.extend(tags)
return self.tags
def remove_tags(self, tag_keys):
self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
class ClusterSnapshot(BaseModel):
SUPPORTED_FILTERS = {
"db-cluster-id": FilterDef(
["cluster.db_cluster_arn", "cluster.db_cluster_identifier"],
"DB Cluster Identifiers",
),
"db-cluster-snapshot-id": FilterDef(
["snapshot_id"], "DB Cluster Snapshot Identifiers"
),
"dbi-resource-id": FilterDef(["database.dbi_resource_id"], "Dbi Resource Ids"),
"snapshot-type": FilterDef(None, "Snapshot Types"),
"engine": FilterDef(["database.engine"], "Engine Names"),
}
def __init__(self, cluster, snapshot_id, tags):
self.cluster = cluster
self.snapshot_id = snapshot_id
self.tags = tags
self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
@property
def snapshot_arn(self):
return "arn:aws:rds:{0}:{1}:snapshot:{2}".format(
self.cluster.region, ACCOUNT_ID, self.snapshot_id
)
def to_xml(self):
template = Template(
"""
<DBClusterSnapshot>
<DBClusterSnapshotIdentifier>{{ snapshot.snapshot_id }}</DBClusterSnapshotIdentifier>
<SnapshotCreateTime>{{ snapshot.created_at }}</SnapshotCreateTime>
<DBClusterIdentifier>{{ cluster.db_cluster_identifier }}</DBClusterIdentifier>
<ClusterCreateTime>{{ snapshot.created_at }}</ClusterCreateTime>
<PercentProgress>{{ 100 }}</PercentProgress>
<AllocatedStorage>{{ cluster.allocated_storage }}</AllocatedStorage>
<MasterUsername>{{ cluster.master_username }}</MasterUsername>
<Port>{{ cluster.port }}</Port>
<Engine>{{ cluster.engine }}</Engine>
<Status>available</Status>
<SnapshotType>manual</SnapshotType>
<DBClusterSnapshotArn>{{ snapshot.snapshot_arn }}</DBClusterSnapshotArn>
<SourceRegion>{{ cluster.region }}</SourceRegion>
{% if cluster.iops %}
<Iops>{{ cluster.iops }}</Iops>
<StorageType>io1</StorageType>
{% else %}
<StorageType>{{ cluster.storage_type }}</StorageType>
{% endif %}
<Timezone></Timezone>
<LicenseModel>{{ cluster.license_model }}</LicenseModel>
</DBClusterSnapshot>
"""
)
return template.render(snapshot=self, cluster=self.cluster)
def get_tags(self):
return self.tags
def add_tags(self, tags):
new_keys = [tag_set["Key"] for tag_set in tags]
self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
self.tags.extend(tags)
return self.tags
def remove_tags(self, tag_keys):
self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
class Database(CloudFormationModel):
@@ -657,7 +833,7 @@ class Database(CloudFormationModel):
backend.delete_database(self.db_instance_identifier)
class Snapshot(BaseModel):
class DatabaseSnapshot(BaseModel):
SUPPORTED_FILTERS = {
"db-instance-id": FilterDef(
@@ -952,11 +1128,12 @@ class RDS2Backend(BaseBackend):
def __init__(self, region):
self.region = region
self.arn_regex = re_compile(
r"^arn:aws:rds:.*:[0-9]*:(db|es|og|pg|ri|secgrp|snapshot|subgrp):.*$"
r"^arn:aws:rds:.*:[0-9]*:(db|cluster|es|og|pg|ri|secgrp|snapshot|subgrp):.*$"
)
self.clusters = OrderedDict()
self.databases = OrderedDict()
self.snapshots = OrderedDict()
self.database_snapshots = OrderedDict()
self.cluster_snapshots = OrderedDict()
self.db_parameter_groups = {}
self.option_groups = {}
self.security_groups = {}
@@ -983,29 +1160,55 @@ class RDS2Backend(BaseBackend):
self.databases[database_id] = database
return database
def create_snapshot(
def create_database_snapshot(
self, db_instance_identifier, db_snapshot_identifier, tags=None
):
database = self.databases.get(db_instance_identifier)
if not database:
raise DBInstanceNotFoundError(db_instance_identifier)
if db_snapshot_identifier in self.snapshots:
if db_snapshot_identifier in self.database_snapshots:
raise DBSnapshotAlreadyExistsError(db_snapshot_identifier)
if len(self.snapshots) >= int(os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")):
if len(self.database_snapshots) >= int(
os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
):
raise SnapshotQuotaExceededError()
if tags is None:
tags = list()
if database.copy_tags_to_snapshot and not tags:
tags = database.get_tags()
snapshot = Snapshot(database, db_snapshot_identifier, tags)
self.snapshots[db_snapshot_identifier] = snapshot
snapshot = DatabaseSnapshot(database, db_snapshot_identifier, tags)
self.database_snapshots[db_snapshot_identifier] = snapshot
return snapshot
def delete_snapshot(self, db_snapshot_identifier):
if db_snapshot_identifier not in self.snapshots:
def copy_database_snapshot(
self, source_snapshot_identifier, target_snapshot_identifier, tags=None,
):
if source_snapshot_identifier not in self.database_snapshots:
raise DBSnapshotNotFoundError(source_snapshot_identifier)
if target_snapshot_identifier in self.database_snapshots:
raise DBSnapshotAlreadyExistsError(target_snapshot_identifier)
if len(self.database_snapshots) >= int(
os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
):
raise SnapshotQuotaExceededError()
source_snapshot = self.database_snapshots[source_snapshot_identifier]
if tags is None:
tags = source_snapshot.tags
else:
tags = self._merge_tags(source_snapshot.tags, tags)
target_snapshot = DatabaseSnapshot(
source_snapshot.database, target_snapshot_identifier, tags
)
self.database_snapshots[target_snapshot_identifier] = target_snapshot
return target_snapshot
def delete_database_snapshot(self, db_snapshot_identifier):
if db_snapshot_identifier not in self.database_snapshots:
raise DBSnapshotNotFoundError(db_snapshot_identifier)
return self.snapshots.pop(db_snapshot_identifier)
return self.database_snapshots.pop(db_snapshot_identifier)
def create_database_replica(self, db_kwargs):
database_id = db_kwargs["db_instance_identifier"]
@@ -1034,10 +1237,10 @@ class RDS2Backend(BaseBackend):
raise DBInstanceNotFoundError(db_instance_identifier)
return list(databases.values())
def describe_snapshots(
def describe_database_snapshots(
self, db_instance_identifier, db_snapshot_identifier, filters=None
):
snapshots = self.snapshots
snapshots = self.database_snapshots
if db_instance_identifier:
filters = merge_filters(
filters, {"db-instance-id": [db_instance_identifier]}
@@ -1047,7 +1250,7 @@ class RDS2Backend(BaseBackend):
filters, {"db-snapshot-id": [db_snapshot_identifier]}
)
if filters:
snapshots = self._filter_resources(snapshots, filters, Snapshot)
snapshots = self._filter_resources(snapshots, filters, DatabaseSnapshot)
if db_snapshot_identifier and not snapshots and not db_instance_identifier:
raise DBSnapshotNotFoundError(db_snapshot_identifier)
return list(snapshots.values())
@@ -1068,7 +1271,7 @@ class RDS2Backend(BaseBackend):
return database
def restore_db_instance_from_db_snapshot(self, from_snapshot_id, overrides):
snapshot = self.describe_snapshots(
snapshot = self.describe_database_snapshots(
db_instance_identifier=None, db_snapshot_identifier=from_snapshot_id
)[0]
original_database = snapshot.database
@@ -1096,7 +1299,9 @@ class RDS2Backend(BaseBackend):
if database.status != "available":
raise InvalidDBInstanceStateError(db_instance_identifier, "stop")
if db_snapshot_identifier:
self.create_snapshot(db_instance_identifier, db_snapshot_identifier)
self.create_database_snapshot(
db_instance_identifier, db_snapshot_identifier
)
database.status = "stopped"
return database
@@ -1127,7 +1332,7 @@ class RDS2Backend(BaseBackend):
"Can't delete Instance with protection enabled"
)
if db_snapshot_name:
self.create_snapshot(db_instance_identifier, db_snapshot_name)
self.create_database_snapshot(db_instance_identifier, db_snapshot_name)
database = self.databases.pop(db_instance_identifier)
if database.is_replica:
primary = self.find_db_from_id(database.source_db_identifier)
@@ -1430,11 +1635,75 @@ class RDS2Backend(BaseBackend):
cluster.status = "available" # Already set the final status in the background
return initial_state
def create_cluster_snapshot(
self, db_cluster_identifier, db_snapshot_identifier, tags=None
):
cluster = self.clusters.get(db_cluster_identifier)
if cluster is None:
raise DBClusterNotFoundError(db_cluster_identifier)
if db_snapshot_identifier in self.cluster_snapshots:
raise DBClusterSnapshotAlreadyExistsError(db_snapshot_identifier)
if len(self.cluster_snapshots) >= int(
os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
):
raise SnapshotQuotaExceededError()
if tags is None:
tags = list()
if cluster.copy_tags_to_snapshot:
tags += cluster.get_tags()
snapshot = ClusterSnapshot(cluster, db_snapshot_identifier, tags)
self.cluster_snapshots[db_snapshot_identifier] = snapshot
return snapshot
def copy_cluster_snapshot(
self, source_snapshot_identifier, target_snapshot_identifier, tags=None
):
if source_snapshot_identifier not in self.cluster_snapshots:
raise DBClusterSnapshotNotFoundError(source_snapshot_identifier)
if target_snapshot_identifier in self.cluster_snapshots:
raise DBClusterSnapshotAlreadyExistsError(target_snapshot_identifier)
if len(self.cluster_snapshots) >= int(
os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
):
raise SnapshotQuotaExceededError()
source_snapshot = self.cluster_snapshots[source_snapshot_identifier]
if tags is None:
tags = source_snapshot.tags
else:
tags = self._merge_tags(source_snapshot.tags, tags)
target_snapshot = ClusterSnapshot(
source_snapshot.cluster, target_snapshot_identifier, tags
)
self.cluster_snapshots[target_snapshot_identifier] = target_snapshot
return target_snapshot
def delete_cluster_snapshot(self, db_snapshot_identifier):
if db_snapshot_identifier not in self.cluster_snapshots:
raise DBClusterSnapshotNotFoundError(db_snapshot_identifier)
return self.cluster_snapshots.pop(db_snapshot_identifier)
def describe_db_clusters(self, cluster_identifier):
if cluster_identifier:
return [self.clusters[cluster_identifier]]
return self.clusters.values()
def describe_cluster_snapshots(
self, db_cluster_identifier, db_snapshot_identifier, filters=None
):
snapshots = self.cluster_snapshots
if db_cluster_identifier:
filters = merge_filters(filters, {"db-cluster-id": [db_cluster_identifier]})
if db_snapshot_identifier:
filters = merge_filters(
filters, {"db-cluster-snapshot-id": [db_snapshot_identifier]}
)
if filters:
snapshots = self._filter_resources(snapshots, filters, ClusterSnapshot)
if db_snapshot_identifier and not snapshots and not db_cluster_identifier:
raise DBClusterSnapshotNotFoundError(db_snapshot_identifier)
return list(snapshots.values())
def delete_db_cluster(self, cluster_identifier):
if cluster_identifier in self.clusters:
if self.clusters[cluster_identifier].deletion_protection:
@@ -1457,6 +1726,18 @@ class RDS2Backend(BaseBackend):
cluster.status = "available" # This is the final status - already setting it in the background
return temp_state
def restore_db_cluster_from_snapshot(self, from_snapshot_id, overrides):
snapshot = self.describe_cluster_snapshots(
db_cluster_identifier=None, db_snapshot_identifier=from_snapshot_id
)[0]
original_cluster = snapshot.cluster
new_cluster_props = copy.deepcopy(original_cluster.__dict__)
for key, value in overrides.items():
if value:
new_cluster_props[key] = value
return self.create_db_cluster(new_cluster_props)
def stop_db_cluster(self, cluster_identifier):
if cluster_identifier not in self.clusters:
raise DBClusterNotFoundError(cluster_identifier)
@@ -1477,6 +1758,9 @@ class RDS2Backend(BaseBackend):
if resource_type == "db": # Database
if resource_name in self.databases:
return self.databases[resource_name].get_tags()
elif resource_type == "cluster": # Cluster
if resource_name in self.clusters:
return self.clusters[resource_name].get_tags()
elif resource_type == "es": # Event Subscription
# TODO: Complete call to tags on resource type Event
# Subscription
@@ -1495,8 +1779,10 @@ class RDS2Backend(BaseBackend):
if resource_name in self.security_groups:
return self.security_groups[resource_name].get_tags()
elif resource_type == "snapshot": # DB Snapshot
if resource_name in self.snapshots:
return self.snapshots[resource_name].get_tags()
if resource_name in self.database_snapshots:
return self.database_snapshots[resource_name].get_tags()
if resource_name in self.cluster_snapshots:
return self.cluster_snapshots[resource_name].get_tags()
elif resource_type == "subgrp": # DB subnet group
if resource_name in self.subnet_groups:
return self.subnet_groups[resource_name].get_tags()
@@ -1527,8 +1813,10 @@ class RDS2Backend(BaseBackend):
if resource_name in self.security_groups:
return self.security_groups[resource_name].remove_tags(tag_keys)
elif resource_type == "snapshot": # DB Snapshot
if resource_name in self.snapshots:
return self.snapshots[resource_name].remove_tags(tag_keys)
if resource_name in self.database_snapshots:
return self.database_snapshots[resource_name].remove_tags(tag_keys)
if resource_name in self.cluster_snapshots:
return self.cluster_snapshots[resource_name].remove_tags(tag_keys)
elif resource_type == "subgrp": # DB subnet group
if resource_name in self.subnet_groups:
return self.subnet_groups[resource_name].remove_tags(tag_keys)
@@ -1558,8 +1846,10 @@ class RDS2Backend(BaseBackend):
if resource_name in self.security_groups:
return self.security_groups[resource_name].add_tags(tags)
elif resource_type == "snapshot": # DB Snapshot
if resource_name in self.snapshots:
return self.snapshots[resource_name].add_tags(tags)
if resource_name in self.database_snapshots:
return self.database_snapshots[resource_name].add_tags(tags)
if resource_name in self.cluster_snapshots:
return self.cluster_snapshots[resource_name].add_tags(tags)
elif resource_type == "subgrp": # DB subnet group
if resource_name in self.subnet_groups:
return self.subnet_groups[resource_name].add_tags(tags)
@@ -1580,6 +1870,13 @@ class RDS2Backend(BaseBackend):
except ValueError as e:
raise InvalidParameterCombination(str(e))
@staticmethod
def _merge_tags(old_tags: list, new_tags: list):
tags_dict = dict()
tags_dict.update({d["Key"]: d["Value"] for d in old_tags})
tags_dict.update({d["Key"]: d["Value"] for d in new_tags})
return [{"Key": k, "Value": v} for k, v in tags_dict.items()]
class OptionGroup(object):
def __init__(self, name, engine_name, major_engine_version, description=None):

View File

@@ -98,11 +98,17 @@ class RDS2Response(BaseResponse):
"engine": self._get_param("Engine"),
"engine_version": self._get_param("EngineVersion"),
"engine_mode": self._get_param("EngineMode"),
"allocated_storage": self._get_param("AllocatedStorage"),
"iops": self._get_param("Iops"),
"storage_type": self._get_param("StorageType"),
"master_username": self._get_param("MasterUsername"),
"master_user_password": self._get_param("MasterUserPassword"),
"port": self._get_param("Port"),
"parameter_group": self._get_param("DBClusterParameterGroup"),
"region": self.region,
"db_cluster_instance_class": self._get_param("DBClusterInstanceClass"),
"copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
"tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
}
def unpack_complex_list_params(self, label, names):
@@ -191,17 +197,27 @@ class RDS2Response(BaseResponse):
db_instance_identifier = self._get_param("DBInstanceIdentifier")
db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.backend.create_snapshot(
snapshot = self.backend.create_database_snapshot(
db_instance_identifier, db_snapshot_identifier, tags
)
template = self.response_template(CREATE_SNAPSHOT_TEMPLATE)
return template.render(snapshot=snapshot)
def copy_db_snapshot(self):
source_snapshot_identifier = self._get_param("SourceDBSnapshotIdentifier")
target_snapshot_identifier = self._get_param("TargetDBSnapshotIdentifier")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.backend.copy_database_snapshot(
source_snapshot_identifier, target_snapshot_identifier, tags,
)
template = self.response_template(COPY_SNAPSHOT_TEMPLATE)
return template.render(snapshot=snapshot)
def describe_db_snapshots(self):
db_instance_identifier = self._get_param("DBInstanceIdentifier")
db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
filters = filters_from_querystring(self.querystring)
snapshots = self.backend.describe_snapshots(
snapshots = self.backend.describe_database_snapshots(
db_instance_identifier, db_snapshot_identifier, filters
)
template = self.response_template(DESCRIBE_SNAPSHOTS_TEMPLATE)
@@ -209,7 +225,7 @@ class RDS2Response(BaseResponse):
def delete_db_snapshot(self):
db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
snapshot = self.backend.delete_snapshot(db_snapshot_identifier)
snapshot = self.backend.delete_database_snapshot(db_snapshot_identifier)
template = self.response_template(DELETE_SNAPSHOT_TEMPLATE)
return template.render(snapshot=snapshot)
@@ -483,6 +499,55 @@ class RDS2Response(BaseResponse):
template = self.response_template(STOP_CLUSTER_TEMPLATE)
return template.render(cluster=cluster)
def create_db_cluster_snapshot(self):
db_cluster_identifier = self._get_param("DBClusterIdentifier")
db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.backend.create_cluster_snapshot(
db_cluster_identifier, db_snapshot_identifier, tags
)
template = self.response_template(CREATE_CLUSTER_SNAPSHOT_TEMPLATE)
return template.render(snapshot=snapshot)
def copy_db_cluster_snapshot(self):
source_snapshot_identifier = self._get_param(
"SourceDBClusterSnapshotIdentifier"
)
target_snapshot_identifier = self._get_param(
"TargetDBClusterSnapshotIdentifier"
)
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.backend.copy_cluster_snapshot(
source_snapshot_identifier, target_snapshot_identifier, tags,
)
template = self.response_template(COPY_CLUSTER_SNAPSHOT_TEMPLATE)
return template.render(snapshot=snapshot)
def describe_db_cluster_snapshots(self):
db_cluster_identifier = self._get_param("DBClusterIdentifier")
db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
filters = filters_from_querystring(self.querystring)
snapshots = self.backend.describe_cluster_snapshots(
db_cluster_identifier, db_snapshot_identifier, filters
)
template = self.response_template(DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE)
return template.render(snapshots=snapshots)
def delete_db_cluster_snapshot(self):
db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
snapshot = self.backend.delete_cluster_snapshot(db_snapshot_identifier)
template = self.response_template(DELETE_CLUSTER_SNAPSHOT_TEMPLATE)
return template.render(snapshot=snapshot)
def restore_db_cluster_from_snapshot(self):
db_snapshot_identifier = self._get_param("SnapshotIdentifier")
db_kwargs = self._get_db_cluster_kwargs()
new_cluster = self.backend.restore_db_cluster_from_snapshot(
db_snapshot_identifier, db_kwargs
)
template = self.response_template(RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE)
return template.render(cluster=new_cluster)
CREATE_DATABASE_TEMPLATE = """<CreateDBInstanceResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<CreateDBInstanceResult>
@@ -591,6 +656,16 @@ CREATE_SNAPSHOT_TEMPLATE = """<CreateDBSnapshotResponse xmlns="http://rds.amazon
</CreateDBSnapshotResponse>
"""
COPY_SNAPSHOT_TEMPLATE = """<CopyDBSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<CopyDBSnapshotResult>
{{ snapshot.to_xml() }}
</CopyDBSnapshotResult>
<ResponseMetadata>
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab4</RequestId>
</ResponseMetadata>
</CopyDBSnapshotResponse>
"""
DESCRIBE_SNAPSHOTS_TEMPLATE = """<DescribeDBSnapshotsResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<DescribeDBSnapshotsResult>
<DBSnapshots>
@@ -824,7 +899,6 @@ REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = """<RemoveTagsFromResourceResponse xmlns="h
</ResponseMetadata>
</RemoveTagsFromResourceResponse>"""
CREATE_DB_CLUSTER_TEMPLATE = """<CreateDBClusterResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<CreateDBClusterResult>
{{ cluster.to_xml() }}
@@ -867,3 +941,59 @@ STOP_CLUSTER_TEMPLATE = """<StopDBClusterResponse xmlns="http://rds.amazonaws.co
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab8</RequestId>
</ResponseMetadata>
</StopDBClusterResponse>"""
RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE = """<RestoreDBClusterFromDBSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<RestoreDBClusterFromSnapshotResult>
{{ cluster.to_xml() }}
</RestoreDBClusterFromSnapshotResult>
<ResponseMetadata>
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab4</RequestId>
</ResponseMetadata>
</RestoreDBClusterFromDBSnapshotResponse>
"""
CREATE_CLUSTER_SNAPSHOT_TEMPLATE = """<CreateDBClusterSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<CreateDBClusterSnapshotResult>
{{ snapshot.to_xml() }}
</CreateDBClusterSnapshotResult>
<ResponseMetadata>
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab4</RequestId>
</ResponseMetadata>
</CreateDBClusterSnapshotResponse>
"""
COPY_CLUSTER_SNAPSHOT_TEMPLATE = """<CopyDBClusterSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<CopyDBClusterSnapshotResult>
{{ snapshot.to_xml() }}
</CopyDBClusterSnapshotResult>
<ResponseMetadata>
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab4</RequestId>
</ResponseMetadata>
</CopyDBClusterSnapshotResponse>
"""
DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE = """<DescribeDBClusterSnapshotsResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<DescribeDBClusterSnapshotsResult>
<DBClusterSnapshots>
{%- for snapshot in snapshots -%}
{{ snapshot.to_xml() }}
{%- endfor -%}
</DBClusterSnapshots>
{% if marker %}
<Marker>{{ marker }}</Marker>
{% endif %}
</DescribeDBClusterSnapshotsResult>
<ResponseMetadata>
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab4</RequestId>
</ResponseMetadata>
</DescribeDBClusterSnapshotsResponse>"""
DELETE_CLUSTER_SNAPSHOT_TEMPLATE = """<DeleteDBClusterSnapshotResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<DeleteDBClusterSnapshotResult>
{{ snapshot.to_xml() }}
</DeleteDBClusterSnapshotResult>
<ResponseMetadata>
<RequestId>523e3218-afc7-11c3-90f5-f90431260ab4</RequestId>
</ResponseMetadata>
</DeleteDBClusterSnapshotResponse>
"""

View File

@@ -383,7 +383,22 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
or "rds" in resource_type_filters
or "rds:snapshot" in resource_type_filters
):
for snapshot in self.rds_backend.snapshots.values():
for snapshot in self.rds_backend.database_snapshots.values():
tags = snapshot.get_tags()
if not tags or not tag_filter(tags):
continue
yield {
"ResourceARN": snapshot.snapshot_arn,
"Tags": tags,
}
# RDS Cluster Snapshot
if (
not resource_type_filters
or "rds" in resource_type_filters
or "rds:cluster-snapshot" in resource_type_filters
):
for snapshot in self.rds_backend.cluster_snapshots.values():
tags = snapshot.get_tags()
if not tags or not tag_filter(tags):
continue
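
The hunk above is cut off at the diff boundary. Below is a hedged sketch of what this change enables, assuming moto's mock_resourcegroupstaggingapi is stacked with mock_rds2; the decorator names are moto's public mocks, and the tag values and function name are illustrative only:

import boto3
from moto import mock_rds2, mock_resourcegroupstaggingapi


@mock_rds2
@mock_resourcegroupstaggingapi
def find_tagged_cluster_snapshots():
    rds = boto3.client("rds", region_name="us-west-2")
    rds.create_db_cluster(
        DBClusterIdentifier="db-primary-1",
        Engine="postgres",
        MasterUsername="root",
        MasterUserPassword="hunter2000",
        Tags=[{"Key": "env", "Value": "staging"}],
        CopyTagsToSnapshot=True,
    )
    # The snapshot inherits the cluster tags (CopyTagsToSnapshot), so the
    # tagging API can now discover it under the "rds:cluster-snapshot" filter.
    rds.create_db_cluster_snapshot(
        DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
    )
    tagging = boto3.client("resourcegroupstaggingapi", region_name="us-west-2")
    resources = tagging.get_resources(ResourceTypeFilters=["rds:cluster-snapshot"])
    return resources["ResourceTagMappingList"]


print(find_tagged_cluster_snapshots())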

View File

@@ -556,6 +556,37 @@ def test_create_db_snapshots_copy_tags():
)
@mock_rds2
def test_copy_db_snapshots():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_instance(
DBInstanceIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DBName="staging-postgres",
DBInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2",
Port=1234,
DBSecurityGroups=["my_sg"],
)
conn.create_db_snapshot(
DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
).get("DBSnapshot")
target_snapshot = conn.copy_db_snapshot(
SourceDBSnapshotIdentifier="snapshot-1", TargetDBSnapshotIdentifier="snapshot-2"
).get("DBSnapshot")
target_snapshot.get("Engine").should.equal("postgres")
target_snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
target_snapshot.get("DBSnapshotIdentifier").should.equal("snapshot-2")
result = conn.list_tags_for_resource(ResourceName=target_snapshot["DBSnapshotArn"])
result["TagList"].should.equal([])
@mock_rds2
def test_describe_db_snapshots():
conn = boto3.client("rds", region_name="us-west-2")

View File

@@ -86,7 +86,7 @@ def test_create_db_cluster__verify_default_properties():
)
cluster.should.have.key("BackupRetentionPeriod").equal(1)
cluster.should.have.key("DBClusterIdentifier").equal("cluster-id")
cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora5.6")
cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora8.0")
cluster.should.have.key("DBSubnetGroup").equal("default")
cluster.should.have.key("Status").equal("creating")
cluster.should.have.key("Endpoint").match(
@@ -99,7 +99,7 @@ def test_create_db_cluster__verify_default_properties():
cluster.should.have.key("ReaderEndpoint").equal(expected_readonly)
cluster.should.have.key("MultiAZ").equal(False)
cluster.should.have.key("Engine").equal("aurora")
cluster.should.have.key("EngineVersion").equal("5.6.mysql_aurora.1.22.5")
cluster.should.have.key("EngineVersion").equal("8.0.mysql_aurora.3.01.0")
cluster.should.have.key("Port").equal(3306)
cluster.should.have.key("MasterUsername").equal("root")
cluster.should.have.key("PreferredBackupWindow").equal("01:37-02:07")
@@ -140,7 +140,7 @@ def test_create_db_cluster_with_database_name():
cluster = resp["DBCluster"]
cluster.should.have.key("DatabaseName").equal("users")
cluster.should.have.key("DBClusterIdentifier").equal("cluster-id")
cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora5.6")
cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora8.0")
@mock_rds2
@@ -151,7 +151,7 @@ def test_create_db_cluster_additional_parameters():
AvailabilityZones=["eu-north-1b"],
DBClusterIdentifier="cluster-id",
Engine="aurora",
EngineVersion="5.6.mysql_aurora.1.19.2",
EngineVersion="8.0.mysql_aurora.3.01.0",
EngineMode="serverless",
MasterUsername="root",
MasterUserPassword="hunter2_",
@@ -163,7 +163,7 @@ def test_create_db_cluster_additional_parameters():
cluster.should.have.key("AvailabilityZones").equal(["eu-north-1b"])
cluster.should.have.key("Engine").equal("aurora")
cluster.should.have.key("EngineVersion").equal("5.6.mysql_aurora.1.19.2")
cluster.should.have.key("EngineVersion").equal("8.0.mysql_aurora.3.01.0")
cluster.should.have.key("EngineMode").equal("serverless")
cluster.should.have.key("Port").equal(1234)
cluster.should.have.key("DeletionProtection").equal(True)
@@ -335,3 +335,288 @@ def test_stop_db_cluster_unknown_cluster():
err = ex.value.response["Error"]
err["Code"].should.equal("DBClusterNotFoundFault")
err["Message"].should.equal("DBCluster cluster-unknown not found.")
@mock_rds2
def test_create_db_cluster_snapshot_fails_for_unknown_cluster():
conn = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as exc:
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
)
err = exc.value.response["Error"]
err["Message"].should.equal("DBCluster db-primary-1 not found.")
@mock_rds2
def test_create_db_cluster_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
snapshot = conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="g-1",
).get("DBClusterSnapshot")
snapshot.get("Engine").should.equal("postgres")
snapshot.get("DBClusterIdentifier").should.equal("db-primary-1")
snapshot.get("DBClusterSnapshotIdentifier").should.equal("g-1")
result = conn.list_tags_for_resource(ResourceName=snapshot["DBClusterSnapshotArn"])
result["TagList"].should.equal([])
@mock_rds2
def test_create_db_cluster_snapshot_copy_tags():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
CopyTagsToSnapshot=True,
)
snapshot = conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="g-1"
).get("DBClusterSnapshot")
snapshot.get("Engine").should.equal("postgres")
snapshot.get("DBClusterIdentifier").should.equal("db-primary-1")
snapshot.get("DBClusterSnapshotIdentifier").should.equal("g-1")
result = conn.list_tags_for_resource(ResourceName=snapshot["DBClusterSnapshotArn"])
result["TagList"].should.equal(
[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
)
@mock_rds2
def test_copy_db_cluster_snapshot_fails_for_unknown_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as exc:
conn.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier="snapshot-1",
TargetDBClusterSnapshotIdentifier="snapshot-2",
)
err = exc.value.response["Error"]
err["Message"].should.equal("DBClusterSnapshot snapshot-1 not found.")
@mock_rds2
def test_copy_db_cluster_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1",
).get("DBClusterSnapshot")
target_snapshot = conn.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier="snapshot-1",
TargetDBClusterSnapshotIdentifier="snapshot-2",
).get("DBClusterSnapshot")
target_snapshot.get("Engine").should.equal("postgres")
target_snapshot.get("DBClusterIdentifier").should.equal("db-primary-1")
target_snapshot.get("DBClusterSnapshotIdentifier").should.equal("snapshot-2")
result = conn.list_tags_for_resource(
ResourceName=target_snapshot["DBClusterSnapshotArn"]
)
result["TagList"].should.equal([])
@mock_rds2
def test_copy_db_cluster_snapshot_fails_for_existed_target_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1",
).get("DBClusterSnapshot")
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-2",
).get("DBClusterSnapshot")
with pytest.raises(ClientError) as exc:
conn.copy_db_cluster_snapshot(
SourceDBClusterSnapshotIdentifier="snapshot-1",
TargetDBClusterSnapshotIdentifier="snapshot-2",
)
err = exc.value.response["Error"]
err["Message"].should.equal(
"Cannot create the snapshot because a snapshot with the identifier snapshot-2 already exists."
)
@mock_rds2
def test_describe_db_cluster_snapshots():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
created = conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
).get("DBClusterSnapshot")
created.get("Engine").should.equal("postgres")
by_database_id = conn.describe_db_cluster_snapshots(
DBClusterIdentifier="db-primary-1"
).get("DBClusterSnapshots")
by_snapshot_id = conn.describe_db_cluster_snapshots(
DBClusterSnapshotIdentifier="snapshot-1"
).get("DBClusterSnapshots")
by_snapshot_id.should.equal(by_database_id)
snapshot = by_snapshot_id[0]
snapshot.should.equal(created)
snapshot.get("Engine").should.equal("postgres")
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-2"
)
snapshots = conn.describe_db_cluster_snapshots(
DBClusterIdentifier="db-primary-1"
).get("DBClusterSnapshots")
snapshots.should.have.length_of(2)
@mock_rds2
def test_delete_db_cluster_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
)
conn.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier="snapshot-1")
conn.delete_db_cluster_snapshot(DBClusterSnapshotIdentifier="snapshot-1")
conn.describe_db_cluster_snapshots.when.called_with(
DBClusterSnapshotIdentifier="snapshot-1"
).should.throw(ClientError)
@mock_rds2
def test_restore_db_cluster_from_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
conn.describe_db_clusters()["DBClusters"].should.have.length_of(1)
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
)
# restore
new_cluster = conn.restore_db_cluster_from_snapshot(
DBClusterIdentifier="db-restore-1",
SnapshotIdentifier="snapshot-1",
Engine="postgres",
)["DBCluster"]
new_cluster["DBClusterIdentifier"].should.equal("db-restore-1")
new_cluster["DBClusterInstanceClass"].should.equal("db.m1.small")
new_cluster["StorageType"].should.equal("gp2")
new_cluster["Engine"].should.equal("postgres")
new_cluster["DatabaseName"].should.equal("staging-postgres")
new_cluster["Port"].should.equal(1234)
# Verify it exists
conn.describe_db_clusters()["DBClusters"].should.have.length_of(2)
conn.describe_db_clusters(DBClusterIdentifier="db-restore-1")[
"DBClusters"
].should.have.length_of(1)
@mock_rds2
def test_restore_db_cluster_from_snapshot_and_override_params():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
DBClusterIdentifier="db-primary-1",
AllocatedStorage=10,
Engine="postgres",
DatabaseName="staging-postgres",
DBClusterInstanceClass="db.m1.small",
MasterUsername="root",
MasterUserPassword="hunter2000",
Port=1234,
)
conn.describe_db_clusters()["DBClusters"].should.have.length_of(1)
conn.create_db_cluster_snapshot(
DBClusterIdentifier="db-primary-1", DBClusterSnapshotIdentifier="snapshot-1"
)
# restore with some updated attributes
new_cluster = conn.restore_db_cluster_from_snapshot(
DBClusterIdentifier="db-restore-1",
SnapshotIdentifier="snapshot-1",
Engine="postgres",
Port=10000,
DBClusterInstanceClass="db.r6g.xlarge",
)["DBCluster"]
new_cluster["DBClusterIdentifier"].should.equal("db-restore-1")
new_cluster["DBClusterParameterGroup"].should.equal("default.aurora8.0")
new_cluster["DBClusterInstanceClass"].should.equal("db.r6g.xlarge")
new_cluster["Port"].should.equal(10000)