RDS: Various improvements (#6186)

This commit is contained in:
Bert Blommers 2023-04-08 10:33:09 +00:00 committed by GitHub
parent 1111e10b87
commit bab61089a3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 37725 additions and 63 deletions

View File

@ -5141,7 +5141,7 @@
## rds
<details>
<summary>30% implemented</summary>
<summary>37% implemented</summary>
- [ ] add_role_to_db_cluster
- [ ] add_role_to_db_instance
@ -5160,7 +5160,7 @@
- [ ] create_custom_db_engine_version
- [X] create_db_cluster
- [ ] create_db_cluster_endpoint
- [ ] create_db_cluster_parameter_group
- [X] create_db_cluster_parameter_group
- [X] create_db_cluster_snapshot
- [X] create_db_instance
- [X] create_db_instance_read_replica
@ -5171,13 +5171,13 @@
- [X] create_db_snapshot
- [ ] create_db_subnet_group
- [X] create_event_subscription
- [ ] create_global_cluster
- [X] create_global_cluster
- [X] create_option_group
- [ ] delete_blue_green_deployment
- [ ] delete_custom_db_engine_version
- [X] delete_db_cluster
- [ ] delete_db_cluster_endpoint
- [ ] delete_db_cluster_parameter_group
- [X] delete_db_cluster_parameter_group
- [X] delete_db_cluster_snapshot
- [X] delete_db_instance
- [ ] delete_db_instance_automated_backup
@ -5188,7 +5188,7 @@
- [X] delete_db_snapshot
- [ ] delete_db_subnet_group
- [X] delete_event_subscription
- [ ] delete_global_cluster
- [X] delete_global_cluster
- [X] delete_option_group
- [ ] deregister_db_proxy_targets
- [ ] describe_account_attributes
@ -5196,8 +5196,8 @@
- [ ] describe_certificates
- [ ] describe_db_cluster_backtracks
- [ ] describe_db_cluster_endpoints
- [ ] describe_db_cluster_parameter_groups
- [ ] describe_db_cluster_parameters
- [X] describe_db_cluster_parameter_groups
- [X] describe_db_cluster_parameters
- [ ] describe_db_cluster_snapshot_attributes
- [X] describe_db_cluster_snapshots
- [X] describe_db_clusters
@ -5214,14 +5214,14 @@
- [ ] describe_db_security_groups
- [ ] describe_db_snapshot_attributes
- [ ] describe_db_snapshots
- [ ] describe_db_subnet_groups
- [X] describe_db_subnet_groups
- [ ] describe_engine_default_cluster_parameters
- [ ] describe_engine_default_parameters
- [ ] describe_event_categories
- [X] describe_event_subscriptions
- [ ] describe_events
- [X] describe_export_tasks
- [ ] describe_global_clusters
- [X] describe_global_clusters
- [X] describe_option_group_options
- [X] describe_option_groups
- [X] describe_orderable_db_instance_options
@ -5254,12 +5254,12 @@
- [ ] modify_global_cluster
- [X] modify_option_group
- [X] promote_read_replica
- [ ] promote_read_replica_db_cluster
- [X] promote_read_replica_db_cluster
- [ ] purchase_reserved_db_instances_offering
- [ ] reboot_db_cluster
- [X] reboot_db_instance
- [ ] register_db_proxy_targets
- [ ] remove_from_global_cluster
- [X] remove_from_global_cluster
- [ ] remove_role_from_db_cluster
- [ ] remove_role_from_db_instance
- [ ] remove_source_identifier_from_subscription
@ -7108,6 +7108,7 @@
- transfer
- translate
- voice-id
- vpc-lattice
- waf
- waf-regional
- wellarchitected

View File

@ -12,8 +12,6 @@
rds-data
========
.. autoclass:: moto.rdsdata.models.RDSDataServiceBackend
|start-h3| Example usage |end-h3|
.. sourcecode:: python

View File

@ -42,7 +42,7 @@ rds
- [ ] create_custom_db_engine_version
- [X] create_db_cluster
- [ ] create_db_cluster_endpoint
- [ ] create_db_cluster_parameter_group
- [X] create_db_cluster_parameter_group
- [X] create_db_cluster_snapshot
- [X] create_db_instance
- [X] create_db_instance_read_replica
@ -53,13 +53,13 @@ rds
- [X] create_db_snapshot
- [ ] create_db_subnet_group
- [X] create_event_subscription
- [ ] create_global_cluster
- [X] create_global_cluster
- [X] create_option_group
- [ ] delete_blue_green_deployment
- [ ] delete_custom_db_engine_version
- [X] delete_db_cluster
- [ ] delete_db_cluster_endpoint
- [ ] delete_db_cluster_parameter_group
- [X] delete_db_cluster_parameter_group
- [X] delete_db_cluster_snapshot
- [X] delete_db_instance
- [ ] delete_db_instance_automated_backup
@ -70,7 +70,7 @@ rds
- [X] delete_db_snapshot
- [ ] delete_db_subnet_group
- [X] delete_event_subscription
- [ ] delete_global_cluster
- [X] delete_global_cluster
- [X] delete_option_group
- [ ] deregister_db_proxy_targets
- [ ] describe_account_attributes
@ -78,8 +78,8 @@ rds
- [ ] describe_certificates
- [ ] describe_db_cluster_backtracks
- [ ] describe_db_cluster_endpoints
- [ ] describe_db_cluster_parameter_groups
- [ ] describe_db_cluster_parameters
- [X] describe_db_cluster_parameter_groups
- [X] describe_db_cluster_parameters
- [ ] describe_db_cluster_snapshot_attributes
- [X] describe_db_cluster_snapshots
- [X] describe_db_clusters
@ -96,19 +96,19 @@ rds
- [ ] describe_db_security_groups
- [ ] describe_db_snapshot_attributes
- [ ] describe_db_snapshots
- [ ] describe_db_subnet_groups
- [X] describe_db_subnet_groups
- [ ] describe_engine_default_cluster_parameters
- [ ] describe_engine_default_parameters
- [ ] describe_event_categories
- [X] describe_event_subscriptions
- [ ] describe_events
- [X] describe_export_tasks
- [ ] describe_global_clusters
- [X] describe_global_clusters
- [X] describe_option_group_options
- [X] describe_option_groups
- [X] describe_orderable_db_instance_options
Only the Neptune-engine is currently implemented
Only the Aurora-Postgresql and Neptune-engine is currently implemented
- [ ] describe_pending_maintenance_actions
@ -140,12 +140,12 @@ rds
- [ ] modify_global_cluster
- [X] modify_option_group
- [X] promote_read_replica
- [ ] promote_read_replica_db_cluster
- [X] promote_read_replica_db_cluster
- [ ] purchase_reserved_db_instances_offering
- [ ] reboot_db_cluster
- [X] reboot_db_instance
- [ ] register_db_proxy_targets
- [ ] remove_from_global_cluster
- [X] remove_from_global_cluster
- [ ] remove_role_from_db_cluster
- [ ] remove_role_from_db_instance
- [ ] remove_source_identifier_from_subscription

View File

@ -244,6 +244,10 @@ class NeptuneBackend(BaseBackend):
self.global_clusters: Dict[str, GlobalCluster] = dict()
self._db_cluster_options: Optional[List[Dict[str, Any]]] = None
@property
def global_backend(self) -> "NeptuneBackend":
    """Global clusters are tracked once per account, on the us-east-1 backend."""
    account_backends = neptune_backends[self.account_id]
    return account_backends["us-east-1"]
@property
def db_cluster_options(self) -> List[Dict[str, Any]]: # type: ignore[misc]
if self._db_cluster_options is None:
@ -297,14 +301,14 @@ class NeptuneBackend(BaseBackend):
storage_encrypted=storage_encrypted,
deletion_protection=deletion_protection,
)
self.global_clusters[global_cluster_identifier] = cluster
self.global_backend.global_clusters[global_cluster_identifier] = cluster
return cluster
def delete_global_cluster(self, global_cluster_identifier: str) -> GlobalCluster:
    """Pop the named global cluster from the account-wide (us-east-1) registry."""
    registry = self.global_backend.global_clusters
    return registry.pop(global_cluster_identifier)
def describe_global_clusters(self) -> List[GlobalCluster]:
    """List every global cluster registered for this account."""
    return list(self.global_backend.global_clusters.values())
def describe_db_clusters(self, db_cluster_identifier: str) -> List[DBCluster]:
"""

View File

@ -178,3 +178,16 @@ DESCRIBE_GLOBAL_CLUSTERS_TEMPLATE = """<DescribeGlobalClustersResponse xmlns="ht
</GlobalClusters>
</DescribeGlobalClustersResult>
</DescribeGlobalClustersResponse>"""
# Response template for RemoveFromGlobalCluster. `cluster` may be falsy when the
# removal was a no-op, in which case the <GlobalCluster> element is omitted.
REMOVE_FROM_GLOBAL_CLUSTER_TEMPLATE = """<RemoveFromGlobalClusterResponse xmlns="http://rds.amazonaws.com/doc/2014-10-31/">
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
<RemoveFromGlobalClusterResult>
{% if cluster %}
<GlobalCluster>
{{ cluster.to_xml() }}
</GlobalCluster>
{% endif %}
</RemoveFromGlobalClusterResult>
</RemoveFromGlobalClusterResponse>"""

View File

@ -42,9 +42,11 @@ class DBSecurityGroupNotFoundError(RDSClientError):
class DBSubnetGroupNotFoundError(RDSClientError):
code = 404
def __init__(self, subnet_group_name):
super().__init__(
"DBSubnetGroupNotFound", f"Subnet Group {subnet_group_name} not found."
"DBSubnetGroupNotFoundFault", f"Subnet Group {subnet_group_name} not found."
)
@ -56,6 +58,14 @@ class DBParameterGroupNotFoundError(RDSClientError):
)
class DBClusterParameterGroupNotFoundError(RDSClientError):
    """Raised when a DB cluster parameter group cannot be found by name."""

    def __init__(self, group_name):
        error_message = f"DBClusterParameterGroup not found: {group_name}"
        # AWS reports this condition under the DBParameterGroupNotFound code.
        super().__init__("DBParameterGroupNotFound", error_message)
class OptionGroupNotFoundFaultError(RDSClientError):
def __init__(self, option_group_name):
super().__init__(
@ -175,3 +185,10 @@ class SubscriptionNotFoundError(RDSClientError):
super().__init__(
"SubscriptionNotFoundFault", f"Subscription {subscription_name} not found."
)
class InvalidGlobalClusterStateFault(RDSClientError):
    """Raised when a global cluster operation is refused because members remain."""

    def __init__(self, arn: str):
        fault_message = f"Global Cluster {arn} is not empty"
        super().__init__("InvalidGlobalClusterStateFault", fault_message)

View File

@ -7,11 +7,13 @@ from collections import defaultdict
from jinja2 import Template
from re import compile as re_compile
from collections import OrderedDict
from typing import Dict, List, Optional
from moto.core import BaseBackend, BackendDict, BaseModel, CloudFormationModel
from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.ec2.models import ec2_backends
from moto.moto_api._internal import mock_random as random
from moto.neptune.models import neptune_backends, NeptuneBackend
from moto.utilities.utils import load_resource
from .exceptions import (
RDSClientError,
DBClusterNotFoundError,
@ -22,6 +24,7 @@ from .exceptions import (
DBSecurityGroupNotFoundError,
DBSubnetGroupNotFoundError,
DBParameterGroupNotFoundError,
DBClusterParameterGroupNotFoundError,
OptionGroupNotFoundFaultError,
InvalidDBClusterStateFaultError,
InvalidDBInstanceStateError,
@ -30,6 +33,7 @@ from .exceptions import (
InvalidParameterValue,
InvalidParameterCombination,
InvalidDBClusterStateFault,
InvalidGlobalClusterStateFault,
ExportTaskNotFoundError,
ExportTaskAlreadyExistsError,
InvalidExportSourceStateError,
@ -45,6 +49,61 @@ from .utils import (
)
def find_cluster(cluster_arn):
    """Resolve a DB cluster ARN to its Cluster object, across accounts/regions.

    The owning backend is derived from the region (index 3) and account
    (index 4) segments of the ARN.
    """
    _, _, _, region, account = cluster_arn.split(":")[:5]
    owning_backend = rds_backends[account][region]
    return owning_backend.describe_db_clusters(cluster_arn)[0]
class GlobalCluster(BaseModel):
    """In-memory model of an RDS global cluster (Aurora Global Database).

    Flag arguments (``storage_encrypted``, ``deletion_protection``) are
    expected as strings such as ``"true"`` — they are lower-cased and
    compared against ``"true"`` below; TODO confirm all callers pass strings.
    """

    def __init__(
        self,
        account_id: str,
        global_cluster_identifier: str,
        engine: str,
        engine_version,
        storage_encrypted,
        deletion_protection,
    ):
        self.global_cluster_identifier = global_cluster_identifier
        # Random per-instance resource id, mimicking AWS-generated identifiers.
        self.global_cluster_resource_id = "cluster-" + random.get_random_hex(8)
        # Global clusters are not region-scoped, hence the empty region field in the ARN.
        self.global_cluster_arn = (
            f"arn:aws:rds::{account_id}:global-cluster:{global_cluster_identifier}"
        )
        self.engine = engine
        # Default engine version used when the caller does not supply one.
        self.engine_version = engine_version or "5.7.mysql_aurora.2.11.2"
        # Truthy only when the string flag equals "true" (case-insensitive).
        self.storage_encrypted = (
            storage_encrypted and storage_encrypted.lower() == "true"
        )
        self.deletion_protection = (
            deletion_protection and deletion_protection.lower() == "true"
        )
        # ARNs of member DB clusters; populated by the backend on create/remove.
        self.members = []

    def to_xml(self) -> str:
        """Render the inner XML fields of a <GlobalCluster> element."""
        template = Template(
            """
<GlobalClusterIdentifier>{{ cluster.global_cluster_identifier }}</GlobalClusterIdentifier>
<GlobalClusterResourceId>{{ cluster.global_cluster_resource_id }}</GlobalClusterResourceId>
<GlobalClusterArn>{{ cluster.global_cluster_arn }}</GlobalClusterArn>
<Engine>{{ cluster.engine }}</Engine>
<Status>available</Status>
<EngineVersion>{{ cluster.engine_version }}</EngineVersion>
<StorageEncrypted>{{ 'true' if cluster.storage_encrypted else 'false' }}</StorageEncrypted>
<DeletionProtection>{{ 'true' if cluster.deletion_protection else 'false' }}</DeletionProtection>
<GlobalClusterMembers>
{% for cluster_arn in cluster.members %}
<GlobalClusterMember>
<DBClusterArn>{{ cluster_arn }}</DBClusterArn>
<IsWriter>true</IsWriter>
</GlobalClusterMember>
{% endfor %}
</GlobalClusterMembers>
"""
        )
        return template.render(cluster=self)
class Cluster:
SUPPORTED_FILTERS = {
"db-cluster-id": FilterDef(
@ -64,6 +123,8 @@ class Cluster:
self.engine_version = Cluster.default_engine_version(self.engine)
self.engine_mode = kwargs.get("engine_mode") or "provisioned"
self.iops = kwargs.get("iops")
self.kms_key_id = kwargs.get("kms_key_id")
self.network_type = kwargs.get("network_type") or "IPV4"
self.status = "active"
self.account_id = kwargs.get("account_id")
self.region_name = kwargs.get("region")
@ -96,7 +157,7 @@ class Cluster:
f"{self.region_name}c",
]
self.parameter_group = kwargs.get("parameter_group") or "default.aurora8.0"
self.subnet_group = "default"
self.subnet_group = kwargs.get("db_subnet_group_name") or "default"
self.status = "creating"
self.url_identifier = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(12)
@ -124,6 +185,28 @@ class Cluster:
self.earliest_restorable_time = iso_8601_datetime_with_milliseconds(
datetime.datetime.utcnow()
)
self.scaling_configuration = kwargs.get("scaling_configuration")
if not self.scaling_configuration and self.engine_mode == "serverless":
# In AWS, this default configuration only shows up when the Cluster is in a ready state, so a few minutes after creation
self.scaling_configuration = {
"min_capacity": 1,
"max_capacity": 16,
"auto_pause": True,
"seconds_until_auto_pause": 300,
"timeout_action": "RollbackCapacityChange",
"seconds_before_timeout": 300,
}
self.global_cluster_identifier = kwargs.get("global_cluster_identifier")
self.cluster_members = list()
self.replication_source_identifier = kwargs.get("replication_source_identifier")
self.read_replica_identifiers = list()
@property
def is_multi_az(self):
    """A cluster is multi-AZ when it participates in replication at all:

    either it has read replicas of its own, or it is itself a replica of a
    source cluster.
    """
    has_replicas = len(self.read_replica_identifiers) > 0
    is_replica = self.replication_source_identifier is not None
    return has_replicas or is_replica
@property
def db_cluster_arn(self):
@ -172,6 +255,10 @@ class Cluster:
"11.13",
]:
self._enable_http_endpoint = val
elif self.engine == "aurora" and self.engine_version in [
"5.6.mysql_aurora.1.22.5"
]:
self._enable_http_endpoint = val
def get_cfg(self):
cfg = self.__dict__
@ -182,15 +269,18 @@ class Cluster:
def to_xml(self):
template = Template(
"""<DBCluster>
<AllocatedStorage>1</AllocatedStorage>
<AllocatedStorage>{{ cluster.allocated_storage }}</AllocatedStorage>
<AvailabilityZones>
{% for zone in cluster.availability_zones %}
<AvailabilityZone>{{ zone }}</AvailabilityZone>
{% endfor %}
</AvailabilityZones>
<BackupRetentionPeriod>1</BackupRetentionPeriod>
<BacktrackWindow>0</BacktrackWindow>
<DBInstanceStatus>{{ cluster.status }}</DBInstanceStatus>
{% if cluster.db_name %}<DatabaseName>{{ cluster.db_name }}</DatabaseName>{% endif %}
{% if cluster.kms_key_id %}<KmsKeyId>{{ cluster.kms_key_id }}</KmsKeyId>{% endif %}
{% if cluster.network_type %}<NetworkType>{{ cluster.network_type }}</NetworkType>{% endif %}
<DBClusterIdentifier>{{ cluster.db_cluster_identifier }}</DBClusterIdentifier>
<DBClusterParameterGroup>{{ cluster.parameter_group }}</DBClusterParameterGroup>
<DBSubnetGroup>{{ cluster.subnet_group }}</DBSubnetGroup>
@ -200,21 +290,32 @@ class Cluster:
<Status>{{ cluster.status }}</Status>
<Endpoint>{{ cluster.endpoint }}</Endpoint>
<ReaderEndpoint>{{ cluster.reader_endpoint }}</ReaderEndpoint>
<MultiAZ>false</MultiAZ>
<MultiAZ>{{ 'true' if cluster.is_multi_az else 'false' }}</MultiAZ>
<EngineVersion>{{ cluster.engine_version }}</EngineVersion>
<Port>{{ cluster.port }}</Port>
{% if cluster.iops %}
<Iops>{{ cluster.iops }}</Iops>
<StorageType>io1</StorageType>
{% else %}
<StorageType>{{ cluster.storage_type }}</StorageType>
{% endif %}
<DBClusterInstanceClass>{{ cluster.db_cluster_instance_class }}</DBClusterInstanceClass>
{% if cluster.db_cluster_instance_class %}<DBClusterInstanceClass>{{ cluster.db_cluster_instance_class }}</DBClusterInstanceClass>{% endif %}
<MasterUsername>{{ cluster.master_username }}</MasterUsername>
<PreferredBackupWindow>{{ cluster.preferred_backup_window }}</PreferredBackupWindow>
<PreferredMaintenanceWindow>{{ cluster.preferred_maintenance_window }}</PreferredMaintenanceWindow>
<ReadReplicaIdentifiers></ReadReplicaIdentifiers>
<DBClusterMembers></DBClusterMembers>
<ReadReplicaIdentifiers>
{% for replica_id in cluster.read_replica_identifiers %}
<ReadReplicaIdentifier>{{ replica_id }}</ReadReplicaIdentifier>
{% endfor %}
</ReadReplicaIdentifiers>
<DBClusterMembers>
{% for member in cluster.cluster_members %}
<DBClusterMember>
<DBInstanceIdentifier>{{ member }}</DBInstanceIdentifier>
<IsClusterWriter>true</IsClusterWriter>
<DBClusterParameterGroupStatus>in-sync</DBClusterParameterGroupStatus>
<PromotionTier>1</PromotionTier>
</DBClusterMember>
{% endfor %}
</DBClusterMembers>
<VpcSecurityGroups>
{% for id in cluster.vpc_security_groups %}
<VpcSecurityGroup>
@ -248,6 +349,20 @@ class Cluster:
</Tag>
{%- endfor -%}
</TagList>
{% if cluster.scaling_configuration %}
<ScalingConfigurationInfo>
{% if "min_capacity" in cluster.scaling_configuration %}<MinCapacity>{{ cluster.scaling_configuration["min_capacity"] }}</MinCapacity>{% endif %}
{% if "max_capacity" in cluster.scaling_configuration %}<MaxCapacity>{{ cluster.scaling_configuration["max_capacity"] }}</MaxCapacity>{% endif %}
{% if "auto_pause" in cluster.scaling_configuration %}<AutoPause>{{ cluster.scaling_configuration["auto_pause"] }}</AutoPause>{% endif %}
{% if "seconds_until_auto_pause" in cluster.scaling_configuration %}<SecondsUntilAutoPause>{{ cluster.scaling_configuration["seconds_until_auto_pause"] }}</SecondsUntilAutoPause>{% endif %}
{% if "timeout_action" in cluster.scaling_configuration %}<TimeoutAction>{{ cluster.scaling_configuration["timeout_action"] }}</TimeoutAction>{% endif %}
{% if "seconds_before_timeout" in cluster.scaling_configuration %}<SecondsBeforeTimeout>{{ cluster.scaling_configuration["seconds_before_timeout"] }}</SecondsBeforeTimeout>{% endif %}
</ScalingConfigurationInfo>
{% endif %}
{%- if cluster.global_cluster_identifier -%}
<GlobalClusterIdentifier>{{ cluster.global_cluster_identifier }}</GlobalClusterIdentifier>
{%- endif -%}
{%- if cluster.replication_source_identifier -%}<ReplicationSourceIdentifier>{{ cluster.replication_source_identifier }}</ReplicationSourceIdentifier>{%- endif -%}
</DBCluster>"""
)
return template.render(cluster=self)
@ -428,7 +543,9 @@ class Database(CloudFormationModel):
)
self.db_cluster_identifier = kwargs.get("db_cluster_identifier")
self.db_instance_identifier = kwargs.get("db_instance_identifier")
self.source_db_identifier = kwargs.get("source_db_identifier")
self.source_db_identifier = kwargs.get("source_db_identifier")
self.db_instance_class = kwargs.get("db_instance_class")
self.port = kwargs.get("port")
if self.port is None:
@ -455,7 +572,7 @@ class Database(CloudFormationModel):
if self.db_subnet_group_name:
self.db_subnet_group = rds_backends[self.account_id][
self.region_name
].describe_subnet_groups(self.db_subnet_group_name)[0]
].describe_db_subnet_groups(self.db_subnet_group_name)[0]
else:
self.db_subnet_group = None
self.security_groups = kwargs.get("security_groups", [])
@ -1337,16 +1454,19 @@ class RDSBackend(BaseBackend):
self.arn_regex = re_compile(
r"^arn:aws:rds:.*:[0-9]*:(db|cluster|es|og|pg|ri|secgrp|snapshot|cluster-snapshot|subgrp):.*$"
)
self.clusters = OrderedDict()
self.clusters: Dict[str, Cluster] = OrderedDict()
self.global_clusters = OrderedDict()
self.databases = OrderedDict()
self.database_snapshots = OrderedDict()
self.cluster_snapshots = OrderedDict()
self.export_tasks = OrderedDict()
self.event_subscriptions = OrderedDict()
self.db_parameter_groups = {}
self.db_cluster_parameter_groups = {}
self.option_groups = {}
self.security_groups = {}
self.subnet_groups = {}
self._db_cluster_options = None
def reset(self):
self.neptune.reset()
@ -1356,6 +1476,19 @@ class RDSBackend(BaseBackend):
def neptune(self) -> NeptuneBackend:
return neptune_backends[self.account_id][self.region_name]
@property
def db_cluster_options(self):
    """Orderable aurora-postgresql cluster options, loaded lazily and cached."""
    if self._db_cluster_options is None:
        # Imported here to avoid a circular import at module load time.
        from moto.rds.utils import decode_orderable_db_instance

        raw_options = load_resource(
            __name__, "resources/cluster_options/aurora-postgresql.json"
        )
        self._db_cluster_options = [
            decode_orderable_db_instance(entry) for entry in raw_options
        ]
    return self._db_cluster_options
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""Default VPC endpoint service."""
@ -1368,6 +1501,16 @@ class RDSBackend(BaseBackend):
def create_db_instance(self, db_kwargs):
    """Create a DB instance and, if it targets a known cluster, enrol it there.

    Raises InvalidParameterValue for serverless Aurora clusters, which cannot
    take member instances.
    """
    instance_id = db_kwargs["db_instance_identifier"]
    new_instance = Database(**db_kwargs)
    cluster_id = new_instance.db_cluster_identifier
    owning_cluster = self.clusters.get(cluster_id) if cluster_id is not None else None
    if owning_cluster is not None:
        is_serverless_aurora = (
            owning_cluster.engine == "aurora"
            and owning_cluster.engine_mode == "serverless"
        )
        if is_serverless_aurora:
            raise InvalidParameterValue(
                "Instances cannot be added to Aurora Serverless clusters."
            )
        owning_cluster.cluster_members.append(instance_id)
    self.databases[instance_id] = new_instance
    return new_instance
@ -1563,6 +1706,10 @@ class RDSBackend(BaseBackend):
if database.is_replica:
primary = self.find_db_from_id(database.source_db_identifier)
primary.remove_replica(database)
if database.db_cluster_identifier in self.clusters:
self.clusters[database.db_cluster_identifier].cluster_members.remove(
db_instance_identifier
)
database.status = "deleting"
return database
else:
@ -1611,7 +1758,7 @@ class RDSBackend(BaseBackend):
self.subnet_groups[subnet_name] = subnet_group
return subnet_group
def describe_subnet_groups(self, subnet_group_name):
def describe_db_subnet_groups(self, subnet_group_name):
if subnet_group_name:
if subnet_group_name in self.subnet_groups:
return [self.subnet_groups[subnet_group_name]]
@ -1877,11 +2024,24 @@ class RDSBackend(BaseBackend):
return db_parameter_group
def describe_db_cluster_parameters(self):
    """Stub: individual cluster parameters are not modelled yet, so none are returned."""
    return []
def create_db_cluster(self, kwargs):
    """Create a DB cluster and wire it into any global cluster / replication source.

    Returns a snapshot of the cluster in ``creating`` state, while the stored
    cluster immediately becomes ``available``.
    """
    kwargs["account_id"] = self.account_id
    cluster_id = kwargs["db_cluster_identifier"]
    new_cluster = Cluster(**kwargs)
    self.clusters[cluster_id] = new_cluster

    # Register as a member of an existing global cluster, when one is named.
    global_id = new_cluster.global_cluster_identifier or ""
    if global_id in self.global_clusters:
        self.global_clusters[global_id].members.append(new_cluster.db_cluster_arn)

    # A replica cluster announces itself to its replication source.
    source_arn = new_cluster.replication_source_identifier
    if source_arn:
        source = find_cluster(source_arn)
        source.read_replica_identifiers.append(new_cluster.db_cluster_arn)

    creating_snapshot = copy.deepcopy(new_cluster)  # returned with status=creating
    new_cluster.status = "available"  # final status is set "in the background"
    return creating_snapshot
@ -1900,6 +2060,13 @@ class RDSBackend(BaseBackend):
if v is not None:
setattr(cluster, k, v)
cwl_exports = kwargs.get("enable_cloudwatch_logs_exports") or {}
for exp in cwl_exports.get("DisableLogTypes", []):
cluster.enabled_cloudwatch_logs_exports.remove(exp)
cluster.enabled_cloudwatch_logs_exports.extend(
cwl_exports.get("EnableLogTypes", [])
)
cluster_id = kwargs.get("new_db_cluster_identifier", cluster_id)
self.clusters[cluster_id] = cluster
@ -1907,6 +2074,13 @@ class RDSBackend(BaseBackend):
cluster.status = "available" # Already set the final status in the background
return initial_state
def promote_read_replica_db_cluster(self, db_cluster_identifier: str) -> Cluster:
    """Detach a replica cluster from its replication source, making it standalone."""
    replica = self.clusters[db_cluster_identifier]
    primary = find_cluster(replica.replication_source_identifier)
    # The primary stops tracking the replica, and the replica forgets its source.
    primary.read_replica_identifiers.remove(replica.db_cluster_arn)
    replica.replication_source_identifier = None
    return replica
def create_db_cluster_snapshot(
self, db_cluster_identifier, db_snapshot_identifier, tags=None
):
@ -1955,7 +2129,9 @@ class RDSBackend(BaseBackend):
return self.cluster_snapshots.pop(db_snapshot_identifier)
def describe_db_clusters(self, cluster_identifier=None, filters=None):
def describe_db_clusters(
self, cluster_identifier=None, filters=None
) -> List[Cluster]:
clusters = self.clusters
clusters_neptune = self.neptune.clusters
if cluster_identifier:
@ -1987,10 +2163,16 @@ class RDSBackend(BaseBackend):
def delete_db_cluster(self, cluster_identifier, snapshot_name=None):
if cluster_identifier in self.clusters:
if self.clusters[cluster_identifier].deletion_protection:
cluster = self.clusters[cluster_identifier]
if cluster.deletion_protection:
raise InvalidParameterValue(
"Can't delete Cluster with protection enabled"
)
global_id = cluster.global_cluster_identifier or ""
if global_id in self.global_clusters:
self.remove_from_global_cluster(global_id, cluster_identifier)
if snapshot_name:
self.create_db_cluster_snapshot(cluster_identifier, snapshot_name)
return self.clusters.pop(cluster_identifier)
@ -2253,12 +2435,118 @@ class RDSBackend(BaseBackend):
def describe_orderable_db_instance_options(self, engine, engine_version):
    """
    Only the Aurora-Postgresql and Neptune-engine is currently implemented
    """
    if engine == "neptune":
        return self.neptune.describe_orderable_db_instance_options(engine_version)
    if engine == "aurora-postgresql":
        options = self.db_cluster_options
        if not engine_version:
            return options
        return [opt for opt in options if opt["EngineVersion"] == engine_version]
    # Any other engine is not modelled.
    return []
def create_db_cluster_parameter_group(
    self,
    group_name,
    family,
    description,
):
    """Create a DB cluster parameter group and register it under its name."""
    new_group = DBClusterParameterGroup(
        account_id=self.account_id,
        region=self.region_name,
        name=group_name,
        family=family,
        description=description,
    )
    self.db_cluster_parameter_groups[group_name] = new_group
    return new_group
def describe_db_cluster_parameter_groups(self, group_name):
    """Return every cluster parameter group, or only the named one.

    Raises DBClusterParameterGroupNotFoundError when a name is given but unknown.
    """
    if group_name is None:
        return list(self.db_cluster_parameter_groups.values())
    if group_name not in self.db_cluster_parameter_groups:
        raise DBClusterParameterGroupNotFoundError(group_name)
    return [self.db_cluster_parameter_groups[group_name]]
def delete_db_cluster_parameter_group(self, group_name):
    """Delete a DB cluster parameter group.

    Mirrors describe_db_cluster_parameter_groups: an unknown name raises the
    proper RDS fault rather than leaking a bare KeyError from dict.pop.
    """
    if group_name not in self.db_cluster_parameter_groups:
        raise DBClusterParameterGroupNotFoundError(group_name)
    self.db_cluster_parameter_groups.pop(group_name)
def create_global_cluster(
    self,
    global_cluster_identifier: str,
    source_db_cluster_identifier: Optional[str],
    engine: Optional[str],
    engine_version: Optional[str],
    storage_encrypted: Optional[bool],
    deletion_protection: Optional[bool],
) -> GlobalCluster:
    """Create a global cluster, optionally seeded from an existing DB cluster.

    With a source cluster the engine settings are inherited and must not be
    passed explicitly; standalone creation requires an explicit engine.
    """
    seed_cluster = None
    if source_db_cluster_identifier is None:
        # Standalone global cluster: the engine is mandatory.
        if engine is None:
            raise InvalidParameterValue(
                "When creating standalone global cluster, value for engineName should be specified"
            )
    else:
        # The source must be referenced by ARN and must already exist.
        if not source_db_cluster_identifier.startswith("arn:aws:rds"):
            raise InvalidParameterValue("Malformed db cluster arn dbci")
        seed_cluster = self.describe_db_clusters(
            cluster_identifier=source_db_cluster_identifier
        )[0]
        # Engine settings come from the source, so they must not be supplied.
        if engine is not None:
            raise InvalidParameterCombination(
                "When creating global cluster from existing db cluster, value for engineName should not be specified since it will be inherited from source cluster"
            )
        engine = seed_cluster.engine
        engine_version = seed_cluster.engine_version

    new_global = GlobalCluster(
        account_id=self.account_id,
        global_cluster_identifier=global_cluster_identifier,
        engine=engine,
        engine_version=engine_version,
        storage_encrypted=storage_encrypted,
        deletion_protection=deletion_protection,
    )
    self.global_clusters[global_cluster_identifier] = new_global
    if seed_cluster is not None:
        # Link both directions: member records the global ARN, global records the member ARN.
        seed_cluster.global_cluster_identifier = new_global.global_cluster_arn
        new_global.members.append(seed_cluster.db_cluster_arn)
    return new_global
def describe_global_clusters(self):
    """All known global clusters: RDS-managed first, then Neptune-managed."""
    rds_owned = list(self.global_clusters.values())
    neptune_owned = self.neptune.describe_global_clusters()
    return rds_owned + neptune_owned
def delete_global_cluster(self, global_cluster_identifier: str) -> "GlobalCluster":
    """Delete a global cluster, whether Neptune-managed or RDS-managed.

    Neptune is tried first; any failure there is treated as "not a Neptune
    global cluster" and we fall back to the RDS registry. Deletion is refused
    while member clusters remain.
    """
    try:
        return self.neptune.delete_global_cluster(global_cluster_identifier)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        # It's not a Neptune Global Cluster - assume it's an RDS cluster instead.
        pass
    global_cluster = self.global_clusters[global_cluster_identifier]
    if global_cluster.members:
        raise InvalidGlobalClusterStateFault(global_cluster.global_cluster_arn)
    return self.global_clusters.pop(global_cluster_identifier)
def remove_from_global_cluster(
    self, global_cluster_identifier: str, db_cluster_identifier: str
) -> Optional["GlobalCluster"]:
    """Detach a member DB cluster from a global cluster, best-effort.

    Returns the global cluster on success, or None when either identifier is
    unknown or the DB cluster is not currently a member (annotation corrected
    to Optional to match that behaviour).
    """
    try:
        global_cluster = self.global_clusters[global_cluster_identifier]
        cluster = self.describe_db_clusters(
            cluster_identifier=db_cluster_identifier
        )[0]
        global_cluster.members.remove(cluster.db_cluster_arn)
        return global_cluster
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        # Unknown global cluster / DB cluster, or not a member: treat as a no-op.
        pass
    return None
class OptionGroup(object):
def __init__(
@ -2410,4 +2698,24 @@ class DBParameterGroup(CloudFormationModel):
return db_parameter_group
class DBClusterParameterGroup(CloudFormationModel):
    """In-memory model of an RDS DB cluster parameter group."""

    def __init__(self, account_id, region, name, description, family):
        self.name = name
        self.description = description
        self.family = family
        # parameter-name -> attribute dict; defaultdict lets callers assign nested keys directly
        self.parameters = defaultdict(dict)
        # NOTE(review): moto uses `cpg` as the ARN resource type here, while AWS
        # documents `cluster-pg` — kept as-is for backwards compatibility.
        self.arn = f"arn:aws:rds:{region}:{account_id}:cpg:{name}"

    def to_xml(self):
        """Render this group as the <DBClusterParameterGroup> XML fragment."""
        template = Template(
            """<DBClusterParameterGroup>
<DBClusterParameterGroupName>{{ param_group.name }}</DBClusterParameterGroupName>
<DBParameterGroupFamily>{{ param_group.family }}</DBParameterGroupFamily>
<Description>{{ param_group.description }}</Description>
<DBClusterParameterGroupArn>{{ param_group.arn }}</DBClusterParameterGroupArn>
</DBClusterParameterGroup>"""
        )
        return template.render(param_group=self)
# Account/region-partitioned registry of RDSBackend instances.
rds_backends = BackendDict(RDSBackend, "rds")

File diff suppressed because it is too large Load Diff

View File

@ -5,6 +5,12 @@ from moto.core.common_types import TYPE_RESPONSE
from moto.core.responses import BaseResponse
from moto.ec2.models import ec2_backends
from moto.neptune.responses import NeptuneResponse
from moto.neptune.responses import (
CREATE_GLOBAL_CLUSTER_TEMPLATE,
DESCRIBE_GLOBAL_CLUSTERS_TEMPLATE,
DELETE_GLOBAL_CLUSTER_TEMPLATE,
REMOVE_FROM_GLOBAL_CLUSTER_TEMPLATE,
)
from .models import rds_backends, RDSBackend
from .exceptions import DBParameterGroupNotFoundError
@ -26,7 +32,7 @@ class RDSResponse(BaseResponse):
return super()._dispatch(request, full_url, headers)
def __getattribute__(self, name: str):
    """Delegate selected cluster operations to the Neptune responder.

    When the request declares Engine=neptune, create_db_cluster and
    create_global_cluster are served by the Neptune response class instead.
    """
    if name in ("create_db_cluster", "create_global_cluster"):
        if self._get_param("Engine") == "neptune":
            return object.__getattribute__(self.neptune, name)
    return object.__getattribute__(self, name)
@ -105,11 +111,12 @@ class RDSResponse(BaseResponse):
"engine": self._get_param("Engine"),
"engine_version": self._get_param("EngineVersion"),
"enable_cloudwatch_logs_exports": self._get_params().get(
"EnableCloudwatchLogsExports"
"CloudwatchLogsExportConfiguration"
),
"enable_iam_database_authentication": self._get_bool_param(
"EnableIAMDatabaseAuthentication"
),
"enable_http_endpoint": self._get_param("EnableHttpEndpoint"),
"license_model": self._get_param("LicenseModel"),
"iops": self._get_int_param("Iops"),
"kms_key_id": self._get_param("KmsKeyId"),
@ -180,22 +187,30 @@ class RDSResponse(BaseResponse):
),
"db_name": self._get_param("DatabaseName"),
"db_cluster_identifier": self._get_param("DBClusterIdentifier"),
"db_subnet_group_name": self._get_param("DBSubnetGroupName"),
"deletion_protection": self._get_bool_param("DeletionProtection"),
"engine": self._get_param("Engine"),
"engine_version": self._get_param("EngineVersion"),
"engine_mode": self._get_param("EngineMode"),
"allocated_storage": self._get_param("AllocatedStorage"),
"global_cluster_identifier": self._get_param("GlobalClusterIdentifier"),
"iops": self._get_param("Iops"),
"storage_type": self._get_param("StorageType"),
"kms_key_id": self._get_param("KmsKeyId"),
"master_username": self._get_param("MasterUsername"),
"master_user_password": self._get_param("MasterUserPassword"),
"network_type": self._get_param("NetworkType"),
"port": self._get_param("Port"),
"parameter_group": self._get_param("DBClusterParameterGroup"),
"parameter_group": self._get_param("DBClusterParameterGroupName"),
"region": self.region,
"db_cluster_instance_class": self._get_param("DBClusterInstanceClass"),
"enable_http_endpoint": self._get_param("EnableHttpEndpoint"),
"copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
"tags": self.unpack_list_params("Tags", "Tag"),
"scaling_configuration": self._get_dict_param("ScalingConfiguration."),
"replication_source_identifier": self._get_param(
"ReplicationSourceIdentifier"
),
}
def _get_export_task_kwargs(self):
@ -427,7 +442,7 @@ class RDSResponse(BaseResponse):
def describe_db_subnet_groups(self):
    """Handle DescribeDBSubnetGroups: list subnet groups, optionally filtered by name."""
    subnet_name = self._get_param("DBSubnetGroupName")
    # The old `describe_subnet_groups` call was superseded and its result was
    # immediately overwritten on the next line - the dead call is removed.
    subnet_groups = self.backend.describe_db_subnet_groups(subnet_name)
    template = self.response_template(DESCRIBE_SUBNET_GROUPS_TEMPLATE)
    return template.render(subnet_groups=subnet_groups)
@ -569,6 +584,15 @@ class RDSResponse(BaseResponse):
template = self.response_template(DELETE_DB_PARAMETER_GROUP_TEMPLATE)
return template.render(db_parameter_group=db_parameter_group)
def describe_db_cluster_parameters(self):
    """Handle DescribeDBClusterParameters.

    Raises DBParameterGroupNotFoundError when the backend reports no group.
    """
    # The request parameter for this action is DBClusterParameterGroupName;
    # DBParameterGroupName belongs to the instance-level DescribeDBParameters
    # API, so reading it here always yielded None.
    db_parameter_group_name = self._get_param("DBClusterParameterGroupName")
    db_parameter_groups = self.backend.describe_db_cluster_parameters()
    if db_parameter_groups is None:
        raise DBParameterGroupNotFoundError(db_parameter_group_name)
    template = self.response_template(DESCRIBE_DB_CLUSTER_PARAMETERS_TEMPLATE)
    return template.render(db_parameter_group=db_parameter_groups)
def create_db_cluster(self):
kwargs = self._get_db_cluster_kwargs()
cluster = self.backend.create_db_cluster(kwargs)
@ -708,13 +732,73 @@ class RDSResponse(BaseResponse):
return template.render(options=options, marker=None)
def describe_global_clusters(self):
    """Handle DescribeGlobalClusters: render all global clusters from the backend."""
    # The leftover delegation `return self.neptune.describe_global_clusters()`
    # made everything below unreachable; RDS renders its own response now.
    clusters = self.backend.describe_global_clusters()
    template = self.response_template(DESCRIBE_GLOBAL_CLUSTERS_TEMPLATE)
    return template.render(clusters=clusters)
def create_global_cluster(self):
    """Handle CreateGlobalCluster.

    Supports both a standalone global cluster (Engine required) and
    promotion of an existing cluster via SourceDBClusterIdentifier.
    """
    # The leftover `return self.neptune.create_global_cluster()` made the
    # RDS implementation below unreachable; it has been removed.
    params = self._get_params()
    cluster = self.backend.create_global_cluster(
        global_cluster_identifier=params["GlobalClusterIdentifier"],
        source_db_cluster_identifier=params.get("SourceDBClusterIdentifier"),
        engine=params.get("Engine"),
        engine_version=params.get("EngineVersion"),
        storage_encrypted=params.get("StorageEncrypted"),
        deletion_protection=params.get("DeletionProtection"),
    )
    template = self.response_template(CREATE_GLOBAL_CLUSTER_TEMPLATE)
    return template.render(cluster=cluster)
def delete_global_cluster(self):
    """Handle DeleteGlobalCluster for the given GlobalClusterIdentifier."""
    # The leftover `return self.neptune.delete_global_cluster()` made the
    # RDS implementation below unreachable; it has been removed.
    params = self._get_params()
    cluster = self.backend.delete_global_cluster(
        global_cluster_identifier=params["GlobalClusterIdentifier"],
    )
    template = self.response_template(DELETE_GLOBAL_CLUSTER_TEMPLATE)
    return template.render(cluster=cluster)
def remove_from_global_cluster(self):
    """Handle RemoveFromGlobalCluster: detach a DB cluster from its global cluster."""
    request = self._get_params()
    detached = self.backend.remove_from_global_cluster(
        global_cluster_identifier=request["GlobalClusterIdentifier"],
        db_cluster_identifier=request["DbClusterIdentifier"],
    )
    return self.response_template(REMOVE_FROM_GLOBAL_CLUSTER_TEMPLATE).render(
        cluster=detached
    )
def create_db_cluster_parameter_group(self):
    """Handle CreateDBClusterParameterGroup from name, family, and description."""
    created = self.backend.create_db_cluster_parameter_group(
        group_name=self._get_param("DBClusterParameterGroupName"),
        family=self._get_param("DBParameterGroupFamily"),
        description=self._get_param("Description"),
    )
    return self.response_template(CREATE_DB_CLUSTER_PARAMETER_GROUP_TEMPLATE).render(
        db_cluster_parameter_group=created
    )
def describe_db_cluster_parameter_groups(self):
    """Handle DescribeDBClusterParameterGroups, optionally filtered by name."""
    requested_name = self._get_param("DBClusterParameterGroupName")
    matching_groups = self.backend.describe_db_cluster_parameter_groups(
        group_name=requested_name,
    )
    return self.response_template(
        DESCRIBE_DB_CLUSTER_PARAMETER_GROUPS_TEMPLATE
    ).render(db_parameter_groups=matching_groups)
def delete_db_cluster_parameter_group(self):
    """Handle DeleteDBClusterParameterGroup; the success response has no payload."""
    self.backend.delete_db_cluster_parameter_group(
        group_name=self._get_param("DBClusterParameterGroupName"),
    )
    return self.response_template(DELETE_DB_CLUSTER_PARAMETER_GROUP_TEMPLATE).render()
def promote_read_replica_db_cluster(self):
    """Handle PromoteReadReplicaDBCluster: elevate a replica to read/write."""
    identifier = self._get_param("DBClusterIdentifier")
    promoted = self.backend.promote_read_replica_db_cluster(identifier)
    return self.response_template(PROMOTE_READ_REPLICA_DB_CLUSTER_TEMPLATE).render(
        cluster=promoted
    )
CREATE_DATABASE_TEMPLATE = """<CreateDBInstanceResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
@ -1048,6 +1132,24 @@ DESCRIBE_DB_PARAMETERS_TEMPLATE = """<DescribeDBParametersResponse xmlns="http:/
</DescribeDBParametersResponse>
"""
# Response for DescribeDBClusterParameters. Each parameter dict renders as one
# <Parameter> element with a child element per key/value pair.
# Fixed: the inner loop referenced the undefined name `db_parameter` instead of
# the outer loop variable `param`, which would raise on any non-empty list.
DESCRIBE_DB_CLUSTER_PARAMETERS_TEMPLATE = """<DescribeDBClusterParametersResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<DescribeDBClusterParametersResult>
<Parameters>
{%- for param in db_parameter_group -%}
<Parameter>
{%- for parameter_name, parameter_value in param.items() -%}
<{{ parameter_name }}>{{ parameter_value }}</{{ parameter_name }}>
{%- endfor -%}
</Parameter>
{%- endfor -%}
</Parameters>
</DescribeDBClusterParametersResult>
<ResponseMetadata>
<RequestId>8c40488f-b9ff-11d3-a15e-7ac49293f4fa</RequestId>
</ResponseMetadata>
</DescribeDBClusterParametersResponse>
"""
LIST_TAGS_FOR_RESOURCE_TEMPLATE = """<ListTagsForResourceResponse xmlns="http://rds.amazonaws.com/doc/2014-10-31/">
<ListTagsForResourceResult>
<TagList>
@ -1261,7 +1363,7 @@ DESCRIBE_ORDERABLE_CLUSTER_OPTIONS = """<DescribeOrderableDBInstanceOptionsRespo
<OrderableDBInstanceOptions>
{% for option in options %}
<OrderableDBInstanceOption>
<OutpostCapable>option["OutpostCapable"]</OutpostCapable>
<OutpostCapable>false</OutpostCapable>
<AvailabilityZones>
{% for zone in option["AvailabilityZones"] %}
<AvailabilityZone>
@ -1306,3 +1408,41 @@ DESCRIBE_ORDERABLE_CLUSTER_OPTIONS = """<DescribeOrderableDBInstanceOptionsRespo
<RequestId>54212dc5-16c4-4eb8-a88e-448691e877ab</RequestId>
</ResponseMetadata>
</DescribeOrderableDBInstanceOptionsResponse>"""
# Response for CreateDBClusterParameterGroup: echoes the created group's XML
# (produced by its `to_xml()` method) inside the result element.
CREATE_DB_CLUSTER_PARAMETER_GROUP_TEMPLATE = """<CreateDBClusterParameterGroupResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<CreateDBClusterParameterGroupResult>
{{ db_cluster_parameter_group.to_xml() }}
</CreateDBClusterParameterGroupResult>
<ResponseMetadata>
<RequestId>7805c127-af22-11c3-96ac-6999cc5f7e72</RequestId>
</ResponseMetadata>
</CreateDBClusterParameterGroupResponse>"""
# Response for DescribeDBClusterParameterGroups: renders each group's XML
# inside the <DBClusterParameterGroups> list element.
DESCRIBE_DB_CLUSTER_PARAMETER_GROUPS_TEMPLATE = """<DescribeDBClusterParameterGroupsResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<DescribeDBClusterParameterGroupsResult>
<DBClusterParameterGroups>
{%- for db_parameter_group in db_parameter_groups -%}
{{ db_parameter_group.to_xml() }}
{%- endfor -%}
</DBClusterParameterGroups>
</DescribeDBClusterParameterGroupsResult>
<ResponseMetadata>
<RequestId>b75d527a-b98c-11d3-f272-7cd6cce12cc5</RequestId>
</ResponseMetadata>
</DescribeDBClusterParameterGroupsResponse>"""
# Response for DeleteDBClusterParameterGroup: success carries only request metadata.
DELETE_DB_CLUSTER_PARAMETER_GROUP_TEMPLATE = """<DeleteDBClusterParameterGroupResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<ResponseMetadata>
<RequestId>cad6c267-ba25-11d3-fe11-33d33a9bb7e3</RequestId>
</ResponseMetadata>
</DeleteDBClusterParameterGroupResponse>"""
# Response for PromoteReadReplicaDBCluster: echoes the promoted cluster's XML.
PROMOTE_READ_REPLICA_DB_CLUSTER_TEMPLATE = """<PromoteReadReplicaDBClusterResponse xmlns="http://rds.amazonaws.com/doc/2014-09-01/">
<PromoteReadReplicaDBClusterResult>
{{ cluster.to_xml() }}
</PromoteReadReplicaDBClusterResult>
<ResponseMetadata>
<RequestId>7369556f-b70d-11c3-faca-6ba18376ea1b</RequestId>
</ResponseMetadata>
</PromoteReadReplicaDBClusterResponse>"""

View File

@ -5,13 +5,15 @@ pwd=$PWD
(
cd terraform-provider-aws || exit
echo "Patching the terraform-provider-aws directory..."
echo "Patches may fail if the patch was already applied, or if the patch is outdated"
PATCH="etc/0001-Patch-Hardcode-endpoints-to-local-server.patch"
(git apply $pwd/etc/0001-Patch-Hardcode-endpoints-to-local-server.patch > /dev/null 2>&1 && echo "Patched endpoints") || echo "Not patching endpoints - Directory was probably already patched."
(git apply $pwd/etc/0002-EC2-reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched EC2") || echo "Not patching EC2 - Directory was probably already patched."
(git apply $pwd/etc/0003-Patch-IAM-wait-times.patch > /dev/null 2>&1 && echo "Patched IAM") || echo "Not patching IAM - Directory was probably already patched."
(git apply $pwd/etc/0005-Route53-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Route53") || echo "Not patching Route53 - Directory was probably already patched."
(git apply $pwd/etc/0006-CF-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched CF") || echo "Not patching CF - Directory was probably already patched."
(git apply $pwd/etc/0007-Comprehend-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Comprehend") || echo "Not patching Comprehend - Directory was probably already patched."
# Apply each patch in turn; a failure is reported but does not abort the
# remaining patches. Fixed: the EC2 failure message was missing the word "patch".
(git apply $pwd/etc/0001-Patch-Hardcode-endpoints-to-local-server.patch > /dev/null 2>&1 && echo "Patched endpoints") || echo "!! Not able to patch endpoints"
(git apply $pwd/etc/0002-EC2-reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched EC2") || echo "!! Not able to patch EC2"
(git apply $pwd/etc/0003-Patch-IAM-wait-times.patch > /dev/null 2>&1 && echo "Patched IAM") || echo "!! Not able to patch IAM"
(git apply $pwd/etc/0005-Route53-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Route53") || echo "!! Not able to patch Route53"
(git apply $pwd/etc/0006-CF-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched CF") || echo "!! Not able to patch CF"
(git apply $pwd/etc/0007-Comprehend-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Comprehend") || echo "!! Not able to patch Comprehend"
(git apply $pwd/etc/0008-Patch-RDS-improvements.patch > /dev/null 2>&1 && echo "Patched RDS") || echo "!! Not able to patch RDS"
)
(

View File

@ -0,0 +1,95 @@
From 41f23fcd61cd6d9112f730d54b767e0185997103 Mon Sep 17 00:00:00 2001
From: Bert Blommers <info@bertblommers.nl>
Date: Wed, 5 Apr 2023 12:27:39 +0000
Subject: [PATCH] Patch: RDS improvements
---
internal/service/rds/cluster.go | 12 ++++++------
internal/service/rds/consts.go | 2 +-
internal/service/rds/instance.go | 6 +++---
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go
index e5aeaa106d..c9fc8ffe09 100644
--- a/internal/service/rds/cluster.go
+++ b/internal/service/rds/cluster.go
@@ -1510,8 +1510,8 @@ func waitDBClusterCreated(ctx context.Context, conn *rds.RDS, id string, timeout
Target: []string{ClusterStatusAvailable},
Refresh: statusDBCluster(ctx, conn, id),
Timeout: timeout,
- MinTimeout: 10 * time.Second,
- Delay: 30 * time.Second,
+ MinTimeout: 3 * time.Second,
+ Delay: 3 * time.Second,
}
outputRaw, err := stateConf.WaitForStateContext(ctx)
@@ -1536,8 +1536,8 @@ func waitDBClusterUpdated(ctx context.Context, conn *rds.RDS, id string, timeout
Target: []string{ClusterStatusAvailable},
Refresh: statusDBCluster(ctx, conn, id),
Timeout: timeout,
- MinTimeout: 10 * time.Second,
- Delay: 30 * time.Second,
+ MinTimeout: 3 * time.Second,
+ Delay: 3 * time.Second,
}
outputRaw, err := stateConf.WaitForStateContext(ctx)
@@ -1560,8 +1560,8 @@ func waitDBClusterDeleted(ctx context.Context, conn *rds.RDS, id string, timeout
Target: []string{},
Refresh: statusDBCluster(ctx, conn, id),
Timeout: timeout,
- MinTimeout: 10 * time.Second,
- Delay: 30 * time.Second,
+ MinTimeout: 3 * time.Second,
+ Delay: 3 * time.Second,
}
outputRaw, err := stateConf.WaitForStateContext(ctx)
diff --git a/internal/service/rds/consts.go b/internal/service/rds/consts.go
index dc00aaf5dd..5cc6883a49 100644
--- a/internal/service/rds/consts.go
+++ b/internal/service/rds/consts.go
@@ -215,7 +215,7 @@ func TimeoutAction_Values() []string {
}
const (
- propagationTimeout = 2 * time.Minute
+ propagationTimeout = 2 * time.Second
)
const (
diff --git a/internal/service/rds/instance.go b/internal/service/rds/instance.go
index 6a329b4dd2..a2dcf89ade 100644
--- a/internal/service/rds/instance.go
+++ b/internal/service/rds/instance.go
@@ -2294,7 +2294,7 @@ func findDBInstanceByIDSDKv2(ctx context.Context, conn *rds_sdkv2.Client, id str
func waitDBInstanceAvailableSDKv1(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) { //nolint:unparam
options := tfresource.Options{
PollInterval: 10 * time.Second,
- Delay: 1 * time.Minute,
+ Delay: 1 * time.Second,
ContinuousTargetOccurence: 3,
}
for _, fn := range optFns {
@@ -2337,7 +2337,7 @@ func waitDBInstanceAvailableSDKv1(ctx context.Context, conn *rds.RDS, id string,
func waitDBInstanceAvailableSDKv2(ctx context.Context, conn *rds_sdkv2.Client, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) { //nolint:unparam
options := tfresource.Options{
PollInterval: 10 * time.Second,
- Delay: 1 * time.Minute,
+ Delay: 1 * time.Second,
ContinuousTargetOccurence: 3,
}
for _, fn := range optFns {
@@ -2380,7 +2380,7 @@ func waitDBInstanceAvailableSDKv2(ctx context.Context, conn *rds_sdkv2.Client, i
func waitDBInstanceDeleted(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) { //nolint:unparam
options := tfresource.Options{
PollInterval: 10 * time.Second,
- Delay: 1 * time.Minute,
+ Delay: 1 * time.Second,
ContinuousTargetOccurence: 3,
}
for _, fn := range optFns {
--
2.25.1

View File

@ -0,0 +1,36 @@
### What is the purpose of this folder?
This folder contains git patches for the Terraform repository. When running the Terraform tests against Moto, these patches will be applied automatically.
See http://docs.getmoto.org/en/latest/docs/contributing/development_tips/tests.html#terraform-tests on how to run the tests.
#### What kind of patches are there?
- Patches that set the endpoint to localhost, to ensure the tests are run against Moto
- Patches that reduce the wait time for resources. AWS may take a few minutes before an EC2 instance is spun up, Moto does this immediately - so it's not necessary for Terraform to wait until resources are ready
- etc
#### How do I create a new patch?
- Checkout the repository, and open a terminal in the root-directory
- Go into the Terraform-directory:
```commandline
cd tests/terraformtests/terraform-provider-aws
```
- Ensure the right Terraform-branch is selected, and is clean:
```commandline
git checkout main
git checkout .
```
- Create a new branch:
```commandline
git checkout -b patch-my-changes
```
- Make the required changes.
- Commit your changes
- Create a patch:
```commandline
git format-patch main
```
- Move the created patch-file into this folder
- Update `tests/terraformtests/bin/run_go_test` with the new patch-file

View File

@ -389,6 +389,18 @@ opensearch:
quicksight:
- TestAccQuickSightUser
- TestAccQuickSightGroup_
rds:
- TestAccRDSCluster_basic
- TestAccRDSCluster_disappears
- TestAccRDSCluster_EnabledCloudWatchLogsExports_
- TestAccRDSCluster_enableHTTPEndpoint
- TestAccRDSCluster_engineMode
- TestAccRDSCluster_EngineMode
- TestAccRDSCluster_GlobalClusterIdentifierEngineMode_
- TestAccRDSCluster_identifier
- TestAccRDSCluster_tags
- TestAccRDSGlobalCluster_basic
- TestAccRDSGlobalCluster_storageEncrypted
redshift:
- TestAccRedshiftServiceAccountDataSource
route53|1:

View File

@ -12,7 +12,9 @@ def test_describe():
@mock_neptune
def test_create_global_cluster():
client = boto3.client("neptune", "us-east-1")
resp = client.create_global_cluster(GlobalClusterIdentifier="g-id")["GlobalCluster"]
resp = client.create_global_cluster(
GlobalClusterIdentifier="g-id", Engine="neptune"
)["GlobalCluster"]
resp.should.have.key("GlobalClusterIdentifier").equals("g-id")
resp.should.have.key("GlobalClusterResourceId")
resp.should.have.key("GlobalClusterArn")
@ -33,6 +35,7 @@ def test_create_global_cluster_with_additional_params():
client = boto3.client("neptune", "us-east-1")
resp = client.create_global_cluster(
GlobalClusterIdentifier="g-id",
Engine="neptune",
EngineVersion="1.0",
DeletionProtection=True,
StorageEncrypted=True,
@ -45,8 +48,8 @@ def test_create_global_cluster_with_additional_params():
@mock_neptune
def test_delete_global_cluster():
    """A global cluster can be deleted again right after creation."""
    # The earlier us-east-1 client and the Engine-less create call were
    # superseded leftovers; the backend requires Engine on creation.
    client = boto3.client("neptune", "us-east-2")
    client.create_global_cluster(GlobalClusterIdentifier="g-id2", Engine="neptune")
    client.delete_global_cluster(GlobalClusterIdentifier="g-id2")

View File

@ -0,0 +1,55 @@
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_rds
from moto.core import DEFAULT_ACCOUNT_ID
@mock_rds
def test_create_describe_delete():
    """Full lifecycle of a DB cluster parameter group:
    create -> describe (by name) -> delete -> describe raises NotFound."""
    client = boto3.client("rds", "us-east-2")
    # No groups exist initially.
    groups = client.describe_db_cluster_parameter_groups()["DBClusterParameterGroups"]
    assert len(groups) == 0
    group = client.create_db_cluster_parameter_group(
        DBClusterParameterGroupName="groupname",
        DBParameterGroupFamily="aurora5.6",
        Description="familia",
    )["DBClusterParameterGroup"]
    # The create response echoes the supplied properties plus the derived ARN.
    assert group["DBClusterParameterGroupName"] == "groupname"
    assert group["DBParameterGroupFamily"] == "aurora5.6"
    assert group["Description"] == "familia"
    assert (
        group["DBClusterParameterGroupArn"]
        == f"arn:aws:rds:us-east-2:{DEFAULT_ACCOUNT_ID}:cpg:groupname"
    )
    # Describe-by-name returns the same group.
    groups = client.describe_db_cluster_parameter_groups(
        DBClusterParameterGroupName="groupname",
    )["DBClusterParameterGroups"]
    assert len(groups) == 1
    assert groups[0]["DBClusterParameterGroupName"] == "groupname"
    assert groups[0]["DBParameterGroupFamily"] == "aurora5.6"
    assert groups[0]["Description"] == "familia"
    assert (
        groups[0]["DBClusterParameterGroupArn"]
        == f"arn:aws:rds:us-east-2:{DEFAULT_ACCOUNT_ID}:cpg:groupname"
    )
    client.delete_db_cluster_parameter_group(DBClusterParameterGroupName="groupname")
    # After deletion the unfiltered listing is empty again...
    groups = client.describe_db_cluster_parameter_groups()["DBClusterParameterGroups"]
    assert len(groups) == 0
    # ...and describing the deleted group by name raises DBParameterGroupNotFound.
    with pytest.raises(ClientError) as exc:
        client.describe_db_cluster_parameter_groups(
            DBClusterParameterGroupName="groupname",
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "DBParameterGroupNotFound"
    assert err["Message"] == "DBClusterParameterGroup not found: groupname"

View File

@ -0,0 +1,11 @@
import boto3
from moto import mock_rds
@mock_rds
def test_describe_db_cluster_parameters():
    """Describing cluster parameters for a group yields an empty parameter list."""
    rds = boto3.client("rds", "us-east-2")
    response = rds.describe_db_cluster_parameters(DBClusterParameterGroupName="group")
    assert response["Parameters"] == []

View File

@ -0,0 +1,198 @@
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_rds
from moto.core import DEFAULT_ACCOUNT_ID
@mock_rds
def test_create_global_cluster__not_enough_parameters():
    """A standalone global cluster cannot be created without specifying an engine."""
    rds = boto3.client("rds", "us-east-1")
    with pytest.raises(ClientError) as raised:
        rds.create_global_cluster(GlobalClusterIdentifier="gc1")
    error = raised.value.response["Error"]
    assert error["Code"] == "InvalidParameterValue"
    assert (
        error["Message"]
        == "When creating standalone global cluster, value for engineName should be specified"
    )
@mock_rds
def test_global_cluster_members():
    """A DB cluster created with GlobalClusterIdentifier appears as a member."""
    # WHEN create_global_cluster is called
    # AND create_db_cluster is called with GlobalClusterIdentifier set to the global cluster ARN
    # THEN describe_global_cluster shows the second cluster as part of the GlobalClusterMembers
    # AND describe_db_clusters shows the cluster as normal
    client = boto3.client("rds", "us-east-1")
    global_cluster = client.create_global_cluster(
        GlobalClusterIdentifier="gc1", Engine="aurora-mysql"
    )["GlobalCluster"]
    # The create response carries defaults for version, encryption, protection.
    assert global_cluster["GlobalClusterIdentifier"] == "gc1"
    assert "GlobalClusterResourceId" in global_cluster
    assert (
        global_cluster["GlobalClusterArn"]
        == f"arn:aws:rds::{DEFAULT_ACCOUNT_ID}:global-cluster:gc1"
    )
    assert global_cluster["Status"] == "available"
    assert global_cluster["Engine"] == "aurora-mysql"
    assert global_cluster["EngineVersion"] == "5.7.mysql_aurora.2.11.2"
    assert global_cluster["StorageEncrypted"] is False
    assert global_cluster["DeletionProtection"] is False
    assert global_cluster["GlobalClusterMembers"] == []
    # NOTE(review): the identifier "gc1" is passed here, not the ARN as the
    # comment above suggests - confirm which form the backend accepts.
    resp = client.create_db_cluster(
        DBClusterIdentifier="dbci",
        GlobalClusterIdentifier="gc1",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    cluster_arn = resp["DBClusterArn"]
    resp = client.describe_global_clusters(GlobalClusterIdentifier="gc1")
    assert len(resp["GlobalClusters"]) == 1
    global_cluster = resp["GlobalClusters"][0]
    assert global_cluster["GlobalClusterIdentifier"] == "gc1"
    # The new DB cluster is listed as the sole member, identified by its ARN.
    assert len(global_cluster["GlobalClusterMembers"]) == 1
    assert global_cluster["GlobalClusterMembers"][0]["DBClusterArn"] == cluster_arn
@mock_rds
def test_create_global_cluster_from_regular_cluster():
    """An existing DB cluster can be promoted into a new global cluster."""
    # WHEN create_db_cluster is called
    # AND create_global_cluster is called with SourceDBClusterIdentifier set as the earlier created db cluster
    # THEN that db cluster is elevated to a global cluster
    # AND it still shows up when calling describe_db_clusters
    client = boto3.client("rds", "us-east-1")
    resp = client.create_db_cluster(
        DBClusterIdentifier="dbci",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    cluster_arn = resp["DBClusterArn"]
    # SourceDBClusterIdentifier must be the full ARN of the source cluster.
    client.create_global_cluster(
        GlobalClusterIdentifier="gc1", SourceDBClusterIdentifier=cluster_arn
    )
    resp = client.describe_global_clusters(GlobalClusterIdentifier="gc1")
    assert len(resp["GlobalClusters"]) == 1
    global_cluster = resp["GlobalClusters"][0]
    assert global_cluster["GlobalClusterIdentifier"] == "gc1"
    # The source cluster is immediately a member of the new global cluster.
    assert len(global_cluster["GlobalClusterMembers"]) == 1
    assert global_cluster["GlobalClusterMembers"][0]["DBClusterArn"] == cluster_arn
@mock_rds
def test_create_global_cluster_from_regular_cluster__using_name():
    """SourceDBClusterIdentifier must be a full ARN, not a plain cluster name."""
    rds = boto3.client("rds", "us-east-1")
    with pytest.raises(ClientError) as raised:
        rds.create_global_cluster(
            GlobalClusterIdentifier="gc1", SourceDBClusterIdentifier="dbci"
        )
    error = raised.value.response["Error"]
    assert error["Code"] == "InvalidParameterValue"
    assert error["Message"] == "Malformed db cluster arn dbci"
@mock_rds
def test_create_global_cluster_from_regular_cluster__and_specify_engine():
    """Engine may not be combined with SourceDBClusterIdentifier - it is inherited."""
    client = boto3.client("rds", "us-east-1")
    resp = client.create_db_cluster(
        DBClusterIdentifier="dbci",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    cluster_arn = resp["DBClusterArn"]
    # Supplying Engine alongside a source cluster is rejected.
    with pytest.raises(ClientError) as exc:
        client.create_global_cluster(
            GlobalClusterIdentifier="gc1",
            Engine="aurora-mysql",
            SourceDBClusterIdentifier=cluster_arn,
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "InvalidParameterCombination"
    assert (
        err["Message"]
        == "When creating global cluster from existing db cluster, value for engineName should not be specified since it will be inherited from source cluster"
    )
@mock_rds
def test_delete_non_global_cluster():
    """A global cluster with members cannot be deleted until it is emptied."""
    # WHEN a global cluster contains a regular cluster
    # AND we attempt to delete the global cluster
    # THEN we get an error message
    # An error occurs (InvalidGlobalClusterStateFault) when calling the DeleteGlobalCluster operation: Global Cluster arn:aws:rds::486285699788:global-cluster:g1 is not empty
    client = boto3.client("rds", "us-east-1")
    client.create_global_cluster(GlobalClusterIdentifier="gc1", Engine="aurora-mysql")
    client.create_db_cluster(
        DBClusterIdentifier="dbci",
        GlobalClusterIdentifier="gc1",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    # Deleting while the member still exists fails with a state fault.
    with pytest.raises(ClientError) as exc:
        client.delete_global_cluster(GlobalClusterIdentifier="gc1")
    err = exc.value.response["Error"]
    assert err["Code"] == "InvalidGlobalClusterStateFault"
    assert (
        err["Message"]
        == f"Global Cluster arn:aws:rds::{DEFAULT_ACCOUNT_ID}:global-cluster:gc1 is not empty"
    )
    # Delete the child first
    client.delete_db_cluster(DBClusterIdentifier="dbci")
    # Then we can delete the global cluster
    client.delete_global_cluster(GlobalClusterIdentifier="gc1")
    assert client.describe_global_clusters()["GlobalClusters"] == []
@mock_rds
def test_remove_from_global_cluster():
    """remove_from_global_cluster detaches a member; repeat calls fail silently."""
    client = boto3.client("rds", "us-east-1")
    client.create_global_cluster(GlobalClusterIdentifier="gc1", Engine="aurora-mysql")
    # Assign to the global cluster
    client.create_db_cluster(
        DBClusterIdentifier="dbci",
        GlobalClusterIdentifier="gc1",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )
    # Remove it again
    client.remove_from_global_cluster(
        GlobalClusterIdentifier="gc1",
        DbClusterIdentifier="dbci",
    )
    # Verify it's been removed
    resp = client.describe_global_clusters(GlobalClusterIdentifier="gc1")
    assert len(resp["GlobalClusters"][0]["GlobalClusterMembers"]) == 0
    # Removing a cluster that is no longer a member should fail silently
    client.remove_from_global_cluster(
        GlobalClusterIdentifier="gc1",
        DbClusterIdentifier="dbci",
    )

View File

@ -144,7 +144,6 @@ def test_create_db_cluster__verify_default_properties():
"DatabaseName"
) # This was not supplied, so should not be returned
cluster.should.have.key("AllocatedStorage").equal(1)
cluster.should.have.key("AvailabilityZones")
set(cluster["AvailabilityZones"]).should.equal(
{"eu-north-1a", "eu-north-1b", "eu-north-1c"}
@ -226,6 +225,13 @@ def test_create_db_cluster_additional_parameters():
Port=1234,
DeletionProtection=True,
EnableCloudwatchLogsExports=["audit"],
KmsKeyId="some:kms:arn",
NetworkType="IPV4",
DBSubnetGroupName="subnetgroupname",
ScalingConfiguration={
"MinCapacity": 5,
"AutoPause": True,
},
)
cluster = resp["DBCluster"]
@ -237,6 +243,10 @@ def test_create_db_cluster_additional_parameters():
cluster.should.have.key("Port").equal(1234)
cluster.should.have.key("DeletionProtection").equal(True)
cluster.should.have.key("EnabledCloudwatchLogsExports").equals(["audit"])
assert cluster["KmsKeyId"] == "some:kms:arn"
assert cluster["NetworkType"] == "IPV4"
assert cluster["DBSubnetGroup"] == "subnetgroupname"
assert cluster["ScalingConfigurationInfo"] == {"MinCapacity": 5, "AutoPause": True}
@mock_rds
@ -672,7 +682,6 @@ def test_restore_db_cluster_from_snapshot():
)["DBCluster"]
new_cluster["DBClusterIdentifier"].should.equal("db-restore-1")
new_cluster["DBClusterInstanceClass"].should.equal("db.m1.small")
new_cluster["StorageType"].should.equal("gp2")
new_cluster["Engine"].should.equal("postgres")
new_cluster["DatabaseName"].should.equal("staging-postgres")
new_cluster["Port"].should.equal(1234)
@ -778,7 +787,7 @@ def test_add_tags_to_cluster_snapshot():
@mock_rds
def test_create_db_cluster_with_enable_http_endpoint_valid():
def test_create_serverless_db_cluster():
client = boto3.client("rds", region_name="eu-north-1")
resp = client.create_db_cluster(
@ -792,8 +801,14 @@ def test_create_db_cluster_with_enable_http_endpoint_valid():
EnableHttpEndpoint=True,
)
cluster = resp["DBCluster"]
# This is only true for specific engine versions
cluster.should.have.key("HttpEndpointEnabled").equal(True)
# Verify that a default serverless_configuration is added
assert "ScalingConfigurationInfo" in cluster
assert cluster["ScalingConfigurationInfo"]["MinCapacity"] == 1
assert cluster["ScalingConfigurationInfo"]["MaxCapacity"] == 16
@mock_rds
def test_create_db_cluster_with_enable_http_endpoint_invalid():
@ -810,6 +825,7 @@ def test_create_db_cluster_with_enable_http_endpoint_invalid():
EnableHttpEndpoint=True,
)
cluster = resp["DBCluster"]
# This attribute is ignored if an invalid engine version is supplied
cluster.should.have.key("HttpEndpointEnabled").equal(False)
@ -845,3 +861,43 @@ def test_describe_db_clusters_filter_by_engine():
cluster = clusters[0]
assert cluster["DBClusterIdentifier"] == "id2"
assert cluster["Engine"] == "aurora-postgresql"
@mock_rds
def test_replicate_cluster():
    """A cross-region read replica can be promoted to a standalone cluster."""
    # WHEN create_db_cluster is called
    # AND create_db_cluster is called again with ReplicationSourceIdentifier set to the first cluster
    # THEN promote_read_replica_db_cluster can be called on the second cluster, elevating it to a read/write cluster
    us_east = boto3.client("rds", "us-east-1")
    us_west = boto3.client("rds", "us-west-1")
    original_arn = us_east.create_db_cluster(
        DBClusterIdentifier="dbci",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]["DBClusterArn"]
    replica_arn = us_west.create_db_cluster(
        DBClusterIdentifier="replica_dbci",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
        ReplicationSourceIdentifier=original_arn,
    )["DBCluster"]["DBClusterArn"]
    # Both sides of the replication link are visible while it exists.
    original = us_east.describe_db_clusters()["DBClusters"][0]
    assert original["ReadReplicaIdentifiers"] == [replica_arn]
    replica = us_west.describe_db_clusters()["DBClusters"][0]
    assert replica["ReplicationSourceIdentifier"] == original_arn
    assert replica["MultiAZ"] is True
    us_west.promote_read_replica_db_cluster(DBClusterIdentifier="replica_dbci")
    # Promotion severs the link on both sides and clears MultiAZ on the replica.
    original = us_east.describe_db_clusters()["DBClusters"][0]
    assert original["ReadReplicaIdentifiers"] == []
    replica = us_west.describe_db_clusters()["DBClusters"][0]
    assert "ReplicationSourceIdentifier" not in replica
    assert replica["MultiAZ"] is False

View File

@ -0,0 +1,91 @@
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_rds
@mock_rds
def test_add_instance_as_cluster_member():
    """An instance created with DBClusterIdentifier becomes a cluster member."""
    # When creating a rds instance with DBClusterIdentifier provided,
    # the instance is included as a ClusterMember in the describe_db_clusters call
    client = boto3.client("rds", "us-east-1")
    client.create_db_cluster(
        DBClusterIdentifier="dbci",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    client.create_db_instance(
        DBInstanceIdentifier="dbi",
        DBClusterIdentifier="dbci",
        DBInstanceClass="db.r5.large",
        Engine="aurora-postgresql",
    )
    cluster = client.describe_db_clusters()["DBClusters"][0]
    assert "DBClusterMembers" in cluster
    members = cluster["DBClusterMembers"]
    assert len(members) == 1
    # The first member is the writer, with default tier and in-sync status.
    assert members[0] == {
        "DBInstanceIdentifier": "dbi",
        "IsClusterWriter": True,
        "DBClusterParameterGroupStatus": "in-sync",
        "PromotionTier": 1,
    }
@mock_rds
def test_remove_instance_from_cluster():
    """Deleting a member instance removes it from the cluster's member list."""
    client = boto3.client("rds", "us-east-1")
    client.create_db_cluster(
        DBClusterIdentifier="dbci",
        Engine="mysql",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    client.create_db_instance(
        DBInstanceIdentifier="dbi",
        DBClusterIdentifier="dbci",
        DBInstanceClass="db.r5.large",
        Engine="aurora-postgresql",
    )
    client.delete_db_instance(
        DBInstanceIdentifier="dbi",
        SkipFinalSnapshot=True,
    )
    # After the instance is deleted, the cluster has no members left.
    cluster = client.describe_db_clusters()["DBClusters"][0]
    assert "DBClusterMembers" in cluster
    members = cluster["DBClusterMembers"]
    assert len(members) == 0
@mock_rds
def test_add_instance_to_serverless_cluster():
    """Instances cannot be attached to a serverless Aurora cluster."""
    client = boto3.client("rds", "us-east-1")
    client.create_db_cluster(
        DBClusterIdentifier="dbci",
        Engine="aurora",
        EngineMode="serverless",
        MasterUsername="masterusername",
        MasterUserPassword="hunter2_",
    )["DBCluster"]
    with pytest.raises(ClientError) as exc:
        client.create_db_instance(
            DBInstanceIdentifier="dbi",
            DBClusterIdentifier="dbci",
            DBInstanceClass="db.r5.large",
            Engine="aurora-postgresql",
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "InvalidParameterValue"
    assert err["Message"] == "Instances cannot be added to Aurora Serverless clusters."