diff --git a/docs/docs/services/rds.rst b/docs/docs/services/rds.rst
index d0017fd1f..489c04a8f 100644
--- a/docs/docs/services/rds.rst
+++ b/docs/docs/services/rds.rst
@@ -16,8 +16,8 @@ rds
.. sourcecode:: python
- @mock_rds2
- def test_rds2_behaviour:
+ @mock_rds
+    def test_rds_behaviour():
boto3.client("rds")
...
diff --git a/moto/__init__.py b/moto/__init__.py
index b6be65d11..461afa6fe 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -4,7 +4,12 @@ from contextlib import ContextDecorator
def lazy_load(
- module_name, element, boto3_name=None, backend=None, warn_repurpose=False
+ module_name,
+ element,
+ boto3_name=None,
+ backend=None,
+ warn_repurpose=False,
+ use_instead=None,
):
def f(*args, **kwargs):
if warn_repurpose:
@@ -14,6 +19,14 @@ def lazy_load(
f"Module {element} has been deprecated, and will be repurposed in a later release. "
"Please see https://github.com/spulec/moto/issues/4526 for more information."
)
+ if use_instead:
+ import warnings
+
+ used, recommended = use_instead
+ warnings.warn(
+ f"Module {used} has been deprecated, and will be removed in a later release. Please use {recommended} instead. "
+ "See https://github.com/spulec/moto/issues/4526 for more information."
+ )
module = importlib.import_module(module_name, "moto")
return getattr(module, element)(*args, **kwargs)
@@ -110,8 +123,8 @@ mock_organizations = lazy_load(".organizations", "mock_organizations")
mock_pinpoint = lazy_load(".pinpoint", "mock_pinpoint")
mock_polly = lazy_load(".polly", "mock_polly")
mock_ram = lazy_load(".ram", "mock_ram")
-mock_rds = lazy_load(".rds", "mock_rds", warn_repurpose=True)
-mock_rds2 = lazy_load(".rds2", "mock_rds2", boto3_name="rds")
+mock_rds = lazy_load(".rds", "mock_rds")
+mock_rds2 = lazy_load(".rds", "mock_rds", use_instead=("mock_rds2", "mock_rds"))
mock_redshift = lazy_load(".redshift", "mock_redshift")
mock_redshiftdata = lazy_load(
".redshiftdata", "mock_redshiftdata", boto3_name="redshift-data"
diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index f930e80e0..bfed33bfe 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -33,7 +33,6 @@ from moto.iam import models # noqa # pylint: disable=all
 from moto.kinesis import models # noqa # pylint: disable=all
 from moto.kms import models # noqa # pylint: disable=all
 from moto.rds import models # noqa # pylint: disable=all
-from moto.rds2 import models # noqa # pylint: disable=all
 from moto.redshift import models # noqa # pylint: disable=all
 from moto.route53 import models # noqa # pylint: disable=all
 from moto.s3 import models # noqa # pylint: disable=all
diff --git a/moto/rds/__init__.py b/moto/rds/__init__.py
index d7347b544..f4ad5814a 100644
--- a/moto/rds/__init__.py
+++ b/moto/rds/__init__.py
@@ -1,4 +1,5 @@
from .models import rds_backends
from ..core.models import base_decorator
+rds_backend = rds_backends["us-west-1"]
mock_rds = base_decorator(rds_backends)
diff --git a/moto/rds/exceptions.py b/moto/rds/exceptions.py
index d40493eea..d22fb0e23 100644
--- a/moto/rds/exceptions.py
+++ b/moto/rds/exceptions.py
@@ -1,22 +1,37 @@
-import json
+from jinja2 import Template
from werkzeug.exceptions import BadRequest
class RDSClientError(BadRequest):
def __init__(self, code, message):
super().__init__()
- self.description = json.dumps(
- {
- "Error": {"Code": code, "Message": message, "Type": "Sender"},
- "RequestId": "6876f774-7273-11e4-85dc-39e55ca848d1",
- }
+ template = Template(
+ """
+
+
+ {{ code }}
+ {{ message }}
+ Sender
+
+ 6876f774-7273-11e4-85dc-39e55ca848d1
+ """
)
+ self.description = template.render(code=code, message=message)
class DBInstanceNotFoundError(RDSClientError):
def __init__(self, database_identifier):
super().__init__(
- "DBInstanceNotFound", "Database {0} not found.".format(database_identifier)
+ "DBInstanceNotFound",
+ "DBInstance {0} not found.".format(database_identifier),
+ )
+
+
+class DBSnapshotNotFoundError(RDSClientError):
+ def __init__(self, snapshot_identifier):
+ super().__init__(
+ "DBSnapshotNotFound",
+ "DBSnapshot {} not found.".format(snapshot_identifier),
)
@@ -36,11 +51,145 @@ class DBSubnetGroupNotFoundError(RDSClientError):
)
-class UnformattedGetAttTemplateException(Exception):
- """Duplicated from CloudFormation to prevent circular deps."""
+class DBParameterGroupNotFoundError(RDSClientError):
+ def __init__(self, db_parameter_group_name):
+ super().__init__(
+ "DBParameterGroupNotFound",
+ "DB Parameter Group {0} not found.".format(db_parameter_group_name),
+ )
- description = (
- "Template error: resource {0} does not support attribute type {1} in Fn::GetAtt"
- )
- status_code = 400
+class OptionGroupNotFoundFaultError(RDSClientError):
+ def __init__(self, option_group_name):
+ super().__init__(
+ "OptionGroupNotFoundFault",
+ "Specified OptionGroupName: {0} not found.".format(option_group_name),
+ )
+
+
+class InvalidDBClusterStateFaultError(RDSClientError):
+ def __init__(self, database_identifier):
+ super().__init__(
+ "InvalidDBClusterStateFault",
+            "Invalid DB type, when trying to perform StopDBInstance on {0}. See AWS RDS documentation on rds.stop_db_instance".format(
+ database_identifier
+ ),
+ )
+
+
+class InvalidDBInstanceStateError(RDSClientError):
+ def __init__(self, database_identifier, istate):
+ estate = (
+ "in available state"
+ if istate == "stop"
+ else "stopped, it cannot be started"
+ )
+ super().__init__(
+ "InvalidDBInstanceState",
+ "Instance {} is not {}.".format(database_identifier, estate),
+ )
+
+
+class SnapshotQuotaExceededError(RDSClientError):
+ def __init__(self):
+ super().__init__(
+ "SnapshotQuotaExceeded",
+ "The request cannot be processed because it would exceed the maximum number of snapshots.",
+ )
+
+
+class DBSnapshotAlreadyExistsError(RDSClientError):
+ def __init__(self, database_snapshot_identifier):
+ super().__init__(
+ "DBSnapshotAlreadyExists",
+ "Cannot create the snapshot because a snapshot with the identifier {} already exists.".format(
+ database_snapshot_identifier
+ ),
+ )
+
+
+class InvalidParameterValue(RDSClientError):
+ def __init__(self, message):
+ super().__init__("InvalidParameterValue", message)
+
+
+class InvalidParameterCombination(RDSClientError):
+ def __init__(self, message):
+ super().__init__("InvalidParameterCombination", message)
+
+
+class InvalidDBClusterStateFault(RDSClientError):
+ def __init__(self, message):
+ super().__init__("InvalidDBClusterStateFault", message)
+
+
+class DBClusterNotFoundError(RDSClientError):
+ def __init__(self, cluster_identifier):
+ super().__init__(
+ "DBClusterNotFoundFault",
+ "DBCluster {} not found.".format(cluster_identifier),
+ )
+
+
+class DBClusterSnapshotNotFoundError(RDSClientError):
+ def __init__(self, snapshot_identifier):
+ super().__init__(
+ "DBClusterSnapshotNotFoundFault",
+ "DBClusterSnapshot {} not found.".format(snapshot_identifier),
+ )
+
+
+class DBClusterSnapshotAlreadyExistsError(RDSClientError):
+ def __init__(self, database_snapshot_identifier):
+ super().__init__(
+ "DBClusterSnapshotAlreadyExistsFault",
+ "Cannot create the snapshot because a snapshot with the identifier {} already exists.".format(
+ database_snapshot_identifier
+ ),
+ )
+
+
+class ExportTaskAlreadyExistsError(RDSClientError):
+ def __init__(self, export_task_identifier):
+ super().__init__(
+ "ExportTaskAlreadyExistsFault",
+ "Cannot start export task because a task with the identifier {} already exists.".format(
+ export_task_identifier
+ ),
+ )
+
+
+class ExportTaskNotFoundError(RDSClientError):
+ def __init__(self, export_task_identifier):
+ super().__init__(
+ "ExportTaskNotFoundFault",
+            "Cannot cancel export task because a task with the identifier {} does not exist.".format(
+ export_task_identifier
+ ),
+ )
+
+
+class InvalidExportSourceStateError(RDSClientError):
+ def __init__(self, status):
+ super().__init__(
+ "InvalidExportSourceStateFault",
+ "Export source should be 'available' but current status is {}.".format(
+ status
+ ),
+ )
+
+
+class SubscriptionAlreadyExistError(RDSClientError):
+ def __init__(self, subscription_name):
+ super().__init__(
+ "SubscriptionAlreadyExistFault",
+ "Subscription {} already exists.".format(subscription_name),
+ )
+
+
+class SubscriptionNotFoundError(RDSClientError):
+ def __init__(self, subscription_name):
+ super().__init__(
+ "SubscriptionNotFoundFault",
+ "Subscription {} not found.".format(subscription_name),
+ )
diff --git a/moto/rds/models.py b/moto/rds/models.py
index d74a5684f..d8da6d9af 100644
--- a/moto/rds/models.py
+++ b/moto/rds/models.py
@@ -1,89 +1,502 @@
-from jinja2 import Template
+import copy
+import datetime
+import os
+import random
+import string
-from moto.core import BaseBackend, CloudFormationModel
-from moto.core.utils import BackendDict
+from collections import defaultdict
+from jinja2 import Template
+from re import compile as re_compile
+from collections import OrderedDict
+from moto.core import BaseBackend, BaseModel, CloudFormationModel, ACCOUNT_ID
+
+from moto.core.utils import iso_8601_datetime_with_milliseconds, BackendDict
from moto.ec2.models import ec2_backends
-from moto.rds.exceptions import UnformattedGetAttTemplateException
-from moto.rds2.models import rds2_backends
+from .exceptions import (
+ RDSClientError,
+ DBClusterNotFoundError,
+ DBClusterSnapshotAlreadyExistsError,
+ DBClusterSnapshotNotFoundError,
+ DBInstanceNotFoundError,
+ DBSnapshotNotFoundError,
+ DBSecurityGroupNotFoundError,
+ DBSubnetGroupNotFoundError,
+ DBParameterGroupNotFoundError,
+ OptionGroupNotFoundFaultError,
+ InvalidDBClusterStateFaultError,
+ InvalidDBInstanceStateError,
+ SnapshotQuotaExceededError,
+ DBSnapshotAlreadyExistsError,
+ InvalidParameterValue,
+ InvalidParameterCombination,
+ InvalidDBClusterStateFault,
+ ExportTaskNotFoundError,
+ ExportTaskAlreadyExistsError,
+ InvalidExportSourceStateError,
+ SubscriptionNotFoundError,
+ SubscriptionAlreadyExistError,
+)
+from .utils import FilterDef, apply_filter, merge_filters, validate_filters
+
+
+class Cluster:
+ def __init__(self, **kwargs):
+ self.db_name = kwargs.get("db_name")
+ self.db_cluster_identifier = kwargs.get("db_cluster_identifier")
+ self.db_cluster_instance_class = kwargs.get("db_cluster_instance_class")
+ self.deletion_protection = kwargs.get("deletion_protection")
+ self.engine = kwargs.get("engine")
+ self.engine_version = kwargs.get("engine_version")
+ if not self.engine_version:
+ self.engine_version = Cluster.default_engine_version(self.engine)
+ self.engine_mode = kwargs.get("engine_mode") or "provisioned"
+ self.iops = kwargs.get("iops")
+ self.status = "active"
+ self.region = kwargs.get("region")
+ self.cluster_create_time = iso_8601_datetime_with_milliseconds(
+ datetime.datetime.now()
+ )
+ self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
+ if self.copy_tags_to_snapshot is None:
+ self.copy_tags_to_snapshot = True
+ self.storage_type = kwargs.get("storage_type")
+ if self.storage_type is None:
+ self.storage_type = Cluster.default_storage_type(iops=self.iops)
+ self.allocated_storage = kwargs.get("allocated_storage")
+ if self.allocated_storage is None:
+ self.allocated_storage = Cluster.default_allocated_storage(
+ engine=self.engine, storage_type=self.storage_type
+ )
+ self.master_username = kwargs.get("master_username")
+ if not self.master_username:
+ raise InvalidParameterValue(
+ "The parameter MasterUsername must be provided and must not be blank."
+ )
+ self.master_user_password = kwargs.get("master_user_password")
+ if not self.master_user_password:
+ raise InvalidParameterValue(
+ "The parameter MasterUserPassword must be provided and must not be blank."
+ )
+ if len(self.master_user_password) < 8:
+ raise InvalidParameterValue(
+ "The parameter MasterUserPassword is not a valid password because it is shorter than 8 characters."
+ )
+ self.availability_zones = kwargs.get("availability_zones")
+ if not self.availability_zones:
+ self.availability_zones = [
+ f"{self.region}a",
+ f"{self.region}b",
+ f"{self.region}c",
+ ]
+ self.parameter_group = kwargs.get("parameter_group") or "default.aurora8.0"
+ self.subnet_group = "default"
+ self.status = "creating"
+ self.url_identifier = "".join(
+ random.choice(string.ascii_lowercase + string.digits) for _ in range(12)
+ )
+ self.endpoint = f"{self.db_cluster_identifier}.cluster-{self.url_identifier}.{self.region}.rds.amazonaws.com"
+ self.reader_endpoint = f"{self.db_cluster_identifier}.cluster-ro-{self.url_identifier}.{self.region}.rds.amazonaws.com"
+ self.port = kwargs.get("port")
+ if self.port is None:
+ self.port = Cluster.default_port(self.engine)
+ self.preferred_backup_window = "01:37-02:07"
+ self.preferred_maintenance_window = "wed:02:40-wed:03:10"
+ # This should default to the default security group
+ self.vpc_security_groups = []
+ self.hosted_zone_id = "".join(
+ random.choice(string.ascii_uppercase + string.digits) for _ in range(14)
+ )
+ self.resource_id = "cluster-" + "".join(
+ random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+ )
+ self.tags = kwargs.get("tags", [])
+
+ @property
+ def db_cluster_arn(self):
+ return "arn:aws:rds:{0}:{1}:cluster:{2}".format(
+ self.region, ACCOUNT_ID, self.db_cluster_identifier
+ )
+
+ def to_xml(self):
+ template = Template(
+ """
+ 1
+
+ {% for zone in cluster.availability_zones %}
+ {{ zone }}
+ {% endfor %}
+
+ 1
+ {{ cluster.status }}
+ {% if cluster.db_name %}{{ cluster.db_name }}{% endif %}
+ {{ cluster.db_cluster_identifier }}
+ {{ cluster.parameter_group }}
+ {{ cluster.subnet_group }}
+ {{ cluster.cluster_create_time }}
+ {{ cluster.engine }}
+ {{ cluster.status }}
+ {{ cluster.endpoint }}
+ {{ cluster.reader_endpoint }}
+ false
+ {{ cluster.engine_version }}
+ {{ cluster.port }}
+ {% if cluster.iops %}
+ {{ cluster.iops }}
+ io1
+ {% else %}
+ {{ cluster.storage_type }}
+ {% endif %}
+ {{ cluster.db_cluster_instance_class }}
+ {{ cluster.master_username }}
+ {{ cluster.preferred_backup_window }}
+ {{ cluster.preferred_maintenance_window }}
+
+
+
+ {% for id in cluster.vpc_security_groups %}
+
+ {{ id }}
+ active
+
+ {% endfor %}
+
+ {{ cluster.hosted_zone_id }}
+ false
+ {{ cluster.resource_id }}
+ {{ cluster.db_cluster_arn }}
+
+ false
+ {{ cluster.engine_mode }}
+ {{ 'true' if cluster.deletion_protection else 'false' }}
+ false
+ {{ cluster.copy_tags_to_snapshot }}
+ false
+
+
+ {%- for tag in cluster.tags -%}
+
+ {{ tag['Key'] }}
+ {{ tag['Value'] }}
+
+ {%- endfor -%}
+
+ """
+ )
+ return template.render(cluster=self)
+
+ @staticmethod
+ def default_engine_version(engine):
+ return {
+ "aurora": "5.6.mysql_aurora.1.22.5",
+ "aurora-mysql": "5.7.mysql_aurora.2.07.2",
+ "aurora-postgresql": "12.7",
+ "mysql": "8.0.23",
+ "postgres": "13.4",
+ }[engine]
+
+ @staticmethod
+ def default_port(engine):
+ return {
+ "aurora": 3306,
+ "aurora-mysql": 3306,
+ "aurora-postgresql": 5432,
+ "mysql": 3306,
+ "postgres": 5432,
+ }[engine]
+
+ @staticmethod
+ def default_storage_type(iops):
+ if iops is None:
+ return "gp2"
+ else:
+ return "io1"
+
+ @staticmethod
+ def default_allocated_storage(engine, storage_type):
+ return {
+ "aurora": {"gp2": 0, "io1": 0, "standard": 0},
+ "aurora-mysql": {"gp2": 20, "io1": 100, "standard": 10},
+ "aurora-postgresql": {"gp2": 20, "io1": 100, "standard": 10},
+ "mysql": {"gp2": 20, "io1": 100, "standard": 5},
+ "postgres": {"gp2": 20, "io1": 100, "standard": 5},
+ }[engine][storage_type]
+
+ def get_tags(self):
+ return self.tags
+
+ def add_tags(self, tags):
+ new_keys = [tag_set["Key"] for tag_set in tags]
+ self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+ self.tags.extend(tags)
+ return self.tags
+
+ def remove_tags(self, tag_keys):
+ self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
+
+class ClusterSnapshot(BaseModel):
+
+ SUPPORTED_FILTERS = {
+ "db-cluster-id": FilterDef(
+ ["cluster.db_cluster_arn", "cluster.db_cluster_identifier"],
+ "DB Cluster Identifiers",
+ ),
+ "db-cluster-snapshot-id": FilterDef(
+ ["snapshot_id"], "DB Cluster Snapshot Identifiers"
+ ),
+ "snapshot-type": FilterDef(None, "Snapshot Types"),
+ "engine": FilterDef(["cluster.engine"], "Engine Names"),
+ }
+
+ def __init__(self, cluster, snapshot_id, tags):
+ self.cluster = cluster
+ self.snapshot_id = snapshot_id
+ self.tags = tags
+ self.status = "available"
+ self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
+
+ @property
+ def snapshot_arn(self):
+ return "arn:aws:rds:{0}:{1}:cluster-snapshot:{2}".format(
+ self.cluster.region, ACCOUNT_ID, self.snapshot_id
+ )
+
+ def to_xml(self):
+ template = Template(
+ """
+
+ {{ snapshot.snapshot_id }}
+ {{ snapshot.created_at }}
+ {{ cluster.db_cluster_identifier }}
+ {{ snapshot.created_at }}
+ {{ 100 }}
+ {{ cluster.allocated_storage }}
+ {{ cluster.master_username }}
+ {{ cluster.port }}
+ {{ cluster.engine }}
+ {{ snapshot.status }}
+ manual
+ {{ snapshot.snapshot_arn }}
+ {{ cluster.region }}
+ {% if cluster.iops %}
+ {{ cluster.iops }}
+ io1
+ {% else %}
+ {{ cluster.storage_type }}
+ {% endif %}
+
+ {{ cluster.license_model }}
+
+ """
+ )
+ return template.render(snapshot=self, cluster=self.cluster)
+
+ def get_tags(self):
+ return self.tags
+
+ def add_tags(self, tags):
+ new_keys = [tag_set["Key"] for tag_set in tags]
+ self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+ self.tags.extend(tags)
+ return self.tags
+
+ def remove_tags(self, tag_keys):
+ self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
class Database(CloudFormationModel):
- @classmethod
- def has_cfn_attr(cls, attribute):
- return attribute in ["Endpoint.Address", "Endpoint.Port"]
- def get_cfn_attribute(self, attribute_name):
- if attribute_name == "Endpoint.Address":
- return self.address
- elif attribute_name == "Endpoint.Port":
- return self.port
- raise UnformattedGetAttTemplateException()
+ SUPPORTED_FILTERS = {
+ "db-cluster-id": FilterDef(None, "DB Cluster Identifiers"),
+ "db-instance-id": FilterDef(
+ ["db_instance_arn", "db_instance_identifier"], "DB Instance Identifiers"
+ ),
+ "dbi-resource-id": FilterDef(["dbi_resource_id"], "Dbi Resource Ids"),
+ "domain": FilterDef(None, ""),
+ "engine": FilterDef(["engine"], "Engine Names"),
+ }
- @staticmethod
- def cloudformation_name_type():
- return "DBInstanceIdentifier"
+ default_engine_versions = {
+ "MySQL": "5.6.21",
+ "mysql": "5.6.21",
+ "oracle-se1": "11.2.0.4.v3",
+ "oracle-se": "11.2.0.4.v3",
+ "oracle-ee": "11.2.0.4.v3",
+ "sqlserver-ee": "11.00.2100.60.v1",
+ "sqlserver-se": "11.00.2100.60.v1",
+ "sqlserver-ex": "11.00.2100.60.v1",
+ "sqlserver-web": "11.00.2100.60.v1",
+ "postgres": "9.3.3",
+ }
- @staticmethod
- def cloudformation_type():
- # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html
- return "AWS::RDS::DBInstance"
-
- @classmethod
- def create_from_cloudformation_json(
- cls, resource_name, cloudformation_json, region_name, **kwargs
- ):
- properties = cloudformation_json["Properties"]
-
- db_security_groups = properties.get("DBSecurityGroups")
- if not db_security_groups:
- db_security_groups = []
- security_groups = [group.group_name for group in db_security_groups]
- db_subnet_group = properties.get("DBSubnetGroupName")
- db_subnet_group_name = db_subnet_group.subnet_name if db_subnet_group else None
- db_kwargs = {
- "auto_minor_version_upgrade": properties.get("AutoMinorVersionUpgrade"),
- "allocated_storage": properties.get("AllocatedStorage"),
- "availability_zone": properties.get("AvailabilityZone"),
- "backup_retention_period": properties.get("BackupRetentionPeriod"),
- "db_instance_class": properties.get("DBInstanceClass"),
- "db_instance_identifier": resource_name,
- "db_name": properties.get("DBName"),
- "db_subnet_group_name": db_subnet_group_name,
- "engine": properties.get("Engine"),
- "engine_version": properties.get("EngineVersion"),
- "iops": properties.get("Iops"),
- "kms_key_id": properties.get("KmsKeyId"),
- "master_password": properties.get("MasterUserPassword"),
- "master_username": properties.get("MasterUsername"),
- "multi_az": properties.get("MultiAZ"),
- "port": properties.get("Port", 3306),
- "publicly_accessible": properties.get("PubliclyAccessible"),
- "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"),
- "region": region_name,
- "security_groups": security_groups,
- "storage_encrypted": properties.get("StorageEncrypted"),
- "storage_type": properties.get("StorageType"),
- "tags": properties.get("Tags"),
- }
-
- rds_backend = rds_backends[region_name]
- source_db_identifier = properties.get("SourceDBInstanceIdentifier")
- if source_db_identifier:
- # Replica
- db_kwargs["source_db_identifier"] = source_db_identifier
- database = rds_backend.create_database_replica(db_kwargs)
+ def __init__(self, **kwargs):
+ self.status = "available"
+ self.is_replica = False
+ self.replicas = []
+ self.region = kwargs.get("region")
+ self.engine = kwargs.get("engine")
+ self.engine_version = kwargs.get("engine_version", None)
+ if not self.engine_version and self.engine in self.default_engine_versions:
+ self.engine_version = self.default_engine_versions[self.engine]
+ self.iops = kwargs.get("iops")
+ self.storage_encrypted = kwargs.get("storage_encrypted", False)
+ if self.storage_encrypted:
+ self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id")
else:
- database = rds_backend.create_database(db_kwargs)
- return database
+ self.kms_key_id = kwargs.get("kms_key_id")
+ self.storage_type = kwargs.get("storage_type")
+ if self.storage_type is None:
+ self.storage_type = Database.default_storage_type(iops=self.iops)
+ self.master_username = kwargs.get("master_username")
+ self.master_user_password = kwargs.get("master_user_password")
+ self.auto_minor_version_upgrade = kwargs.get("auto_minor_version_upgrade")
+ if self.auto_minor_version_upgrade is None:
+ self.auto_minor_version_upgrade = True
+ self.allocated_storage = kwargs.get("allocated_storage")
+ if self.allocated_storage is None:
+ self.allocated_storage = Database.default_allocated_storage(
+ engine=self.engine, storage_type=self.storage_type
+ )
+ self.db_instance_identifier = kwargs.get("db_instance_identifier")
+ self.source_db_identifier = kwargs.get("source_db_identifier")
+ self.db_instance_class = kwargs.get("db_instance_class")
+ self.port = kwargs.get("port")
+ if self.port is None:
+ self.port = Database.default_port(self.engine)
+ self.db_instance_identifier = kwargs.get("db_instance_identifier")
+ self.db_name = kwargs.get("db_name")
+ self.instance_create_time = iso_8601_datetime_with_milliseconds(
+ datetime.datetime.now()
+ )
+ self.publicly_accessible = kwargs.get("publicly_accessible")
+ if self.publicly_accessible is None:
+ self.publicly_accessible = True
+ self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
+ if self.copy_tags_to_snapshot is None:
+ self.copy_tags_to_snapshot = False
+ self.backup_retention_period = kwargs.get("backup_retention_period")
+ if self.backup_retention_period is None:
+ self.backup_retention_period = 1
+ self.availability_zone = kwargs.get("availability_zone")
+ self.multi_az = kwargs.get("multi_az")
+ self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
+ if self.db_subnet_group_name:
+ self.db_subnet_group = rds_backends[self.region].describe_subnet_groups(
+ self.db_subnet_group_name
+ )[0]
+ else:
+ self.db_subnet_group = None
+ self.security_groups = kwargs.get("security_groups", [])
+ self.vpc_security_group_ids = kwargs.get("vpc_security_group_ids", [])
+ self.preferred_maintenance_window = kwargs.get(
+ "preferred_maintenance_window", "wed:06:38-wed:07:08"
+ )
+ self.db_parameter_group_name = kwargs.get("db_parameter_group_name")
+ if (
+ self.db_parameter_group_name
+ and not self.is_default_parameter_group(self.db_parameter_group_name)
+ and self.db_parameter_group_name
+ not in rds_backends[self.region].db_parameter_groups
+ ):
+ raise DBParameterGroupNotFoundError(self.db_parameter_group_name)
+
+ self.preferred_backup_window = kwargs.get(
+ "preferred_backup_window", "13:14-13:44"
+ )
+ self.license_model = kwargs.get("license_model", "general-public-license")
+ self.option_group_name = kwargs.get("option_group_name", None)
+ self.option_group_supplied = self.option_group_name is not None
+ if (
+ self.option_group_name
+ and self.option_group_name not in rds_backends[self.region].option_groups
+ ):
+ raise OptionGroupNotFoundFaultError(self.option_group_name)
+ self.default_option_groups = {
+ "MySQL": "default.mysql5.6",
+ "mysql": "default.mysql5.6",
+ "postgres": "default.postgres9.3",
+ }
+ if not self.option_group_name and self.engine in self.default_option_groups:
+ self.option_group_name = self.default_option_groups[self.engine]
+ self.character_set_name = kwargs.get("character_set_name", None)
+ self.enable_iam_database_authentication = kwargs.get(
+ "enable_iam_database_authentication", False
+ )
+ self.dbi_resource_id = "db-M5ENSHXFPU6XHZ4G4ZEI5QIO2U"
+ self.tags = kwargs.get("tags", [])
+ self.deletion_protection = kwargs.get("deletion_protection", False)
+
+ @property
+ def db_instance_arn(self):
+ return "arn:aws:rds:{0}:{1}:db:{2}".format(
+ self.region, ACCOUNT_ID, self.db_instance_identifier
+ )
+
+ @property
+ def physical_resource_id(self):
+ return self.db_instance_identifier
+
+ def db_parameter_groups(self):
+ if not self.db_parameter_group_name or self.is_default_parameter_group(
+ self.db_parameter_group_name
+ ):
+ (
+ db_family,
+ db_parameter_group_name,
+ ) = self.default_db_parameter_group_details()
+ description = "Default parameter group for {0}".format(db_family)
+ return [
+ DBParameterGroup(
+ name=db_parameter_group_name,
+ family=db_family,
+ description=description,
+ tags={},
+ region=self.region,
+ )
+ ]
+ else:
+ if (
+ self.db_parameter_group_name
+ not in rds_backends[self.region].db_parameter_groups
+ ):
+ raise DBParameterGroupNotFoundError(self.db_parameter_group_name)
+
+ return [
+ rds_backends[self.region].db_parameter_groups[
+ self.db_parameter_group_name
+ ]
+ ]
+
+ def is_default_parameter_group(self, param_group_name):
+ return param_group_name.startswith("default.%s" % self.engine.lower())
+
+ def default_db_parameter_group_details(self):
+ if not self.engine_version:
+ return (None, None)
+
+ minor_engine_version = ".".join(str(self.engine_version).rsplit(".")[:-1])
+ db_family = "{0}{1}".format(self.engine.lower(), minor_engine_version)
+
+ return db_family, "default.{0}".format(db_family)
def to_xml(self):
template = Template(
"""
{{ database.backup_retention_period }}
{{ database.status }}
+ {% if database.db_name %}{{ database.db_name }}{% endif %}
{{ database.multi_az }}
-
+
+ {% for vpc_security_group_id in database.vpc_security_group_ids %}
+
+ active
+ {{ vpc_security_group_id }}
+
+ {% endfor %}
+
{{ database.db_instance_identifier }}
+ {{ database.dbi_resource_id }}
+ {{ database.instance_create_time }}
03:50-04:20
wed:06:38-wed:07:08
@@ -105,12 +518,23 @@ class Database(CloudFormationModel):
{{ database.source_db_identifier }}
{% endif %}
{{ database.engine }}
+ {{database.enable_iam_database_authentication|lower }}
{{ database.license_model }}
{{ database.engine_version }}
-
-
+
+ {{ database.option_group_name }}
+ in-sync
+
+
+ {% for db_parameter_group in database.db_parameter_groups() %}
+
+ in-sync
+ {{ db_parameter_group.name }}
+
+ {% endfor %}
+
{% for security_group in database.security_groups %}
@@ -154,29 +578,448 @@ class Database(CloudFormationModel):
{{ database.storage_type }}
{% endif %}
{{ database.db_instance_class }}
- {{ database.instance_create_time }}
{{ database.master_username }}
{{ database.address }}
{{ database.port }}
{{ database.db_instance_arn }}
+
+ {%- for tag in database.tags -%}
+
+ {{ tag['Key'] }}
+ {{ tag['Value'] }}
+
+ {%- endfor -%}
+
+ {{ 'true' if database.deletion_protection else 'false' }}
"""
)
return template.render(database=self)
+ @property
+ def address(self):
+ return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(
+ self.db_instance_identifier, self.region
+ )
+
+ def add_replica(self, replica):
+ self.replicas.append(replica.db_instance_identifier)
+
+ def remove_replica(self, replica):
+ self.replicas.remove(replica.db_instance_identifier)
+
+ def set_as_replica(self):
+ self.is_replica = True
+ self.replicas = []
+
+ def update(self, db_kwargs):
+ for key, value in db_kwargs.items():
+ if value is not None:
+ setattr(self, key, value)
+
+ @classmethod
+ def has_cfn_attr(cls, attribute):
+ return attribute in ["Endpoint.Address", "Endpoint.Port"]
+
+ def get_cfn_attribute(self, attribute_name):
+ # Local import to avoid circular dependency with cloudformation.parsing
+ from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
+
+ if attribute_name == "Endpoint.Address":
+ return self.address
+ elif attribute_name == "Endpoint.Port":
+ return self.port
+ raise UnformattedGetAttTemplateException()
+
+ @staticmethod
+ def default_port(engine):
+ return {
+ "mysql": 3306,
+ "mariadb": 3306,
+ "postgres": 5432,
+ "oracle-ee": 1521,
+ "oracle-se2": 1521,
+ "oracle-se1": 1521,
+ "oracle-se": 1521,
+ "sqlserver-ee": 1433,
+ "sqlserver-ex": 1433,
+ "sqlserver-se": 1433,
+ "sqlserver-web": 1433,
+ }[engine]
+
+ @staticmethod
+ def default_storage_type(iops):
+ if iops is None:
+ return "gp2"
+ else:
+ return "io1"
+
+ @staticmethod
+ def default_allocated_storage(engine, storage_type):
+ return {
+ "aurora": {"gp2": 0, "io1": 0, "standard": 0},
+ "mysql": {"gp2": 20, "io1": 100, "standard": 5},
+ "mariadb": {"gp2": 20, "io1": 100, "standard": 5},
+ "postgres": {"gp2": 20, "io1": 100, "standard": 5},
+ "oracle-ee": {"gp2": 20, "io1": 100, "standard": 10},
+ "oracle-se2": {"gp2": 20, "io1": 100, "standard": 10},
+ "oracle-se1": {"gp2": 20, "io1": 100, "standard": 10},
+ "oracle-se": {"gp2": 20, "io1": 100, "standard": 10},
+ "sqlserver-ee": {"gp2": 200, "io1": 200, "standard": 200},
+ "sqlserver-ex": {"gp2": 20, "io1": 100, "standard": 20},
+ "sqlserver-se": {"gp2": 200, "io1": 200, "standard": 200},
+ "sqlserver-web": {"gp2": 20, "io1": 100, "standard": 20},
+ }[engine][storage_type]
+
+ @staticmethod
+ def cloudformation_name_type():
+ return "DBInstanceIdentifier"
+
+ @staticmethod
+ def cloudformation_type():
+ # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html
+ return "AWS::RDS::DBInstance"
+
+ @classmethod
+ def create_from_cloudformation_json(
+ cls, resource_name, cloudformation_json, region_name, **kwargs
+ ):
+ properties = cloudformation_json["Properties"]
+
+ db_security_groups = properties.get("DBSecurityGroups")
+ if not db_security_groups:
+ db_security_groups = []
+ security_groups = [group.group_name for group in db_security_groups]
+ db_subnet_group = properties.get("DBSubnetGroupName")
+ db_subnet_group_name = db_subnet_group.subnet_name if db_subnet_group else None
+ db_kwargs = {
+ "auto_minor_version_upgrade": properties.get("AutoMinorVersionUpgrade"),
+ "allocated_storage": properties.get("AllocatedStorage"),
+ "availability_zone": properties.get("AvailabilityZone"),
+ "backup_retention_period": properties.get("BackupRetentionPeriod"),
+ "db_instance_class": properties.get("DBInstanceClass"),
+ "db_instance_identifier": resource_name,
+ "db_name": properties.get("DBName"),
+ "db_subnet_group_name": db_subnet_group_name,
+ "engine": properties.get("Engine"),
+ "engine_version": properties.get("EngineVersion"),
+ "iops": properties.get("Iops"),
+ "kms_key_id": properties.get("KmsKeyId"),
+ "master_user_password": properties.get("MasterUserPassword"),
+ "master_username": properties.get("MasterUsername"),
+ "multi_az": properties.get("MultiAZ"),
+ "db_parameter_group_name": properties.get("DBParameterGroupName"),
+ "port": properties.get("Port", 3306),
+ "publicly_accessible": properties.get("PubliclyAccessible"),
+ "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"),
+ "region": region_name,
+ "security_groups": security_groups,
+ "storage_encrypted": properties.get("StorageEncrypted"),
+ "storage_type": properties.get("StorageType"),
+ "tags": properties.get("Tags"),
+ "vpc_security_group_ids": properties.get("VpcSecurityGroupIds", []),
+ }
+
+ rds_backend = rds_backends[region_name]
+ source_db_identifier = properties.get("SourceDBInstanceIdentifier")
+ if source_db_identifier:
+ # Replica
+ db_kwargs["source_db_identifier"] = source_db_identifier
+ database = rds_backend.create_database_replica(db_kwargs)
+ else:
+ database = rds_backend.create_database(db_kwargs)
+ return database
+
+ def to_json(self):
+ template = Template(
+ """{
+ "AllocatedStorage": 10,
+ "AutoMinorVersionUpgrade": "{{ database.auto_minor_version_upgrade }}",
+ "AvailabilityZone": "{{ database.availability_zone }}",
+ "BackupRetentionPeriod": "{{ database.backup_retention_period }}",
+ "CharacterSetName": {%- if database.character_set_name -%}{{ database.character_set_name }}{%- else %} null{%- endif -%},
+ "DBInstanceClass": "{{ database.db_instance_class }}",
+ "DBInstanceIdentifier": "{{ database.db_instance_identifier }}",
+ "DBInstanceStatus": "{{ database.status }}",
+ "DBName": {%- if database.db_name -%}"{{ database.db_name }}"{%- else %} null{%- endif -%},
+ {% if database.db_parameter_group_name -%}"DBParameterGroups": {
+ "DBParameterGroup": {
+ "ParameterApplyStatus": "in-sync",
+ "DBParameterGroupName": "{{ database.db_parameter_group_name }}"
+ }
+ },{%- endif %}
+ "DBSecurityGroups": [
+ {% for security_group in database.security_groups -%}{%- if loop.index != 1 -%},{%- endif -%}
+ {"DBSecurityGroup": {
+ "Status": "active",
+ "DBSecurityGroupName": "{{ security_group }}"
+ }}{% endfor %}
+ ],
+ {%- if database.db_subnet_group -%}{{ database.db_subnet_group.to_json() }},{%- endif %}
+ "Engine": "{{ database.engine }}",
+ "EngineVersion": "{{ database.engine_version }}",
+ "LatestRestorableTime": null,
+ "LicenseModel": "{{ database.license_model }}",
+ "MasterUsername": "{{ database.master_username }}",
+ "MultiAZ": "{{ database.multi_az }}",{% if database.option_group_name %}
+ "OptionGroupMemberships": [{
+ "OptionGroupMembership": {
+ "OptionGroupName": "{{ database.option_group_name }}",
+ "Status": "in-sync"
+ }
+ }],{%- endif %}
+ "PendingModifiedValues": { "MasterUserPassword": "****" },
+ "PreferredBackupWindow": "{{ database.preferred_backup_window }}",
+ "PreferredMaintenanceWindow": "{{ database.preferred_maintenance_window }}",
+ "PubliclyAccessible": "{{ database.publicly_accessible }}",
+ "CopyTagsToSnapshot": "{{ database.copy_tags_to_snapshot }}",
+ "AllocatedStorage": "{{ database.allocated_storage }}",
+ "Endpoint": {
+ "Address": "{{ database.address }}",
+ "Port": "{{ database.port }}"
+ },
+ "InstanceCreateTime": "{{ database.instance_create_time }}",
+ "Iops": null,
+ "ReadReplicaDBInstanceIdentifiers": [{%- for replica in database.replicas -%}
+ {%- if not loop.first -%},{%- endif -%}
+ "{{ replica }}"
+ {%- endfor -%}
+ ],
+ {%- if database.source_db_identifier -%}
+ "ReadReplicaSourceDBInstanceIdentifier": "{{ database.source_db_identifier }}",
+ {%- else -%}
+ "ReadReplicaSourceDBInstanceIdentifier": null,
+ {%- endif -%}
+ "SecondaryAvailabilityZone": null,
+ "StatusInfos": null,
+ "VpcSecurityGroups": [
+ {% for vpc_security_group_id in database.vpc_security_group_ids %}
+ {
+ "Status": "active",
+ "VpcSecurityGroupId": "{{ vpc_security_group_id }}"
+ }
+ {% endfor %}
+ ],
+ "DBInstanceArn": "{{ database.db_instance_arn }}"
+ }"""
+ )
+ return template.render(database=self)
+
+ def get_tags(self):
+ return self.tags
+
+ def add_tags(self, tags):
+ new_keys = [tag_set["Key"] for tag_set in tags]
+ self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+ self.tags.extend(tags)
+ return self.tags
+
+ def remove_tags(self, tag_keys):
+ self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
    def delete(self, region_name):
+        # CloudFormation-driven delete: dispatch to the per-region backend.
        backend = rds_backends[region_name]
        backend.delete_database(self.db_instance_identifier)
+class DatabaseSnapshot(BaseModel):
+    """In-memory model of an RDS DB snapshot taken from a Database."""
+
+    # Filter names accepted by DescribeDBSnapshots, mapped to the attribute
+    # paths they are matched against (consumed by the shared filter helpers).
+    SUPPORTED_FILTERS = {
+        "db-instance-id": FilterDef(
+            ["database.db_instance_arn", "database.db_instance_identifier"],
+            "DB Instance Identifiers",
+        ),
+        "db-snapshot-id": FilterDef(["snapshot_id"], "DB Snapshot Identifiers"),
+        "dbi-resource-id": FilterDef(["database.dbi_resource_id"], "Dbi Resource Ids"),
+        "snapshot-type": FilterDef(None, "Snapshot Types"),
+        "engine": FilterDef(["database.engine"], "Engine Names"),
+    }
+
+    def __init__(self, database, snapshot_id, tags):
+        # Holds a live reference to the source Database; the XML below reads
+        # most fields straight from it rather than copying at snapshot time.
+        self.database = database
+        self.snapshot_id = snapshot_id
+        self.tags = tags
+        self.status = "available"
+        self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
+
+    @property
+    def snapshot_arn(self):
+        # ARN is built from the *database's* region and the shared ACCOUNT_ID.
+        return "arn:aws:rds:{0}:{1}:snapshot:{2}".format(
+            self.database.region, ACCOUNT_ID, self.snapshot_id
+        )
+
+    def to_xml(self):
+        # NOTE(review): the XML tags of this template appear to have been
+        # stripped by the diff rendering; only the Jinja expressions remain.
+        template = Template(
+            """
+            {{ snapshot.snapshot_id }}
+            {{ database.db_instance_identifier }}
+            {{ database.dbi_resource_id }}
+            {{ snapshot.created_at }}
+            {{ database.engine }}
+            {{ database.allocated_storage }}
+            {{ snapshot.status }}
+            {{ database.port }}
+            {{ database.availability_zone }}
+            {{ database.db_subnet_group.vpc_id }}
+            {{ snapshot.created_at }}
+            {{ database.master_username }}
+            {{ database.engine_version }}
+            {{ database.license_model }}
+            manual
+            {% if database.iops %}
+            {{ database.iops }}
+            io1
+            {% else %}
+            {{ database.storage_type }}
+            {% endif %}
+            {{ database.option_group_name }}
+            {{ 100 }}
+            {{ database.region }}
+
+
+            {{ database.storage_encrypted }}
+            {{ database.kms_key_id }}
+            {{ snapshot.snapshot_arn }}
+
+            {{ database.enable_iam_database_authentication|lower }}
+            """
+        )
+        return template.render(snapshot=self, database=self.database)
+
+    def get_tags(self):
+        """Return this snapshot's tags as a list of {"Key", "Value"} dicts."""
+        return self.tags
+
+    def add_tags(self, tags):
+        """Add/overwrite tags; an incoming Key replaces any existing entry."""
+        new_keys = [tag_set["Key"] for tag_set in tags]
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+        self.tags.extend(tags)
+        return self.tags
+
+    def remove_tags(self, tag_keys):
+        """Remove every tag whose Key appears in tag_keys."""
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
+
+class ExportTask(BaseModel):
+    """Models a StartExportTask job exporting a snapshot to S3 (always 'available')."""
+
+    def __init__(self, snapshot, kwargs):
+        self.snapshot = snapshot
+
+        self.export_task_identifier = kwargs.get("export_task_identifier")
+        self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id")
+        self.source_arn = kwargs.get("source_arn")
+        self.iam_role_arn = kwargs.get("iam_role_arn")
+        self.s3_bucket_name = kwargs.get("s3_bucket_name")
+        self.s3_prefix = kwargs.get("s3_prefix", "")
+        # Optional list of tables/schemas to restrict the export to.
+        self.export_only = kwargs.get("export_only", [])
+
+        # The mock completes immediately; no async progress is simulated.
+        self.status = "available"
+        self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
+
+    def to_xml(self):
+        # NOTE(review): markup stripped by the diff rendering; the template
+        # reports 100% progress and a fixed size of 1 (see {{ 100 }}, {{ 1 }}).
+        template = Template(
+            """
+            {{ task.export_task_identifier }}
+            {{ snapshot.snapshot_arn }}
+            {{ task.created_at }}
+            {{ task.created_at }}
+            {{ snapshot.created_at }}
+            {{ task.s3_bucket_name }}
+            {{ task.s3_prefix }}
+            {{ task.iam_role_arn }}
+            {{ task.kms_key_id }}
+            {%- if task.export_only -%}
+
+            {%- for table in task.export_only -%}
+            {{ table }}
+            {%- endfor -%}
+
+            {%- endif -%}
+            {{ task.status }}
+            {{ 100 }}
+            {{ 1 }}
+
+
+            """
+        )
+        return template.render(task=self, snapshot=self.snapshot)
+
+
+
+class EventSubscription(BaseModel):
+    """Models an RDS event subscription (SNS notifications for RDS events)."""
+
+    def __init__(self, kwargs):
+        self.subscription_name = kwargs.get("subscription_name")
+        self.sns_topic_arn = kwargs.get("sns_topic_arn")
+        self.source_type = kwargs.get("source_type")
+        self.event_categories = kwargs.get("event_categories", [])
+        self.source_ids = kwargs.get("source_ids", [])
+        self.enabled = kwargs.get("enabled", True)
+        # BUG FIX: the default used to be True (a bool, copied from the
+        # `enabled` line above). get_tags() then returned True and the tag
+        # loops in to_xml()/add_tags() failed. Tags must default to a list.
+        self.tags = kwargs.get("tags", [])
+
+        # NOTE(review): region is left empty here, so es_arn renders with an
+        # empty region segment -- confirm whether the backend sets it later.
+        self.region = ""
+        self.customer_aws_id = copy.copy(ACCOUNT_ID)
+        self.status = "available"
+        self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
+
+    @property
+    def es_arn(self):
+        return "arn:aws:rds:{0}:{1}:es:{2}".format(
+            self.region, ACCOUNT_ID, self.subscription_name
+        )
+
+    def to_xml(self):
+        # NOTE(review): markup stripped by the diff rendering; only the Jinja
+        # expressions remain.
+        template = Template(
+            """
+
+            {{ subscription.customer_aws_id }}
+            {{ subscription.subscription_name }}
+            {{ subscription.sns_topic_arn }}
+            {{ subscription.created_at }}
+            {{ subscription.source_type }}
+
+            {%- for source_id in subscription.source_ids -%}
+            {{ source_id }}
+            {%- endfor -%}
+
+
+            {%- for category in subscription.event_categories -%}
+            {{ category }}
+            {%- endfor -%}
+
+            {{ subscription.status }}
+            {{ subscription.enabled }}
+            {{ subscription.es_arn }}
+
+            {%- for tag in subscription.tags -%}
+            {{ tag['Key'] }}{{ tag['Value'] }}
+            {%- endfor -%}
+
+
+            """
+        )
+        return template.render(subscription=self)
+
+    def get_tags(self):
+        """Return this subscription's tags as a list of {"Key", "Value"} dicts."""
+        return self.tags
+
+    def add_tags(self, tags):
+        """Add/overwrite tags; an incoming Key replaces any existing entry."""
+        new_keys = [tag_set["Key"] for tag_set in tags]
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+        self.tags.extend(tags)
+        return self.tags
+
+    def remove_tags(self, tag_keys):
+        """Remove every tag whose Key appears in tag_keys."""
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
+
+
class SecurityGroup(CloudFormationModel):
-    def __init__(self, group_name, description):
+    def __init__(self, group_name, description, tags):
        self.group_name = group_name
        self.description = description
        self.status = "authorized"
        self.ip_ranges = []
        self.ec2_security_groups = []
+        # Tags now travel with the group so ListTagsForResource works on it.
+        self.tags = tags
+        self.owner_id = ACCOUNT_ID
+        # No VPC association is modelled yet; rendered as null in to_json().
+        self.vpc_id = None
def to_xml(self):
template = Template(
@@ -207,6 +1050,23 @@ class SecurityGroup(CloudFormationModel):
)
return template.render(security_group=self)
+    def to_json(self):
+        """Render this security group as a JSON fragment via a Jinja template."""
+        template = Template(
+            """{
+      "DBSecurityGroupDescription": "{{ security_group.description }}",
+      "DBSecurityGroupName": "{{ security_group.group_name }}",
+      "EC2SecurityGroups": {{ security_group.ec2_security_groups }},
+      "IPRanges": [{%- for ip in security_group.ip_ranges -%}
+          {%- if loop.index != 1 -%},{%- endif -%}
+          "{{ ip }}"
+      {%- endfor -%}
+      ],
+      "OwnerId": "{{ security_group.owner_id }}",
+      "VpcId": "{{ security_group.vpc_id }}"
+    }"""
+        )
+        return template.render(security_group=self)
+
+
    def authorize_cidr(self, cidr_ip):
+        # Grants ingress for a CIDR range; no validation of the CIDR string.
        self.ip_ranges.append(cidr_ip)
@@ -237,7 +1097,6 @@ class SecurityGroup(CloudFormationModel):
security_group = rds_backend.create_security_group(
group_name, description, tags
)
-
for security_group_ingress in security_group_ingress_rules:
for ingress_type, ingress_value in security_group_ingress.items():
if ingress_type == "CIDRIP":
@@ -250,18 +1109,30 @@ class SecurityGroup(CloudFormationModel):
security_group.authorize_security_group(subnet)
return security_group
+    def get_tags(self):
+        """Return this group's tags as a list of {"Key", "Value"} dicts."""
+        return self.tags
+
+    def add_tags(self, tags):
+        """Add/overwrite tags; an incoming Key replaces any existing entry."""
+        new_keys = [tag_set["Key"] for tag_set in tags]
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+        self.tags.extend(tags)
+        return self.tags
+
+    def remove_tags(self, tag_keys):
+        """Remove every tag whose Key appears in tag_keys."""
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
    def delete(self, region_name):
+        # CloudFormation-driven delete: dispatch to the per-region backend.
        backend = rds_backends[region_name]
        backend.delete_security_group(self.group_name)
class SubnetGroup(CloudFormationModel):
-    def __init__(self, subnet_name, description, subnets):
+    def __init__(self, subnet_name, description, subnets, tags):
        self.subnet_name = subnet_name
        self.description = description
        self.subnets = subnets
        self.status = "Complete"
-
+        # Tags now travel with the group so ListTagsForResource works on it.
+        self.tags = tags
+        # All subnets of a group belong to one VPC; take it from the first.
+        # NOTE(review): assumes `subnets` is non-empty -- an empty list raises
+        # IndexError here; presumably validated by the caller.
        self.vpc_id = self.subnets[0].vpc_id
def to_xml(self):
@@ -287,6 +1158,29 @@ class SubnetGroup(CloudFormationModel):
)
return template.render(subnet_group=self)
+    def to_json(self):
+        """Render this subnet group as a JSON fragment via a Jinja template."""
+        template = Template(
+            """"DBSubnetGroup": {
+        "VpcId": "{{ subnet_group.vpc_id }}",
+        "SubnetGroupStatus": "{{ subnet_group.status }}",
+        "DBSubnetGroupDescription": "{{ subnet_group.description }}",
+        "DBSubnetGroupName": "{{ subnet_group.subnet_name }}",
+        "Subnets": {
+            "Subnet": [
+            {% for subnet in subnet_group.subnets %}{
+              "SubnetStatus": "Active",
+              "SubnetIdentifier": "{{ subnet.id }}",
+              "SubnetAvailabilityZone": {
+                "Name": "{{ subnet.availability_zone }}",
+                "ProvisionedIopsCapable": "false"
+              }
+            }{%- if not loop.last -%},{%- endif -%}{% endfor %}
+            ]
+        }
+    }"""
+        )
+        return template.render(subnet_group=self)
+
+
    @staticmethod
    def cloudformation_name_type():
+        # CloudFormation property that names this resource type.
        return "DBSubnetGroupName"
@@ -301,7 +1195,7 @@ class SubnetGroup(CloudFormationModel):
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
- subnet_name = resource_name.lower()
+
description = properties["DBSubnetGroupDescription"]
subnet_ids = properties["SubnetIds"]
tags = properties.get("Tags")
@@ -310,10 +1204,22 @@ class SubnetGroup(CloudFormationModel):
subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids]
rds_backend = rds_backends[region_name]
subnet_group = rds_backend.create_subnet_group(
- subnet_name, description, subnets, tags
+ resource_name, description, subnets, tags
)
return subnet_group
+    def get_tags(self):
+        """Return this subnet group's tags as a list of {"Key", "Value"} dicts."""
+        return self.tags
+
+    def add_tags(self, tags):
+        """Add/overwrite tags; an incoming Key replaces any existing entry."""
+        new_keys = [tag_set["Key"] for tag_set in tags]
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
+        self.tags.extend(tags)
+        return self.tags
+
+    def remove_tags(self, tag_keys):
+        """Remove every tag whose Key appears in tag_keys."""
+        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
    def delete(self, region_name):
+        # CloudFormation-driven delete: dispatch to the per-region backend.
        backend = rds_backends[region_name]
        backend.delete_subnet_group(self.subnet_name)
@@ -322,19 +1228,1064 @@ class SubnetGroup(CloudFormationModel):
class RDSBackend(BaseBackend):
    def __init__(self, region):
        self.region = region
-
-    def __getattr__(self, attr):
-        return self.rds2_backend().__getattribute__(attr)
+        # Matches ARNs of every RDS resource type this backend models
+        # (db, cluster, event subscription, option/parameter group, ...).
+        self.arn_regex = re_compile(
+            r"^arn:aws:rds:.*:[0-9]*:(db|cluster|es|og|pg|ri|secgrp|snapshot|cluster-snapshot|subgrp):.*$"
+        )
+        # OrderedDicts preserve creation order for Describe* responses.
+        self.clusters = OrderedDict()
+        self.databases = OrderedDict()
+        self.database_snapshots = OrderedDict()
+        self.cluster_snapshots = OrderedDict()
+        self.export_tasks = OrderedDict()
+        self.event_subscriptions = OrderedDict()
+        self.db_parameter_groups = {}
+        self.option_groups = {}
+        self.security_groups = {}
+        self.subnet_groups = {}
    def reset(self):
        # preserve region
        region = self.region
-        self.rds2_backend().reset()
+        # Wipe all state and rebuild the backend in place for the same region.
        self.__dict__ = {}
        self.__init__(region)
- def rds2_backend(self):
- return rds2_backends[self.region]
+    @staticmethod
+    def default_vpc_endpoint_service(service_region, zones):
+        """Default VPC endpoint service."""
+        # RDS exposes two endpoint services: the API itself and the Data API.
+        return BaseBackend.default_vpc_endpoint_service_factory(
+            service_region, zones, "rds"
+        ) + BaseBackend.default_vpc_endpoint_service_factory(
+            service_region, zones, "rds-data"
+        )
+
+
+    def create_database(self, db_kwargs):
+        """Create a Database from the kwargs dict and register it by identifier."""
+        database_id = db_kwargs["db_instance_identifier"]
+        database = Database(**db_kwargs)
+        # NOTE(review): an existing instance with the same identifier is
+        # silently replaced; no AlreadyExists error is raised here.
+        self.databases[database_id] = database
+        return database
+
+
+    def create_database_snapshot(
+        self, db_instance_identifier, db_snapshot_identifier, tags=None
+    ):
+        """Snapshot an existing instance; enforces uniqueness and a quota.
+
+        Raises DBInstanceNotFoundError, DBSnapshotAlreadyExistsError, or
+        SnapshotQuotaExceededError.
+        """
+        database = self.databases.get(db_instance_identifier)
+        if not database:
+            raise DBInstanceNotFoundError(db_instance_identifier)
+        if db_snapshot_identifier in self.database_snapshots:
+            raise DBSnapshotAlreadyExistsError(db_snapshot_identifier)
+        # Quota is overridable for tests via MOTO_RDS_SNAPSHOT_LIMIT.
+        if len(self.database_snapshots) >= int(
+            os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
+        ):
+            raise SnapshotQuotaExceededError()
+        if tags is None:
+            tags = list()
+        # Honor the instance's CopyTagsToSnapshot setting when no explicit
+        # tags were supplied.
+        if database.copy_tags_to_snapshot and not tags:
+            tags = database.get_tags()
+        snapshot = DatabaseSnapshot(database, db_snapshot_identifier, tags)
+        self.database_snapshots[db_snapshot_identifier] = snapshot
+        return snapshot
+
+
+    def copy_database_snapshot(
+        self, source_snapshot_identifier, target_snapshot_identifier, tags=None,
+    ):
+        """Copy a snapshot under a new identifier, merging/inheriting tags.
+
+        Raises DBSnapshotNotFoundError, DBSnapshotAlreadyExistsError, or
+        SnapshotQuotaExceededError.
+        """
+        if source_snapshot_identifier not in self.database_snapshots:
+            raise DBSnapshotNotFoundError(source_snapshot_identifier)
+        if target_snapshot_identifier in self.database_snapshots:
+            raise DBSnapshotAlreadyExistsError(target_snapshot_identifier)
+        if len(self.database_snapshots) >= int(
+            os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
+        ):
+            raise SnapshotQuotaExceededError()
+
+        source_snapshot = self.database_snapshots[source_snapshot_identifier]
+        if tags is None:
+            # No tags given: the copy inherits the source snapshot's tags.
+            tags = source_snapshot.tags
+        else:
+            tags = self._merge_tags(source_snapshot.tags, tags)
+        # The copy points at the same underlying Database as the source.
+        target_snapshot = DatabaseSnapshot(
+            source_snapshot.database, target_snapshot_identifier, tags
+        )
+        self.database_snapshots[target_snapshot_identifier] = target_snapshot
+
+        return target_snapshot
+
+
+    def delete_database_snapshot(self, db_snapshot_identifier):
+        """Remove and return a snapshot; raises DBSnapshotNotFoundError if absent."""
+        if db_snapshot_identifier not in self.database_snapshots:
+            raise DBSnapshotNotFoundError(db_snapshot_identifier)
+
+        return self.database_snapshots.pop(db_snapshot_identifier)
+
+
+    def create_database_replica(self, db_kwargs):
+        """Create a read replica of an existing instance (possibly cross-region)."""
+        database_id = db_kwargs["db_instance_identifier"]
+        source_database_id = db_kwargs["source_db_identifier"]
+        primary = self.find_db_from_id(source_database_id)
+        # A full ARN as source means a cross-region request: the replica
+        # lives in *this* backend's region.
+        if self.arn_regex.match(source_database_id):
+            db_kwargs["region"] = self.region
+
+        # Shouldn't really copy here as the instance is duplicated. RDS replicas have different instances.
+        replica = copy.copy(primary)
+        replica.update(db_kwargs)
+        replica.set_as_replica()
+        self.databases[database_id] = replica
+        primary.add_replica(replica)
+        return replica
+
+
+    def describe_databases(self, db_instance_identifier=None, filters=None):
+        """List instances, optionally narrowed by identifier and/or filters.
+
+        Raises DBInstanceNotFoundError when an explicit identifier matches
+        nothing.
+        """
+        databases = self.databases
+        if db_instance_identifier:
+            # Fold the identifier into the generic filter machinery.
+            filters = merge_filters(
+                filters, {"db-instance-id": [db_instance_identifier]}
+            )
+        if filters:
+            databases = self._filter_resources(databases, filters, Database)
+        if db_instance_identifier and not databases:
+            raise DBInstanceNotFoundError(db_instance_identifier)
+        return list(databases.values())
+
+
+    def describe_database_snapshots(
+        self, db_instance_identifier, db_snapshot_identifier, filters=None
+    ):
+        """List snapshots narrowed by instance id, snapshot id and/or filters.
+
+        Not-found is only raised for an explicit snapshot id *without* an
+        instance id, matching the AWS API's behaviour.
+        """
+        snapshots = self.database_snapshots
+        if db_instance_identifier:
+            filters = merge_filters(
+                filters, {"db-instance-id": [db_instance_identifier]}
+            )
+        if db_snapshot_identifier:
+            filters = merge_filters(
+                filters, {"db-snapshot-id": [db_snapshot_identifier]}
+            )
+        if filters:
+            snapshots = self._filter_resources(snapshots, filters, DatabaseSnapshot)
+        if db_snapshot_identifier and not snapshots and not db_instance_identifier:
+            raise DBSnapshotNotFoundError(db_snapshot_identifier)
+        return list(snapshots.values())
+
+
+    def modify_database(self, db_instance_identifier, db_kwargs):
+        """Apply ModifyDBInstance changes, handling identifier renames."""
+        database = self.describe_databases(db_instance_identifier)[0]
+        if "new_db_instance_identifier" in db_kwargs:
+            # Rename: re-key the registry and rewrite the kwargs so the
+            # update below sees the new identifier.
+            del self.databases[db_instance_identifier]
+            db_instance_identifier = db_kwargs[
+                "db_instance_identifier"
+            ] = db_kwargs.pop("new_db_instance_identifier")
+            self.databases[db_instance_identifier] = database
+        database.update(db_kwargs)
+        return database
+
+
+    def reboot_db_instance(self, db_instance_identifier):
+        """Validate the instance exists; the mock reboot is otherwise a no-op."""
+        database = self.describe_databases(db_instance_identifier)[0]
+        return database
+
+
+    def restore_db_instance_from_db_snapshot(self, from_snapshot_id, overrides):
+        """Create a new instance from a snapshot's source-database attributes,
+        applying any truthy values from `overrides` on top."""
+        snapshot = self.describe_database_snapshots(
+            db_instance_identifier=None, db_snapshot_identifier=from_snapshot_id
+        )[0]
+        original_database = snapshot.database
+        # Deep-copy the attribute dict so the restored instance does not
+        # share mutable state with the original.
+        new_instance_props = copy.deepcopy(original_database.__dict__)
+        if not original_database.option_group_supplied:
+            # If the option group is not supplied originally, the 'option_group_name' will receive a default value
+            # Force this reconstruction, and prevent any validation on the default value
+            del new_instance_props["option_group_name"]
+
+        # NOTE(review): falsy override values (0, "", False) are ignored here.
+        for key, value in overrides.items():
+            if value:
+                new_instance_props[key] = value
+
+        return self.create_database(new_instance_props)
+
+
+    def stop_database(self, db_instance_identifier, db_snapshot_identifier=None):
+        """Stop an instance, optionally snapshotting it first.
+
+        Raises InvalidDBClusterStateFaultError for replicas / multi-AZ SQL
+        Server, and InvalidDBInstanceStateError if not currently available.
+        """
+        database = self.describe_databases(db_instance_identifier)[0]
+        # todo: certain rds types not allowed to be stopped at this time.
+        # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html#USER_StopInstance.Limitations
+        if database.is_replica or (
+            database.multi_az and database.engine.lower().startswith("sqlserver")
+        ):
+            # todo: more db types not supported by stop/start instance api
+            raise InvalidDBClusterStateFaultError(db_instance_identifier)
+        if database.status != "available":
+            raise InvalidDBInstanceStateError(db_instance_identifier, "stop")
+        if db_snapshot_identifier:
+            self.create_database_snapshot(
+                db_instance_identifier, db_snapshot_identifier
+            )
+        database.status = "stopped"
+        return database
+
+
+    def start_database(self, db_instance_identifier):
+        """Start a stopped instance; raises InvalidDBInstanceStateError otherwise."""
+        database = self.describe_databases(db_instance_identifier)[0]
+        # todo: bunch of different error messages to be generated from this api call
+        if database.status != "stopped":
+            raise InvalidDBInstanceStateError(db_instance_identifier, "start")
+        database.status = "available"
+        return database
+
+
+    def find_db_from_id(self, db_id):
+        """Resolve a plain identifier or a full ARN to a Database.
+
+        ARNs are routed to the backend of the region embedded in the ARN
+        (4th colon-separated field); plain ids resolve against this backend.
+        """
+        if self.arn_regex.match(db_id):
+            arn_breakdown = db_id.split(":")
+            region = arn_breakdown[3]
+            backend = rds_backends[region]
+            db_name = arn_breakdown[-1]
+        else:
+            backend = self
+            db_name = db_id
+
+        return backend.describe_databases(db_name)[0]
+
+
+    def delete_database(self, db_instance_identifier, db_snapshot_name=None):
+        """Delete an instance, honouring deletion protection and detaching it
+        from its replication primary; optionally take a final snapshot."""
+        if db_instance_identifier in self.databases:
+            if self.databases[db_instance_identifier].deletion_protection:
+                raise InvalidParameterValue(
+                    "Can't delete Instance with protection enabled"
+                )
+            if db_snapshot_name:
+                self.create_database_snapshot(db_instance_identifier, db_snapshot_name)
+            database = self.databases.pop(db_instance_identifier)
+            if database.is_replica:
+                # Unregister this replica from its source instance.
+                primary = self.find_db_from_id(database.source_db_identifier)
+                primary.remove_replica(database)
+            database.status = "deleting"
+            return database
+        else:
+            raise DBInstanceNotFoundError(db_instance_identifier)
+
+
+    def create_security_group(self, group_name, description, tags):
+        """Create and register a DB security group keyed by name."""
+        security_group = SecurityGroup(group_name, description, tags)
+        self.security_groups[group_name] = security_group
+        return security_group
+
+
+    def describe_security_groups(self, security_group_name):
+        """Return the named group (as a 1-element list) or all groups.
+
+        Raises DBSecurityGroupNotFoundError for an unknown name.
+        """
+        if security_group_name:
+            if security_group_name in self.security_groups:
+                return [self.security_groups[security_group_name]]
+            else:
+                raise DBSecurityGroupNotFoundError(security_group_name)
+        # NOTE(review): returns a dict view here, while the single-name path
+        # returns a list -- callers should only iterate the result.
+        return self.security_groups.values()
+
+
+    def delete_security_group(self, security_group_name):
+        """Remove and return a security group; raises DBSecurityGroupNotFoundError."""
+        if security_group_name in self.security_groups:
+            return self.security_groups.pop(security_group_name)
+        else:
+            raise DBSecurityGroupNotFoundError(security_group_name)
+
+
+    def delete_db_parameter_group(self, db_parameter_group_name):
+        """Remove and return a parameter group; raises DBParameterGroupNotFoundError."""
+        if db_parameter_group_name in self.db_parameter_groups:
+            return self.db_parameter_groups.pop(db_parameter_group_name)
+        else:
+            raise DBParameterGroupNotFoundError(db_parameter_group_name)
+
+
+    def authorize_security_group(self, security_group_name, cidr_ip):
+        """Authorize a CIDR range on an existing security group."""
+        security_group = self.describe_security_groups(security_group_name)[0]
+        security_group.authorize_cidr(cidr_ip)
+        return security_group
+
+
+    def create_subnet_group(self, subnet_name, description, subnets, tags):
+        """Create and register a DB subnet group keyed by name."""
+        subnet_group = SubnetGroup(subnet_name, description, subnets, tags)
+        self.subnet_groups[subnet_name] = subnet_group
+        return subnet_group
+
+
+    def describe_subnet_groups(self, subnet_group_name):
+        """Return the named subnet group (as a 1-element list) or all groups.
+
+        Raises DBSubnetGroupNotFoundError for an unknown name.
+        """
+        if subnet_group_name:
+            if subnet_group_name in self.subnet_groups:
+                return [self.subnet_groups[subnet_group_name]]
+            else:
+                raise DBSubnetGroupNotFoundError(subnet_group_name)
+        # NOTE(review): dict view vs. list, same asymmetry as security groups.
+        return self.subnet_groups.values()
+
+
+    def modify_db_subnet_group(self, subnet_name, description, subnets):
+        """Replace a subnet group's subnets (and optionally its description).
+
+        Raises DBSubnetGroupNotFoundError for an unknown name.
+        """
+        # BUG FIX: this used .pop(subnet_name), which (a) raised a raw
+        # KeyError for a missing name -- making the NotFound branch below
+        # unreachable -- and (b) removed the group from the backend without
+        # ever re-inserting it, so it vanished after a successful modify.
+        subnet_group = self.subnet_groups.get(subnet_name)
+        if not subnet_group:
+            raise DBSubnetGroupNotFoundError(subnet_name)
+        subnet_group.subnet_name = subnet_name
+        subnet_group.subnets = subnets
+        if description is not None:
+            subnet_group.description = description
+        return subnet_group
+
+
+    def delete_subnet_group(self, subnet_name):
+        """Remove and return a subnet group; raises DBSubnetGroupNotFoundError."""
+        if subnet_name in self.subnet_groups:
+            return self.subnet_groups.pop(subnet_name)
+        else:
+            raise DBSubnetGroupNotFoundError(subnet_name)
+
+
+    def create_option_group(self, option_group_kwargs):
+        """Create an option group after validating name uniqueness,
+        description, engine name and engine/major-version compatibility."""
+        option_group_id = option_group_kwargs["name"]
+        # Engines (and their major versions) that may own an option group.
+        valid_option_group_engines = {
+            "mariadb": ["10.0", "10.1", "10.2", "10.3"],
+            "mysql": ["5.5", "5.6", "5.7", "8.0"],
+            "oracle-se2": ["11.2", "12.1", "12.2"],
+            "oracle-se1": ["11.2", "12.1", "12.2"],
+            "oracle-se": ["11.2", "12.1", "12.2"],
+            "oracle-ee": ["11.2", "12.1", "12.2"],
+            "sqlserver-se": ["10.50", "11.00"],
+            "sqlserver-ee": ["10.50", "11.00"],
+            "sqlserver-ex": ["10.50", "11.00"],
+            "sqlserver-web": ["10.50", "11.00"],
+        }
+        if option_group_kwargs["name"] in self.option_groups:
+            raise RDSClientError(
+                "OptionGroupAlreadyExistsFault",
+                "An option group named {0} already exists.".format(
+                    option_group_kwargs["name"]
+                ),
+            )
+        if (
+            "description" not in option_group_kwargs
+            or not option_group_kwargs["description"]
+        ):
+            raise RDSClientError(
+                "InvalidParameterValue",
+                "The parameter OptionGroupDescription must be provided and must not be blank.",
+            )
+        # NOTE(review): the error text hard-codes 'non-existent' instead of
+        # echoing the rejected engine name -- presumably intentional/legacy.
+        if option_group_kwargs["engine_name"] not in valid_option_group_engines.keys():
+            raise RDSClientError(
+                "InvalidParameterValue", "Invalid DB engine: non-existent"
+            )
+        if (
+            option_group_kwargs["major_engine_version"]
+            not in valid_option_group_engines[option_group_kwargs["engine_name"]]
+        ):
+            raise RDSClientError(
+                "InvalidParameterCombination",
+                "Cannot find major version {0} for {1}".format(
+                    option_group_kwargs["major_engine_version"],
+                    option_group_kwargs["engine_name"],
+                ),
+            )
+        option_group = OptionGroup(**option_group_kwargs)
+        self.option_groups[option_group_id] = option_group
+        return option_group
+
+
+    def delete_option_group(self, option_group_name):
+        """Remove and return an option group; raises OptionGroupNotFoundFaultError."""
+        if option_group_name in self.option_groups:
+            return self.option_groups.pop(option_group_name)
+        else:
+            raise OptionGroupNotFoundFaultError(option_group_name)
+
+
+    def describe_option_groups(self, option_group_kwargs):
+        """List option groups matching name/engine/major-version filters,
+        paginated by marker (start offset) and max_records (20-100)."""
+        option_group_list = []
+
+        # NOTE(review): marker is used directly as a list index; presumably it
+        # arrives as an int (or falsy) -- confirm against the response layer.
+        if option_group_kwargs["marker"]:
+            marker = option_group_kwargs["marker"]
+        else:
+            marker = 0
+        if option_group_kwargs["max_records"]:
+            if (
+                option_group_kwargs["max_records"] < 20
+                or option_group_kwargs["max_records"] > 100
+            ):
+                raise RDSClientError(
+                    "InvalidParameterValue",
+                    "Invalid value for max records. Must be between 20 and 100",
+                )
+            max_records = option_group_kwargs["max_records"]
+        else:
+            max_records = 100
+
+        # Each provided filter must match; unset filters are skipped.
+        for option_group in self.option_groups.values():
+            if (
+                option_group_kwargs["name"]
+                and option_group.name != option_group_kwargs["name"]
+            ):
+                continue
+            elif (
+                option_group_kwargs["engine_name"]
+                and option_group.engine_name != option_group_kwargs["engine_name"]
+            ):
+                continue
+            elif (
+                option_group_kwargs["major_engine_version"]
+                and option_group.major_engine_version
+                != option_group_kwargs["major_engine_version"]
+            ):
+                continue
+            else:
+                option_group_list.append(option_group)
+        if not len(option_group_list):
+            raise OptionGroupNotFoundFaultError(option_group_kwargs["name"])
+        return option_group_list[marker : max_records + marker]
+
+
+ @staticmethod
+ def describe_option_group_options(engine_name, major_engine_version=None):
+ default_option_group_options = {
+ "mysql": {
+ "5.6": '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ "all": '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ },
+ "oracle-ee": {
+ "11.2": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ "all": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ },
+ "oracle-sa": {
+ "11.2": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ "all": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ },
+ "oracle-sa1": {
+ "11.2": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ "all": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ },
+ "sqlserver-ee": {
+ "10.50": '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ "11.00": '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ "all": '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
+ },
+ }
+
+ if engine_name not in default_option_group_options:
+ raise RDSClientError(
+ "InvalidParameterValue", "Invalid DB engine: {0}".format(engine_name)
+ )
+ if (
+ major_engine_version
+ and major_engine_version not in default_option_group_options[engine_name]
+ ):
+ raise RDSClientError(
+ "InvalidParameterCombination",
+ "Cannot find major version {0} for {1}".format(
+ major_engine_version, engine_name
+ ),
+ )
+ if major_engine_version:
+ return default_option_group_options[engine_name][major_engine_version]
+ return default_option_group_options[engine_name]["all"]
+
+ def modify_option_group(
+ self,
+ option_group_name,
+ options_to_include=None,
+ options_to_remove=None,
+ apply_immediately=None,
+ ):
+ if option_group_name not in self.option_groups:
+ raise OptionGroupNotFoundFaultError(option_group_name)
+ if not options_to_include and not options_to_remove:
+ raise RDSClientError(
+ "InvalidParameterValue",
+ "At least one option must be added, modified, or removed.",
+ )
+ if options_to_remove:
+ self.option_groups[option_group_name].remove_options(options_to_remove)
+ if options_to_include:
+ self.option_groups[option_group_name].add_options(options_to_include)
+ return self.option_groups[option_group_name]
+
    def create_db_parameter_group(self, db_parameter_group_kwargs):
        """Create and register a DB parameter group.

        Expects ``name``, ``description`` and ``family`` keys in
        *db_parameter_group_kwargs*; raises RDSClientError when the name is
        already taken or when description/family are missing or blank.
        Returns the new DBParameterGroup.
        """
        db_parameter_group_id = db_parameter_group_kwargs["name"]
        if db_parameter_group_kwargs["name"] in self.db_parameter_groups:
            raise RDSClientError(
                "DBParameterGroupAlreadyExistsFault",
                "A DB parameter group named {0} already exists.".format(
                    db_parameter_group_kwargs["name"]
                ),
            )
        if not db_parameter_group_kwargs.get("description"):
            raise RDSClientError(
                "InvalidParameterValue",
                "The parameter Description must be provided and must not be blank.",
            )
        # NOTE(review): this guards the "family" field, but the message names
        # DBParameterGroupName. Looks like a copy/paste slip — confirm whether
        # existing tests pin this exact text before changing it.
        if not db_parameter_group_kwargs.get("family"):
            raise RDSClientError(
                "InvalidParameterValue",
                "The parameter DBParameterGroupName must be provided and must not be blank.",
            )
        # Stamp the backend's region so the group can build its ARN.
        db_parameter_group_kwargs["region"] = self.region
        db_parameter_group = DBParameterGroup(**db_parameter_group_kwargs)
        self.db_parameter_groups[db_parameter_group_id] = db_parameter_group
        return db_parameter_group
+
    def describe_db_parameter_groups(self, db_parameter_group_kwargs):
        """List DB parameter groups, optionally filtered by name and paginated.

        ``marker`` is treated as an integer offset into the result list
        (defaults to 0); ``max_records`` must be between 20 and 100
        (defaults to 100) and bounds the returned page size.
        """
        db_parameter_group_list = []

        if db_parameter_group_kwargs.get("marker"):
            marker = db_parameter_group_kwargs["marker"]
        else:
            marker = 0
        if db_parameter_group_kwargs.get("max_records"):
            if (
                db_parameter_group_kwargs["max_records"] < 20
                or db_parameter_group_kwargs["max_records"] > 100
            ):
                raise RDSClientError(
                    "InvalidParameterValue",
                    "Invalid value for max records. Must be between 20 and 100",
                )
            max_records = db_parameter_group_kwargs["max_records"]
        else:
            max_records = 100

        # Keep only groups matching the optional "name" filter; when no name
        # is given, every group matches.
        for db_parameter_group in self.db_parameter_groups.values():
            if not db_parameter_group_kwargs.get(
                "name"
            ) or db_parameter_group.name == db_parameter_group_kwargs.get("name"):
                db_parameter_group_list.append(db_parameter_group)
            else:
                continue

        return db_parameter_group_list[marker : max_records + marker]
+
+ def modify_db_parameter_group(
+ self, db_parameter_group_name, db_parameter_group_parameters
+ ):
+ if db_parameter_group_name not in self.db_parameter_groups:
+ raise DBParameterGroupNotFoundError(db_parameter_group_name)
+
+ db_parameter_group = self.db_parameter_groups[db_parameter_group_name]
+ db_parameter_group.update_parameters(db_parameter_group_parameters)
+
+ return db_parameter_group
+
    def create_db_cluster(self, kwargs):
        """Create a cluster and return its initial "creating" state.

        The stored cluster is flipped straight to "available" (moto has no
        real background provisioning); a deep copy taken before the flip is
        returned so this call reports the transitional status.
        """
        cluster_id = kwargs["db_cluster_identifier"]
        cluster = Cluster(**kwargs)
        self.clusters[cluster_id] = cluster
        initial_state = copy.deepcopy(cluster)  # Return status=creating
        cluster.status = "available"  # Already set the final status in the background
        return initial_state
+
    def create_db_cluster_snapshot(
        self, db_cluster_identifier, db_snapshot_identifier, tags=None
    ):
        """Snapshot an existing cluster.

        Raises when the cluster is unknown, the snapshot name is already in
        use, or the snapshot quota (MOTO_RDS_SNAPSHOT_LIMIT env var,
        default 100) has been reached. When the cluster has
        ``copy_tags_to_snapshot`` set, its tags are appended to *tags*.
        """
        cluster = self.clusters.get(db_cluster_identifier)
        if cluster is None:
            raise DBClusterNotFoundError(db_cluster_identifier)
        if db_snapshot_identifier in self.cluster_snapshots:
            raise DBClusterSnapshotAlreadyExistsError(db_snapshot_identifier)
        # Quota counts cluster snapshots only, configurable via env var.
        if len(self.cluster_snapshots) >= int(
            os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
        ):
            raise SnapshotQuotaExceededError()
        if tags is None:
            tags = list()
        if cluster.copy_tags_to_snapshot:
            tags += cluster.get_tags()
        snapshot = ClusterSnapshot(cluster, db_snapshot_identifier, tags)
        self.cluster_snapshots[db_snapshot_identifier] = snapshot
        return snapshot
+
    def copy_cluster_snapshot(
        self, source_snapshot_identifier, target_snapshot_identifier, tags=None
    ):
        """Copy an existing cluster snapshot under a new identifier.

        The source must exist, the target name must be free, and the
        snapshot quota (MOTO_RDS_SNAPSHOT_LIMIT, default 100) must not be
        exceeded. When *tags* is None the source's tags are reused;
        otherwise they are merged with the new tags winning on key clashes.
        """
        if source_snapshot_identifier not in self.cluster_snapshots:
            raise DBClusterSnapshotNotFoundError(source_snapshot_identifier)
        if target_snapshot_identifier in self.cluster_snapshots:
            raise DBClusterSnapshotAlreadyExistsError(target_snapshot_identifier)
        if len(self.cluster_snapshots) >= int(
            os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
        ):
            raise SnapshotQuotaExceededError()
        source_snapshot = self.cluster_snapshots[source_snapshot_identifier]
        if tags is None:
            tags = source_snapshot.tags
        else:
            tags = self._merge_tags(source_snapshot.tags, tags)
        # The copy points at the same underlying cluster object.
        target_snapshot = ClusterSnapshot(
            source_snapshot.cluster, target_snapshot_identifier, tags
        )
        self.cluster_snapshots[target_snapshot_identifier] = target_snapshot
        return target_snapshot
+
+ def delete_db_cluster_snapshot(self, db_snapshot_identifier):
+ if db_snapshot_identifier not in self.cluster_snapshots:
+ raise DBClusterSnapshotNotFoundError(db_snapshot_identifier)
+
+ return self.cluster_snapshots.pop(db_snapshot_identifier)
+
+ def describe_db_clusters(self, cluster_identifier):
+ if cluster_identifier:
+ return [self.clusters[cluster_identifier]]
+ return self.clusters.values()
+
    def describe_db_cluster_snapshots(
        self, db_cluster_identifier, db_snapshot_identifier, filters=None
    ):
        """List cluster snapshots, narrowed by cluster id / snapshot id / filters.

        The identifier arguments are folded into *filters* as the
        "db-cluster-id" and "db-cluster-snapshot-id" filter keys. A
        not-found error is raised only when a snapshot id was requested,
        nothing matched, and no cluster id was given.
        """
        snapshots = self.cluster_snapshots
        if db_cluster_identifier:
            filters = merge_filters(filters, {"db-cluster-id": [db_cluster_identifier]})
        if db_snapshot_identifier:
            filters = merge_filters(
                filters, {"db-cluster-snapshot-id": [db_snapshot_identifier]}
            )
        if filters:
            snapshots = self._filter_resources(snapshots, filters, ClusterSnapshot)
        if db_snapshot_identifier and not snapshots and not db_cluster_identifier:
            raise DBClusterSnapshotNotFoundError(db_snapshot_identifier)
        return list(snapshots.values())
+
+ def delete_db_cluster(self, cluster_identifier):
+ if cluster_identifier in self.clusters:
+ if self.clusters[cluster_identifier].deletion_protection:
+ raise InvalidParameterValue(
+ "Can't delete Cluster with protection enabled"
+ )
+ return self.clusters.pop(cluster_identifier)
+ raise DBClusterNotFoundError(cluster_identifier)
+
    def start_db_cluster(self, cluster_identifier):
        """Start a stopped cluster.

        Returns a deep copy reporting status "started" (what the API call
        shows), while the stored cluster goes straight to "available".
        Raises if the cluster is unknown or not currently stopped.
        """
        if cluster_identifier not in self.clusters:
            raise DBClusterNotFoundError(cluster_identifier)
        cluster = self.clusters[cluster_identifier]
        if cluster.status != "stopped":
            raise InvalidDBClusterStateFault(
                "DbCluster cluster-id is not in stopped state."
            )
        temp_state = copy.deepcopy(cluster)
        temp_state.status = "started"
        cluster.status = "available"  # This is the final status - already setting it in the background
        return temp_state
+
    def restore_db_cluster_from_snapshot(self, from_snapshot_id, overrides):
        """Create a new cluster from a snapshot's saved cluster attributes.

        The snapshot's cluster ``__dict__`` is deep-copied and fed back into
        create_db_cluster. Only truthy values in *overrides* are applied, so
        an override of False/0/"" is silently ignored — NOTE(review):
        confirm this is intended for boolean fields.
        """
        snapshot = self.describe_db_cluster_snapshots(
            db_cluster_identifier=None, db_snapshot_identifier=from_snapshot_id
        )[0]
        original_cluster = snapshot.cluster
        new_cluster_props = copy.deepcopy(original_cluster.__dict__)
        for key, value in overrides.items():
            if value:
                new_cluster_props[key] = value

        return self.create_db_cluster(new_cluster_props)
+
+ def stop_db_cluster(self, cluster_identifier):
+ if cluster_identifier not in self.clusters:
+ raise DBClusterNotFoundError(cluster_identifier)
+ cluster = self.clusters[cluster_identifier]
+ if cluster.status not in ["available"]:
+ raise InvalidDBClusterStateFault(
+ "DbCluster cluster-id is not in available state."
+ )
+ previous_state = copy.deepcopy(cluster)
+ cluster.status = "stopped"
+ return previous_state
+
    def start_export_task(self, kwargs):
        """Start an export task for a DB or cluster snapshot.

        The snapshot is located from ``source_arn``: the last ':'-separated
        segment is the snapshot id and the one before it the type
        ("snapshot" or "cluster-snapshot"). The task id must be unused, the
        snapshot must exist, and it must be in the "available" state.
        """
        export_task_id = kwargs["export_task_identifier"]
        source_arn = kwargs["source_arn"]
        snapshot_id = source_arn.split(":")[-1]
        snapshot_type = source_arn.split(":")[-2]

        if export_task_id in self.export_tasks:
            raise ExportTaskAlreadyExistsError(export_task_id)
        if snapshot_type == "snapshot" and snapshot_id not in self.database_snapshots:
            raise DBSnapshotNotFoundError(snapshot_id)
        elif (
            snapshot_type == "cluster-snapshot"
            and snapshot_id not in self.cluster_snapshots
        ):
            raise DBClusterSnapshotNotFoundError(snapshot_id)

        if snapshot_type == "snapshot":
            snapshot = self.database_snapshots[snapshot_id]
        else:
            snapshot = self.cluster_snapshots[snapshot_id]

        if snapshot.status not in ["available"]:
            raise InvalidExportSourceStateError(snapshot.status)

        export_task = ExportTask(snapshot, kwargs)
        self.export_tasks[export_task_id] = export_task

        return export_task
+
+ def cancel_export_task(self, export_task_identifier):
+ if export_task_identifier in self.export_tasks:
+ export_task = self.export_tasks[export_task_identifier]
+ export_task.status = "canceled"
+ self.export_tasks[export_task_identifier] = export_task
+ return export_task
+ raise ExportTaskNotFoundError(export_task_identifier)
+
+ def describe_export_tasks(self, export_task_identifier):
+ if export_task_identifier:
+ if export_task_identifier in self.export_tasks:
+ return [self.export_tasks[export_task_identifier]]
+ else:
+ raise ExportTaskNotFoundError(export_task_identifier)
+ return self.export_tasks.values()
+
+ def create_event_subscription(self, kwargs):
+ subscription_name = kwargs["subscription_name"]
+
+ if subscription_name in self.event_subscriptions:
+ raise SubscriptionAlreadyExistError(subscription_name)
+
+ subscription = EventSubscription(kwargs)
+ self.event_subscriptions[subscription_name] = subscription
+
+ return subscription
+
+ def delete_event_subscription(self, subscription_name):
+ if subscription_name in self.event_subscriptions:
+ return self.event_subscriptions.pop(subscription_name)
+ raise SubscriptionNotFoundError(subscription_name)
+
+ def describe_event_subscriptions(self, subscription_name):
+ if subscription_name:
+ if subscription_name in self.event_subscriptions:
+ return [self.event_subscriptions[subscription_name]]
+ else:
+ raise SubscriptionNotFoundError(subscription_name)
+ return self.event_subscriptions.values()
+
    def list_tags_for_resource(self, arn):
        """Return the tag list for the resource identified by *arn*.

        The resource type is the second-to-last ':'-separated ARN segment
        and the resource name the last. ARNs that fail self.arn_regex raise
        an InvalidParameterValue error; a known type whose resource does not
        exist, or an unrecognized type, falls through to an empty list.
        """
        if self.arn_regex.match(arn):
            arn_breakdown = arn.split(":")
            resource_type = arn_breakdown[len(arn_breakdown) - 2]
            resource_name = arn_breakdown[len(arn_breakdown) - 1]
            if resource_type == "db":  # Database
                if resource_name in self.databases:
                    return self.databases[resource_name].get_tags()
            elif resource_type == "cluster":  # Cluster
                if resource_name in self.clusters:
                    return self.clusters[resource_name].get_tags()
            elif resource_type == "es":  # Event Subscription
                if resource_name in self.event_subscriptions:
                    return self.event_subscriptions[resource_name].get_tags()
            elif resource_type == "og":  # Option Group
                if resource_name in self.option_groups:
                    return self.option_groups[resource_name].get_tags()
            elif resource_type == "pg":  # Parameter Group
                if resource_name in self.db_parameter_groups:
                    return self.db_parameter_groups[resource_name].get_tags()
            elif resource_type == "ri":  # Reserved DB instance
                # TODO: Complete call to tags on resource type Reserved DB
                # instance
                return []
            elif resource_type == "secgrp":  # DB security group
                if resource_name in self.security_groups:
                    return self.security_groups[resource_name].get_tags()
            elif resource_type == "snapshot":  # DB Snapshot
                if resource_name in self.database_snapshots:
                    return self.database_snapshots[resource_name].get_tags()
            elif resource_type == "cluster-snapshot":  # DB Cluster Snapshot
                if resource_name in self.cluster_snapshots:
                    return self.cluster_snapshots[resource_name].get_tags()
            elif resource_type == "subgrp":  # DB subnet group
                if resource_name in self.subnet_groups:
                    return self.subnet_groups[resource_name].get_tags()
        else:
            raise RDSClientError(
                "InvalidParameterValue", "Invalid resource name: {0}".format(arn)
            )
        return []
+
+ def remove_tags_from_resource(self, arn, tag_keys):
+ if self.arn_regex.match(arn):
+ arn_breakdown = arn.split(":")
+ resource_type = arn_breakdown[len(arn_breakdown) - 2]
+ resource_name = arn_breakdown[len(arn_breakdown) - 1]
+ if resource_type == "db": # Database
+ if resource_name in self.databases:
+ return self.databases[resource_name].remove_tags(tag_keys)
+ elif resource_type == "es": # Event Subscription
+ if resource_name in self.event_subscriptions:
+ return self.event_subscriptions[resource_name].remove_tags(tag_keys)
+ elif resource_type == "og": # Option Group
+ if resource_name in self.option_groups:
+ return self.option_groups[resource_name].remove_tags(tag_keys)
+ elif resource_type == "pg": # Parameter Group
+ if resource_name in self.db_parameter_groups:
+ return self.db_parameter_groups[resource_name].remove_tags(tag_keys)
+ elif resource_type == "ri": # Reserved DB instance
+ return None
+ elif resource_type == "secgrp": # DB security group
+ if resource_name in self.security_groups:
+ return self.security_groups[resource_name].remove_tags(tag_keys)
+ elif resource_type == "snapshot": # DB Snapshot
+ if resource_name in self.database_snapshots:
+ return self.database_snapshots[resource_name].remove_tags(tag_keys)
+ elif resource_type == "cluster-snapshot": # DB Cluster Snapshot
+ if resource_name in self.cluster_snapshots:
+ return self.cluster_snapshots[resource_name].remove_tags(tag_keys)
+ elif resource_type == "subgrp": # DB subnet group
+ if resource_name in self.subnet_groups:
+ return self.subnet_groups[resource_name].remove_tags(tag_keys)
+ else:
+ raise RDSClientError(
+ "InvalidParameterValue", "Invalid resource name: {0}".format(arn)
+ )
+
+ def add_tags_to_resource(self, arn, tags):
+ if self.arn_regex.match(arn):
+ arn_breakdown = arn.split(":")
+ resource_type = arn_breakdown[len(arn_breakdown) - 2]
+ resource_name = arn_breakdown[len(arn_breakdown) - 1]
+ if resource_type == "db": # Database
+ if resource_name in self.databases:
+ return self.databases[resource_name].add_tags(tags)
+ elif resource_type == "es": # Event Subscription
+ if resource_name in self.event_subscriptions:
+ return self.event_subscriptions[resource_name].add_tags(tags)
+ elif resource_type == "og": # Option Group
+ if resource_name in self.option_groups:
+ return self.option_groups[resource_name].add_tags(tags)
+ elif resource_type == "pg": # Parameter Group
+ if resource_name in self.db_parameter_groups:
+ return self.db_parameter_groups[resource_name].add_tags(tags)
+ elif resource_type == "ri": # Reserved DB instance
+ return []
+ elif resource_type == "secgrp": # DB security group
+ if resource_name in self.security_groups:
+ return self.security_groups[resource_name].add_tags(tags)
+ elif resource_type == "snapshot": # DB Snapshot
+ if resource_name in self.database_snapshots:
+ return self.database_snapshots[resource_name].add_tags(tags)
+ elif resource_type == "cluster-snapshot": # DB Cluster Snapshot
+ if resource_name in self.cluster_snapshots:
+ return self.cluster_snapshots[resource_name].add_tags(tags)
+ elif resource_type == "subgrp": # DB subnet group
+ if resource_name in self.subnet_groups:
+ return self.subnet_groups[resource_name].add_tags(tags)
+ else:
+ raise RDSClientError(
+ "InvalidParameterValue", "Invalid resource name: {0}".format(arn)
+ )
+
    @staticmethod
    def _filter_resources(resources, filters, resource_class):
        """Apply *filters* to *resources* using resource_class.SUPPORTED_FILTERS.

        Unknown filter names (KeyError) and bad filter values (ValueError)
        from the filter helpers are translated into RDS client errors.
        """
        try:
            filter_defs = resource_class.SUPPORTED_FILTERS
            validate_filters(filters, filter_defs)
            return apply_filter(resources, filters, filter_defs)
        except KeyError as e:
            # https://stackoverflow.com/questions/24998968/why-does-strkeyerror-add-extra-quotes
            raise InvalidParameterValue(e.args[0])
        except ValueError as e:
            raise InvalidParameterCombination(str(e))
+
+ @staticmethod
+ def _merge_tags(old_tags: list, new_tags: list):
+ tags_dict = dict()
+ tags_dict.update({d["Key"]: d["Value"] for d in old_tags})
+ tags_dict.update({d["Key"]: d["Value"] for d in new_tags})
+ return [{"Key": k, "Value": v} for k, v in tags_dict.items()]
+
+
class OptionGroup(object):
    """In-memory model of an RDS option group (name, engine, version, tags)."""

    def __init__(self, name, engine_name, major_engine_version, description=None):
        self.engine_name = engine_name
        self.major_engine_version = major_engine_version
        self.description = description
        self.name = name
        # Always False/"null" here: this mock does not model VPC membership.
        self.vpc_and_non_vpc_instance_memberships = False
        self.options = {}
        self.vpcId = "null"
        self.tags = []

    def to_json(self):
        """Render the option group as the JSON fragment the API returns."""
        template = Template(
            """{
    "VpcId": null,
    "MajorEngineVersion": "{{ option_group.major_engine_version }}",
    "OptionGroupDescription": "{{ option_group.description }}",
    "AllowsVpcAndNonVpcInstanceMemberships": "{{ option_group.vpc_and_non_vpc_instance_memberships }}",
    "EngineName": "{{ option_group.engine_name }}",
    "Options": [],
    "OptionGroupName": "{{ option_group.name }}"
}"""
        )
        return template.render(option_group=self)

    def to_xml(self):
        """Render the option group as the XML fragment the API returns."""
        template = Template(
            """
        {{ option_group.name }}
        {{ option_group.vpc_and_non_vpc_instance_memberships }}
        {{ option_group.major_engine_version }}
        {{ option_group.engine_name }}
        {{ option_group.description }}
        
        """
        )
        return template.render(option_group=self)

    def remove_options(self, options_to_remove):
        # TODO: Check for option in self.options and remove if exists. Raise
        # error otherwise
        return

    def add_options(self, options_to_add):
        # TODO: Validate option and add it to self.options. If invalid raise
        # error
        return

    def get_tags(self):
        """Return the current tag list."""
        return self.tags

    def add_tags(self, tags):
        """Add tags, replacing any existing tags with the same keys."""
        new_keys = [tag_set["Key"] for tag_set in tags]
        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
        self.tags.extend(tags)
        return self.tags

    def remove_tags(self, tag_keys):
        """Drop all tags whose Key is in *tag_keys*."""
        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
+
+
class OptionGroupOption(object):
    """Describes an option available for an option group (kwargs-driven)."""

    def __init__(self, **kwargs):
        self.default_port = kwargs.get("default_port")
        self.description = kwargs.get("description")
        self.engine_name = kwargs.get("engine_name")
        self.major_engine_version = kwargs.get("major_engine_version")
        self.name = kwargs.get("name")
        self.option_group_option_settings = self._make_option_group_option_settings(
            kwargs.get("option_group_option_settings", [])
        )
        self.options_depended_on = kwargs.get("options_depended_on", [])
        self.permanent = kwargs.get("permanent")
        self.persistent = kwargs.get("persistent")
        self.port_required = kwargs.get("port_required")

    def _make_option_group_option_settings(self, option_group_option_settings_kwargs):
        # Wrap each settings dict in an OptionGroupOptionSetting model.
        return [
            OptionGroupOptionSetting(**setting_kwargs)
            for setting_kwargs in option_group_option_settings_kwargs
        ]

    def to_json(self):
        # NOTE(review): this template is entirely hard-coded (Mirroring /
        # sqlserver-se) and ignores the instance's attributes — presumably a
        # placeholder; confirm before relying on it.
        template = Template(
            """{ "MinimumRequiredMinorEngineVersion":
            "2789.0.v1",
            "OptionsDependedOn": [],
            "MajorEngineVersion": "10.50",
            "Persistent": false,
            "DefaultPort": null,
            "Permanent": false,
            "OptionGroupOptionSettings": [],
            "EngineName": "sqlserver-se",
            "Name": "Mirroring",
            "PortRequired": false,
            "Description": "SQLServer Database Mirroring"
        }"""
        )
        return template.render(option_group=self)

    def to_xml(self):
        """Render this option as an XML fragment."""
        # NOTE(review): minimum_required_minor_engine_version is referenced
        # below but never assigned in __init__ — Jinja will render it as its
        # undefined representation; confirm whether it should be set from
        # kwargs.
        template = Template(
            """
        {{ option_group.major_engine_version }}
        {{ option_group.default_port }}
        {{ option_group.port_required }}
        {{ option_group.persistent }}
        
        {%- for option_name in option_group.options_depended_on -%}
        {{ option_name }}
        {%- endfor -%}
        
        {{ option_group.permanent }}
        {{ option_group.description }}
        {{ option_group.name }}
        
        {%- for setting in option_group.option_group_option_settings -%}
        {{ setting.to_xml() }}
        {%- endfor -%}
        
        {{ option_group.engine_name }}
        {{ option_group.minimum_required_minor_engine_version }}
"""
        )
        return template.render(option_group=self)
+
+
class OptionGroupOptionSetting(object):
    """One configurable setting belonging to an option-group option."""

    def __init__(self, **kwargs):
        # BUG FIX: was `def __init__(self, *kwargs)`. Callers construct this
        # class with keyword arguments (OptionGroupOption calls
        # OptionGroupOptionSetting(**setting_kwargs)), so `*kwargs` raised
        # TypeError for any keyword and made `kwargs` a tuple, breaking
        # `kwargs.get(...)` below. `**kwargs` restores the intended dict.
        self.allowed_values = kwargs.get("allowed_values")
        self.apply_type = kwargs.get("apply_type")
        self.default_value = kwargs.get("default_value")
        self.is_modifiable = kwargs.get("is_modifiable")
        self.setting_description = kwargs.get("setting_description")
        self.setting_name = kwargs.get("setting_name")

    def to_xml(self):
        """Render this setting as an XML fragment."""
        template = Template(
            """
        {{ option_group_option_setting.allowed_values }}
        {{ option_group_option_setting.apply_type }}
        {{ option_group_option_setting.default_value }}
        {{ option_group_option_setting.is_modifiable }}
        {{ option_group_option_setting.setting_description }}
        {{ option_group_option_setting.setting_name }}
"""
        )
        return template.render(option_group_option_setting=self)
+
+
def make_rds_arn(region, name):
    """Build the ARN of a DB parameter group (resource type ``pg``)."""
    return f"arn:aws:rds:{region}:{ACCOUNT_ID}:pg:{name}"
+
+
class DBParameterGroup(CloudFormationModel):
    """In-memory model of an RDS DB parameter group, with CloudFormation
    create/delete support."""

    def __init__(self, name, description, family, tags, region):
        self.name = name
        self.description = description
        self.family = family
        self.tags = tags
        # ParameterName -> dict of parameter attributes; defaultdict lets
        # update_parameters create entries on first touch.
        self.parameters = defaultdict(dict)
        self.arn = make_rds_arn(region, name)

    def to_xml(self):
        """Render the parameter group as an XML fragment."""
        template = Template(
            """
        {{ param_group.name }}
        {{ param_group.family }}
        {{ param_group.description }}
        {{ param_group.arn }}
        """
        )
        return template.render(param_group=self)

    def get_tags(self):
        """Return the current tag list."""
        return self.tags

    def add_tags(self, tags):
        """Add tags, replacing any existing tags with the same keys."""
        new_keys = [tag_set["Key"] for tag_set in tags]
        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
        self.tags.extend(tags)
        return self.tags

    def remove_tags(self, tag_keys):
        """Drop all tags whose Key is in *tag_keys*."""
        self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]

    def update_parameters(self, new_parameters):
        """Merge each new parameter dict into the entry keyed by its
        ParameterName."""
        for new_parameter in new_parameters:
            parameter = self.parameters[new_parameter["ParameterName"]]
            parameter.update(new_parameter)

    def delete(self, region_name):
        # Called by CloudFormation teardown: remove this group from the
        # regional backend.
        backend = rds_backends[region_name]
        backend.delete_db_parameter_group(self.name)

    @staticmethod
    def cloudformation_name_type():
        return None

    @staticmethod
    def cloudformation_type():
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html
        return "AWS::RDS::DBParameterGroup"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name, **kwargs
    ):
        """Create a parameter group (and apply its Parameters) from a
        CloudFormation resource definition."""
        properties = cloudformation_json["Properties"]

        db_parameter_group_kwargs = {
            "description": properties["Description"],
            "family": properties["Family"],
            "name": resource_name.lower(),
            "tags": properties.get("Tags"),
        }
        # Flatten the CloudFormation Parameters mapping into the
        # name/value dicts update_parameters expects.
        db_parameter_group_parameters = []
        for db_parameter, db_parameter_value in properties.get(
            "Parameters", {}
        ).items():
            db_parameter_group_parameters.append(
                {"ParameterName": db_parameter, "ParameterValue": db_parameter_value}
            )

        rds_backend = rds_backends[region_name]
        db_parameter_group = rds_backend.create_db_parameter_group(
            db_parameter_group_kwargs
        )
        db_parameter_group.update_parameters(db_parameter_group_parameters)
        return db_parameter_group
rds_backends = BackendDict(RDSBackend, "rds")
diff --git a/moto/rds/responses.py b/moto/rds/responses.py
index d53f25310..737c2f031 100644
--- a/moto/rds/responses.py
+++ b/moto/rds/responses.py
@@ -1,6 +1,10 @@
+from collections import defaultdict
+
from moto.core.responses import BaseResponse
from moto.ec2.models import ec2_backends
from .models import rds_backends
+from .exceptions import DBParameterGroupNotFoundError
+from .utils import filters_from_querystring
class RDSResponse(BaseResponse):
@@ -14,29 +18,40 @@ class RDSResponse(BaseResponse):
"allocated_storage": self._get_int_param("AllocatedStorage"),
"availability_zone": self._get_param("AvailabilityZone"),
"backup_retention_period": self._get_param("BackupRetentionPeriod"),
+ "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
"db_instance_class": self._get_param("DBInstanceClass"),
"db_instance_identifier": self._get_param("DBInstanceIdentifier"),
"db_name": self._get_param("DBName"),
- # DBParameterGroupName
+ "db_parameter_group_name": self._get_param("DBParameterGroupName"),
+ "db_snapshot_identifier": self._get_param("DBSnapshotIdentifier"),
"db_subnet_group_name": self._get_param("DBSubnetGroupName"),
"engine": self._get_param("Engine"),
"engine_version": self._get_param("EngineVersion"),
+ "enable_iam_database_authentication": self._get_bool_param(
+ "EnableIAMDatabaseAuthentication"
+ ),
+ "license_model": self._get_param("LicenseModel"),
"iops": self._get_int_param("Iops"),
"kms_key_id": self._get_param("KmsKeyId"),
- "master_password": self._get_param("MasterUserPassword"),
+ "master_user_password": self._get_param("MasterUserPassword"),
"master_username": self._get_param("MasterUsername"),
"multi_az": self._get_bool_param("MultiAZ"),
- # OptionGroupName
+ "option_group_name": self._get_param("OptionGroupName"),
"port": self._get_param("Port"),
# PreferredBackupWindow
# PreferredMaintenanceWindow
"publicly_accessible": self._get_param("PubliclyAccessible"),
"region": self.region,
- "security_groups": self._get_multi_param("DBSecurityGroups.member"),
+ "security_groups": self._get_multi_param(
+ "DBSecurityGroups.DBSecurityGroupName"
+ ),
"storage_encrypted": self._get_param("StorageEncrypted"),
- "storage_type": self._get_param("StorageType"),
- # VpcSecurityGroupIds.member.N
+ "storage_type": self._get_param("StorageType", None),
+ "vpc_security_group_ids": self._get_multi_param(
+ "VpcSecurityGroupIds.VpcSecurityGroupId"
+ ),
"tags": list(),
+ "deletion_protection": self._get_bool_param("DeletionProtection"),
}
args["tags"] = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
return args
@@ -56,6 +71,70 @@ class RDSResponse(BaseResponse):
"storage_type": self._get_param("StorageType"),
}
+ def _get_option_group_kwargs(self):
+ return {
+ "major_engine_version": self._get_param("MajorEngineVersion"),
+ "description": self._get_param("OptionGroupDescription"),
+ "engine_name": self._get_param("EngineName"),
+ "name": self._get_param("OptionGroupName"),
+ }
+
+ def _get_db_parameter_group_kwargs(self):
+ return {
+ "description": self._get_param("Description"),
+ "family": self._get_param("DBParameterGroupFamily"),
+ "name": self._get_param("DBParameterGroupName"),
+ "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
+ }
+
+ def _get_db_cluster_kwargs(self):
+ return {
+ "availability_zones": self._get_multi_param(
+ "AvailabilityZones.AvailabilityZone"
+ ),
+ "db_name": self._get_param("DatabaseName"),
+ "db_cluster_identifier": self._get_param("DBClusterIdentifier"),
+ "deletion_protection": self._get_bool_param("DeletionProtection"),
+ "engine": self._get_param("Engine"),
+ "engine_version": self._get_param("EngineVersion"),
+ "engine_mode": self._get_param("EngineMode"),
+ "allocated_storage": self._get_param("AllocatedStorage"),
+ "iops": self._get_param("Iops"),
+ "storage_type": self._get_param("StorageType"),
+ "master_username": self._get_param("MasterUsername"),
+ "master_user_password": self._get_param("MasterUserPassword"),
+ "port": self._get_param("Port"),
+ "parameter_group": self._get_param("DBClusterParameterGroup"),
+ "region": self.region,
+ "db_cluster_instance_class": self._get_param("DBClusterInstanceClass"),
+ "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
+ "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
+ }
+
+ def _get_export_task_kwargs(self):
+ return {
+ "export_task_identifier": self._get_param("ExportTaskIdentifier"),
+ "source_arn": self._get_param("SourceArn"),
+ "s3_bucket_name": self._get_param("S3BucketName"),
+ "iam_role_arn": self._get_param("IamRoleArn"),
+ "kms_key_id": self._get_param("KmsKeyId"),
+ "s3_prefix": self._get_param("S3Prefix"),
+ "export_only": self.unpack_list_params("ExportOnly.member"),
+ }
+
+ def _get_event_subscription_kwargs(self):
+ return {
+ "subscription_name": self._get_param("SubscriptionName"),
+ "sns_topic_arn": self._get_param("SnsTopicArn"),
+ "source_type": self._get_param("SourceType"),
+ "event_categories": self.unpack_list_params(
+ "EventCategories.EventCategory"
+ ),
+ "source_ids": self.unpack_list_params("SourceIds.SourceId"),
+ "enabled": self._get_param("Enabled"),
+ "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
+ }
+
def unpack_complex_list_params(self, label, names):
unpacked_list = list()
count = 1
@@ -69,9 +148,16 @@ class RDSResponse(BaseResponse):
count += 1
return unpacked_list
+ def unpack_list_params(self, label):
+ unpacked_list = list()
+ count = 1
+ while self._get_param("{0}.{1}".format(label, count)):
+ unpacked_list.append(self._get_param("{0}.{1}".format(label, count)))
+ count += 1
+ return unpacked_list
+
def create_db_instance(self):
db_kwargs = self._get_db_kwargs()
-
database = self.backend.create_database(db_kwargs)
template = self.response_template(CREATE_DATABASE_TEMPLATE)
return template.render(database=database)
@@ -85,7 +171,10 @@ class RDSResponse(BaseResponse):
def describe_db_instances(self):
db_instance_identifier = self._get_param("DBInstanceIdentifier")
- all_instances = list(self.backend.describe_databases(db_instance_identifier))
+ filters = filters_from_querystring(self.querystring)
+ all_instances = list(
+ self.backend.describe_databases(db_instance_identifier, filters=filters)
+ )
marker = self._get_param("Marker")
all_ids = [instance.db_instance_identifier for instance in all_instances]
if marker:
@@ -115,10 +204,99 @@ class RDSResponse(BaseResponse):
def delete_db_instance(self):
db_instance_identifier = self._get_param("DBInstanceIdentifier")
- database = self.backend.delete_database(db_instance_identifier)
+ db_snapshot_name = self._get_param("FinalDBSnapshotIdentifier")
+ database = self.backend.delete_database(
+ db_instance_identifier, db_snapshot_name
+ )
template = self.response_template(DELETE_DATABASE_TEMPLATE)
return template.render(database=database)
+ def reboot_db_instance(self):
+ db_instance_identifier = self._get_param("DBInstanceIdentifier")
+ database = self.backend.reboot_db_instance(db_instance_identifier)
+ template = self.response_template(REBOOT_DATABASE_TEMPLATE)
+ return template.render(database=database)
+
+ def create_db_snapshot(self):
+ db_instance_identifier = self._get_param("DBInstanceIdentifier")
+ db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
+ tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
+ snapshot = self.backend.create_database_snapshot(
+ db_instance_identifier, db_snapshot_identifier, tags
+ )
+ template = self.response_template(CREATE_SNAPSHOT_TEMPLATE)
+ return template.render(snapshot=snapshot)
+
+ def copy_db_snapshot(self):
+ source_snapshot_identifier = self._get_param("SourceDBSnapshotIdentifier")
+ target_snapshot_identifier = self._get_param("TargetDBSnapshotIdentifier")
+ tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
+ snapshot = self.backend.copy_database_snapshot(
+ source_snapshot_identifier, target_snapshot_identifier, tags,
+ )
+ template = self.response_template(COPY_SNAPSHOT_TEMPLATE)
+ return template.render(snapshot=snapshot)
+
+ def describe_db_snapshots(self):
+ db_instance_identifier = self._get_param("DBInstanceIdentifier")
+ db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
+ filters = filters_from_querystring(self.querystring)
+ snapshots = self.backend.describe_database_snapshots(
+ db_instance_identifier, db_snapshot_identifier, filters
+ )
+ template = self.response_template(DESCRIBE_SNAPSHOTS_TEMPLATE)
+ return template.render(snapshots=snapshots)
+
+ def delete_db_snapshot(self):
+ db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
+ snapshot = self.backend.delete_database_snapshot(db_snapshot_identifier)
+ template = self.response_template(DELETE_SNAPSHOT_TEMPLATE)
+ return template.render(snapshot=snapshot)
+
+ def restore_db_instance_from_db_snapshot(self):
+ db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
+ db_kwargs = self._get_db_kwargs()
+ new_instance = self.backend.restore_db_instance_from_db_snapshot(
+ db_snapshot_identifier, db_kwargs
+ )
+ template = self.response_template(RESTORE_INSTANCE_FROM_SNAPSHOT_TEMPLATE)
+ return template.render(database=new_instance)
+
+ def list_tags_for_resource(self):
+ arn = self._get_param("ResourceName")
+ template = self.response_template(LIST_TAGS_FOR_RESOURCE_TEMPLATE)
+ tags = self.backend.list_tags_for_resource(arn)
+ return template.render(tags=tags)
+
+ def add_tags_to_resource(self):
+ arn = self._get_param("ResourceName")
+ tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
+ tags = self.backend.add_tags_to_resource(arn, tags)
+ template = self.response_template(ADD_TAGS_TO_RESOURCE_TEMPLATE)
+ return template.render(tags=tags)
+
+ def remove_tags_from_resource(self):
+ arn = self._get_param("ResourceName")
+ tag_keys = self.unpack_list_params("TagKeys.member")
+ self.backend.remove_tags_from_resource(arn, tag_keys)
+ template = self.response_template(REMOVE_TAGS_FROM_RESOURCE_TEMPLATE)
+ return template.render()
+
+ def stop_db_instance(self):
+ db_instance_identifier = self._get_param("DBInstanceIdentifier")
+ db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
+ database = self.backend.stop_database(
+ db_instance_identifier, db_snapshot_identifier
+ )
+ template = self.response_template(STOP_DATABASE_TEMPLATE)
+ return template.render(database=database)
+
+ def start_db_instance(self):
+ db_instance_identifier = self._get_param("DBInstanceIdentifier")
+ database = self.backend.start_database(db_instance_identifier)
+ template = self.response_template(START_DATABASE_TEMPLATE)
+ return template.render(database=database)
+
def create_db_security_group(self):
group_name = self._get_param("DBSecurityGroupName")
description = self._get_param("DBSecurityGroupDescription")
@@ -153,11 +331,11 @@ class RDSResponse(BaseResponse):
def create_db_subnet_group(self):
subnet_name = self._get_param("DBSubnetGroupName")
description = self._get_param("DBSubnetGroupDescription")
- subnet_ids = self._get_multi_param("SubnetIds.member")
+ subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
+ tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
subnets = [
ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids
]
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
subnet_group = self.backend.create_subnet_group(
subnet_name, description, subnets, tags
)
@@ -170,16 +348,270 @@ class RDSResponse(BaseResponse):
template = self.response_template(DESCRIBE_SUBNET_GROUPS_TEMPLATE)
return template.render(subnet_groups=subnet_groups)
+ def modify_db_subnet_group(self):
+ subnet_name = self._get_param("DBSubnetGroupName")
+ description = self._get_param("DBSubnetGroupDescription")
+ subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
+ subnets = [
+ ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids
+ ]
+ subnet_group = self.backend.modify_db_subnet_group(
+ subnet_name, description, subnets
+ )
+ template = self.response_template(MODIFY_SUBNET_GROUPS_TEMPLATE)
+ return template.render(subnet_group=subnet_group)
+
def delete_db_subnet_group(self):
subnet_name = self._get_param("DBSubnetGroupName")
subnet_group = self.backend.delete_subnet_group(subnet_name)
template = self.response_template(DELETE_SUBNET_GROUP_TEMPLATE)
return template.render(subnet_group=subnet_group)
+ def create_option_group(self):
+ kwargs = self._get_option_group_kwargs()
+ option_group = self.backend.create_option_group(kwargs)
+ template = self.response_template(CREATE_OPTION_GROUP_TEMPLATE)
+ return template.render(option_group=option_group)
+
+ def delete_option_group(self):
+ kwargs = self._get_option_group_kwargs()
+ option_group = self.backend.delete_option_group(kwargs["name"])
+ template = self.response_template(DELETE_OPTION_GROUP_TEMPLATE)
+ return template.render(option_group=option_group)
+
+ def describe_option_groups(self):
+ kwargs = self._get_option_group_kwargs()
+ kwargs["max_records"] = self._get_int_param("MaxRecords")
+ kwargs["marker"] = self._get_param("Marker")
+ option_groups = self.backend.describe_option_groups(kwargs)
+ template = self.response_template(DESCRIBE_OPTION_GROUP_TEMPLATE)
+ return template.render(option_groups=option_groups)
+
+ def describe_option_group_options(self):
+ engine_name = self._get_param("EngineName")
+ major_engine_version = self._get_param("MajorEngineVersion")
+ option_group_options = self.backend.describe_option_group_options(
+ engine_name, major_engine_version
+ )
+ return option_group_options
+
+ def modify_option_group(self):
+ option_group_name = self._get_param("OptionGroupName")
+ count = 1
+ options_to_include = []
+ while self._get_param("OptionsToInclude.member.{0}.OptionName".format(count)):
+ options_to_include.append(
+ {
+ "Port": self._get_param(
+ "OptionsToInclude.member.{0}.Port".format(count)
+ ),
+ "OptionName": self._get_param(
+ "OptionsToInclude.member.{0}.OptionName".format(count)
+ ),
+ "DBSecurityGroupMemberships": self._get_param(
+ "OptionsToInclude.member.{0}.DBSecurityGroupMemberships".format(
+ count
+ )
+ ),
+ "OptionSettings": self._get_param(
+ "OptionsToInclude.member.{0}.OptionSettings".format(count)
+ ),
+ "VpcSecurityGroupMemberships": self._get_param(
+ "OptionsToInclude.member.{0}.VpcSecurityGroupMemberships".format(
+ count
+ )
+ ),
+ }
+ )
+ count += 1
+
+ count = 1
+ options_to_remove = []
+ while self._get_param("OptionsToRemove.member.{0}".format(count)):
+ options_to_remove.append(
+ self._get_param("OptionsToRemove.member.{0}".format(count))
+ )
+ count += 1
+ apply_immediately = self._get_param("ApplyImmediately")
+ option_group = self.backend.modify_option_group(
+ option_group_name, options_to_include, options_to_remove, apply_immediately
+ )
+ template = self.response_template(MODIFY_OPTION_GROUP_TEMPLATE)
+ return template.render(option_group=option_group)
+
+ def create_db_parameter_group(self):
+ kwargs = self._get_db_parameter_group_kwargs()
+ db_parameter_group = self.backend.create_db_parameter_group(kwargs)
+ template = self.response_template(CREATE_DB_PARAMETER_GROUP_TEMPLATE)
+ return template.render(db_parameter_group=db_parameter_group)
+
+ def describe_db_parameter_groups(self):
+ kwargs = self._get_db_parameter_group_kwargs()
+ kwargs["max_records"] = self._get_int_param("MaxRecords")
+ kwargs["marker"] = self._get_param("Marker")
+ db_parameter_groups = self.backend.describe_db_parameter_groups(kwargs)
+ template = self.response_template(DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE)
+ return template.render(db_parameter_groups=db_parameter_groups)
+
+ def modify_db_parameter_group(self):
+ db_parameter_group_name = self._get_param("DBParameterGroupName")
+ db_parameter_group_parameters = self._get_db_parameter_group_parameters()
+ db_parameter_group = self.backend.modify_db_parameter_group(
+ db_parameter_group_name, db_parameter_group_parameters
+ )
+ template = self.response_template(MODIFY_DB_PARAMETER_GROUP_TEMPLATE)
+ return template.render(db_parameter_group=db_parameter_group)
+
+ def _get_db_parameter_group_parameters(self):
+ parameter_group_parameters = defaultdict(dict)
+ for param_name, value in self.querystring.items():
+ if not param_name.startswith("Parameters.Parameter"):
+ continue
+
+ split_param_name = param_name.split(".")
+ param_id = split_param_name[2]
+ param_setting = split_param_name[3]
+
+ parameter_group_parameters[param_id][param_setting] = value[0]
+
+ return parameter_group_parameters.values()
+
+ def describe_db_parameters(self):
+ db_parameter_group_name = self._get_param("DBParameterGroupName")
+ db_parameter_groups = self.backend.describe_db_parameter_groups(
+ {"name": db_parameter_group_name}
+ )
+ if not db_parameter_groups:
+ raise DBParameterGroupNotFoundError(db_parameter_group_name)
+
+ template = self.response_template(DESCRIBE_DB_PARAMETERS_TEMPLATE)
+ return template.render(db_parameter_group=db_parameter_groups[0])
+
+ def delete_db_parameter_group(self):
+ kwargs = self._get_db_parameter_group_kwargs()
+ db_parameter_group = self.backend.delete_db_parameter_group(kwargs["name"])
+ template = self.response_template(DELETE_DB_PARAMETER_GROUP_TEMPLATE)
+ return template.render(db_parameter_group=db_parameter_group)
+
+ def create_db_cluster(self):
+ kwargs = self._get_db_cluster_kwargs()
+ cluster = self.backend.create_db_cluster(kwargs)
+ template = self.response_template(CREATE_DB_CLUSTER_TEMPLATE)
+ return template.render(cluster=cluster)
+
+ def describe_db_clusters(self):
+ _id = self._get_param("DBClusterIdentifier")
+ clusters = self.backend.describe_db_clusters(cluster_identifier=_id)
+ template = self.response_template(DESCRIBE_CLUSTERS_TEMPLATE)
+ return template.render(clusters=clusters)
+
+ def delete_db_cluster(self):
+ _id = self._get_param("DBClusterIdentifier")
+ cluster = self.backend.delete_db_cluster(cluster_identifier=_id)
+ template = self.response_template(DELETE_CLUSTER_TEMPLATE)
+ return template.render(cluster=cluster)
+
+ def start_db_cluster(self):
+ _id = self._get_param("DBClusterIdentifier")
+ cluster = self.backend.start_db_cluster(cluster_identifier=_id)
+ template = self.response_template(START_CLUSTER_TEMPLATE)
+ return template.render(cluster=cluster)
+
+ def stop_db_cluster(self):
+ _id = self._get_param("DBClusterIdentifier")
+ cluster = self.backend.stop_db_cluster(cluster_identifier=_id)
+ template = self.response_template(STOP_CLUSTER_TEMPLATE)
+ return template.render(cluster=cluster)
+
+ def create_db_cluster_snapshot(self):
+ db_cluster_identifier = self._get_param("DBClusterIdentifier")
+ db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
+ tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
+ snapshot = self.backend.create_db_cluster_snapshot(
+ db_cluster_identifier, db_snapshot_identifier, tags
+ )
+ template = self.response_template(CREATE_CLUSTER_SNAPSHOT_TEMPLATE)
+ return template.render(snapshot=snapshot)
+
+ def copy_db_cluster_snapshot(self):
+ source_snapshot_identifier = self._get_param(
+ "SourceDBClusterSnapshotIdentifier"
+ )
+ target_snapshot_identifier = self._get_param(
+ "TargetDBClusterSnapshotIdentifier"
+ )
+ tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
+ snapshot = self.backend.copy_cluster_snapshot(
+ source_snapshot_identifier, target_snapshot_identifier, tags,
+ )
+ template = self.response_template(COPY_CLUSTER_SNAPSHOT_TEMPLATE)
+ return template.render(snapshot=snapshot)
+
+ def describe_db_cluster_snapshots(self):
+ db_cluster_identifier = self._get_param("DBClusterIdentifier")
+ db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
+ filters = filters_from_querystring(self.querystring)
+ snapshots = self.backend.describe_db_cluster_snapshots(
+ db_cluster_identifier, db_snapshot_identifier, filters
+ )
+ template = self.response_template(DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE)
+ return template.render(snapshots=snapshots)
+
+ def delete_db_cluster_snapshot(self):
+ db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
+ snapshot = self.backend.delete_db_cluster_snapshot(db_snapshot_identifier)
+ template = self.response_template(DELETE_CLUSTER_SNAPSHOT_TEMPLATE)
+ return template.render(snapshot=snapshot)
+
+ def restore_db_cluster_from_snapshot(self):
+ db_snapshot_identifier = self._get_param("SnapshotIdentifier")
+ db_kwargs = self._get_db_cluster_kwargs()
+ new_cluster = self.backend.restore_db_cluster_from_snapshot(
+ db_snapshot_identifier, db_kwargs
+ )
+ template = self.response_template(RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE)
+ return template.render(cluster=new_cluster)
+
+ def start_export_task(self):
+ kwargs = self._get_export_task_kwargs()
+ export_task = self.backend.start_export_task(kwargs)
+ template = self.response_template(START_EXPORT_TASK_TEMPLATE)
+ return template.render(task=export_task)
+
+ def cancel_export_task(self):
+ export_task_identifier = self._get_param("ExportTaskIdentifier")
+ export_task = self.backend.cancel_export_task(export_task_identifier)
+ template = self.response_template(CANCEL_EXPORT_TASK_TEMPLATE)
+ return template.render(task=export_task)
+
+ def describe_export_tasks(self):
+ export_task_identifier = self._get_param("ExportTaskIdentifier")
+ tasks = self.backend.describe_export_tasks(export_task_identifier,)
+ template = self.response_template(DESCRIBE_EXPORT_TASKS_TEMPLATE)
+ return template.render(tasks=tasks)
+
+ def create_event_subscription(self):
+ kwargs = self._get_event_subscription_kwargs()
+ subscription = self.backend.create_event_subscription(kwargs)
+ template = self.response_template(CREATE_EVENT_SUBSCRIPTION_TEMPLATE)
+ return template.render(subscription=subscription)
+
+ def delete_event_subscription(self):
+ subscription_name = self._get_param("SubscriptionName")
+ subscription = self.backend.delete_event_subscription(subscription_name)
+ template = self.response_template(DELETE_EVENT_SUBSCRIPTION_TEMPLATE)
+ return template.render(subscription=subscription)
+
+ def describe_event_subscriptions(self):
+ subscription_name = self._get_param("SubscriptionName")
+ subscriptions = self.backend.describe_event_subscriptions(subscription_name)
+ template = self.response_template(DESCRIBE_EVENT_SUBSCRIPTIONS_TEMPLATE)
+ return template.render(subscriptions=subscriptions)
+
CREATE_DATABASE_TEMPLATE = """
- {{ database.to_xml() }}
+ {{ database.to_xml() }}
523e3218-afc7-11c3-90f5-f90431260ab4
@@ -188,38 +620,65 @@ CREATE_DATABASE_TEMPLATE = """
- {{ database.to_xml() }}
+ {{ database.to_xml() }}
- ba8dedf0-bb9a-11d3-855b-576787000e19
+ 5e60c46d-a844-11e4-bb68-17f36418e58f
"""
DESCRIBE_DATABASES_TEMPLATE = """
- {% for database in databases %}
- {{ database.to_xml() }}
- {% endfor %}
+ {%- for database in databases -%}
+ {{ database.to_xml() }}
+ {%- endfor -%}
{% if marker %}
{{ marker }}
{% endif %}
- 01b2685a-b978-11d3-f272-7cd6cce12cc5
+ 523e3218-afc7-11c3-90f5-f90431260ab4
"""
MODIFY_DATABASE_TEMPLATE = """
- {{ database.to_xml() }}
+ {{ database.to_xml() }}
- f643f1ac-bbfe-11d3-f4c6-37db295f7674
+ bb58476c-a1a8-11e4-99cf-55e92d4bbada
"""
+REBOOT_DATABASE_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ d55711cb-a1ab-11e4-99cf-55e92d4bbada
+
+"""
+
+START_DATABASE_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab9
+
+"""
+
+STOP_DATABASE_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab8
+
+"""
+
DELETE_DATABASE_TEMPLATE = """
{{ database.to_xml() }}
@@ -229,12 +688,76 @@ DELETE_DATABASE_TEMPLATE = """
+
+ {{ cluster.to_xml() }}
+
+
+ 7369556f-b70d-11c3-faca-6ba18376ea1b
+
+"""
+
+RESTORE_INSTANCE_FROM_SNAPSHOT_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+"""
+
+CREATE_SNAPSHOT_TEMPLATE = """
+
+ {{ snapshot.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+COPY_SNAPSHOT_TEMPLATE = """
+
+ {{ snapshot.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+DESCRIBE_SNAPSHOTS_TEMPLATE = """
+
+
+ {%- for snapshot in snapshots -%}
+ {{ snapshot.to_xml() }}
+ {%- endfor -%}
+
+ {% if marker %}
+ {{ marker }}
+ {% endif %}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+"""
+
+DELETE_SNAPSHOT_TEMPLATE = """
+
+ {{ snapshot.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
CREATE_SECURITY_GROUP_TEMPLATE = """
- {{ security_group.to_xml() }}
+ {{ security_group.to_xml() }}
- e68ef6fa-afc1-11c3-845a-476777009d19
+ 462165d0-a77a-11e4-a5fa-75b30c556f97
"""
@@ -242,18 +765,18 @@ DESCRIBE_SECURITY_GROUPS_TEMPLATE = """
- 7aec7454-ba25-11d3-855b-576787000e19
+ 97e846bd-a77d-11e4-ac58-91351c0f3426
"""
@@ -262,13 +785,13 @@ AUTHORIZE_SECURITY_GROUP_TEMPLATE = """
- 6176b5f8-bfed-11d3-f92b-31fa5e8dbc99
+ 75d32fd5-a77e-11e4-8892-b10432f7a87d
"""
CREATE_SUBNET_GROUP_TEMPLATE = """
- {{ subnet_group.to_xml() }}
+ {{ subnet_group.to_xml() }}
3a401b3f-bb9e-11d3-f4c6-37db295f7674
@@ -279,7 +802,7 @@ DESCRIBE_SUBNET_GROUPS_TEMPLATE = """
+
+ {{ subnet_group.to_xml() }}
+
+
+ b783db3b-b98c-11d3-fbc7-5c0aad74da7c
+
+"""
+
DELETE_SUBNET_GROUP_TEMPLATE = """
- 6295e5ab-bbf3-11d3-f4c6-37db295f7674
+ 13785dd5-a7fc-11e4-bb9c-7f371d0859b0
"""
+
+CREATE_OPTION_GROUP_TEMPLATE = """
+
+ {{ option_group.to_xml() }}
+
+
+ 1e38dad4-9f50-11e4-87ea-a31c60ed2e36
+
+"""
+
+DELETE_OPTION_GROUP_TEMPLATE = """
+
+ e2590367-9fa2-11e4-99cf-55e92d41c60e
+
+"""
+
+DESCRIBE_OPTION_GROUP_TEMPLATE = """
+
+
+ {%- for option_group in option_groups -%}
+ {{ option_group.to_xml() }}
+ {%- endfor -%}
+
+
+
+ 4caf445d-9fbc-11e4-87ea-a31c60ed2e36
+
+"""
+
+DESCRIBE_OPTION_GROUP_OPTIONS_TEMPLATE = """
+
+
+ {%- for option_group_option in option_group_options -%}
+ {{ option_group_option.to_xml() }}
+ {%- endfor -%}
+
+
+
+ 457f7bb8-9fbf-11e4-9084-5754f80d5144
+
+"""
+
+MODIFY_OPTION_GROUP_TEMPLATE = """
+
+ {{ option_group.to_xml() }}
+
+
+ ce9284a5-a0de-11e4-b984-a11a53e1f328
+
+"""
+
+CREATE_DB_PARAMETER_GROUP_TEMPLATE = """
+
+ {{ db_parameter_group.to_xml() }}
+
+
+ 7805c127-af22-11c3-96ac-6999cc5f7e72
+
+"""
+
+DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE = """
+
+
+ {%- for db_parameter_group in db_parameter_groups -%}
+ {{ db_parameter_group.to_xml() }}
+ {%- endfor -%}
+
+
+
+ b75d527a-b98c-11d3-f272-7cd6cce12cc5
+
+"""
+
+MODIFY_DB_PARAMETER_GROUP_TEMPLATE = """
+
+ {{ db_parameter_group.name }}
+
+
+ 12d7435e-bba0-11d3-fe11-33d33a9bb7e3
+
+"""
+
+DELETE_DB_PARAMETER_GROUP_TEMPLATE = """
+
+ cad6c267-ba25-11d3-fe11-33d33a9bb7e3
+
+"""
+
+DESCRIBE_DB_PARAMETERS_TEMPLATE = """
+
+
+ {%- for db_parameter_name, db_parameter in db_parameter_group.parameters.items() -%}
+
+ {%- for parameter_name, parameter_value in db_parameter.items() -%}
+ <{{ parameter_name }}>{{ parameter_value }}{{ parameter_name }}>
+ {%- endfor -%}
+
+ {%- endfor -%}
+
+
+
+ 8c40488f-b9ff-11d3-a15e-7ac49293f4fa
+
+
+"""
+
+LIST_TAGS_FOR_RESOURCE_TEMPLATE = """
+
+
+ {%- for tag in tags -%}
+
+ {{ tag['Key'] }}
+ {{ tag['Value'] }}
+
+ {%- endfor -%}
+
+
+
+ 8c21ba39-a598-11e4-b688-194eaf8658fa
+
+"""
+
+ADD_TAGS_TO_RESOURCE_TEMPLATE = """
+
+ b194d9ca-a664-11e4-b688-194eaf8658fa
+
+"""
+
+REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = """
+
+ b194d9ca-a664-11e4-b688-194eaf8658fa
+
+"""
+
+CREATE_DB_CLUSTER_TEMPLATE = """
+
+ {{ cluster.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+"""
+
+DESCRIBE_CLUSTERS_TEMPLATE = """
+
+
+ {%- for cluster in clusters -%}
+ {{ cluster.to_xml() }}
+ {%- endfor -%}
+
+ {% if marker %}
+ {{ marker }}
+ {% endif %}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+"""
+
+START_CLUSTER_TEMPLATE = """
+
+ {{ cluster.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab9
+
+"""
+
+STOP_CLUSTER_TEMPLATE = """
+
+ {{ cluster.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab8
+
+"""
+
+RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE = """
+
+ {{ cluster.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+CREATE_CLUSTER_SNAPSHOT_TEMPLATE = """
+
+ {{ snapshot.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+COPY_CLUSTER_SNAPSHOT_TEMPLATE = """
+
+ {{ snapshot.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE = """
+
+
+ {%- for snapshot in snapshots -%}
+ {{ snapshot.to_xml() }}
+ {%- endfor -%}
+
+ {% if marker %}
+ {{ marker }}
+ {% endif %}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+"""
+
+DELETE_CLUSTER_SNAPSHOT_TEMPLATE = """
+
+ {{ snapshot.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+START_EXPORT_TASK_TEMPLATE = """
+
+ {{ task.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+CANCEL_EXPORT_TASK_TEMPLATE = """
+
+ {{ task.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+DESCRIBE_EXPORT_TASKS_TEMPLATE = """
+
+
+ {%- for task in tasks -%}
+ {{ task.to_xml() }}
+ {%- endfor -%}
+
+ {% if marker %}
+ {{ marker }}
+ {% endif %}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+CREATE_EVENT_SUBSCRIPTION_TEMPLATE = """
+
+ {{ subscription.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+DELETE_EVENT_SUBSCRIPTION_TEMPLATE = """
+
+ {{ subscription.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
+
+DESCRIBE_EVENT_SUBSCRIPTIONS_TEMPLATE = """
+
+
+ {%- for subscription in subscriptions -%}
+ {{ subscription.to_xml() }}
+ {%- endfor -%}
+
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+
+"""
diff --git a/moto/rds/urls.py b/moto/rds/urls.py
index fa7946241..3bca6b1cf 100644
--- a/moto/rds/urls.py
+++ b/moto/rds/urls.py
@@ -1,5 +1,5 @@
from .responses import RDSResponse
-url_bases = [r"https?://rds(\..+)?.amazonaws.com"]
+url_bases = [r"https?://rds\.(.+)\.amazonaws\.com", r"https?://rds\.amazonaws\.com"]
url_paths = {"{0}/$": RDSResponse.dispatch}
diff --git a/moto/rds2/utils.py b/moto/rds/utils.py
similarity index 100%
rename from moto/rds2/utils.py
rename to moto/rds/utils.py
diff --git a/moto/rds2/__init__.py b/moto/rds2/__init__.py
deleted file mode 100644
index fbba2910f..000000000
--- a/moto/rds2/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .models import rds2_backends
-from ..core.models import base_decorator
-
-rds2_backend = rds2_backends["us-west-1"]
-mock_rds2 = base_decorator(rds2_backends)
diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py
deleted file mode 100644
index d22fb0e23..000000000
--- a/moto/rds2/exceptions.py
+++ /dev/null
@@ -1,195 +0,0 @@
-from jinja2 import Template
-from werkzeug.exceptions import BadRequest
-
-
-class RDSClientError(BadRequest):
- def __init__(self, code, message):
- super().__init__()
- template = Template(
- """
-
-
- {{ code }}
- {{ message }}
- Sender
-
- 6876f774-7273-11e4-85dc-39e55ca848d1
- """
- )
- self.description = template.render(code=code, message=message)
-
-
-class DBInstanceNotFoundError(RDSClientError):
- def __init__(self, database_identifier):
- super().__init__(
- "DBInstanceNotFound",
- "DBInstance {0} not found.".format(database_identifier),
- )
-
-
-class DBSnapshotNotFoundError(RDSClientError):
- def __init__(self, snapshot_identifier):
- super().__init__(
- "DBSnapshotNotFound",
- "DBSnapshot {} not found.".format(snapshot_identifier),
- )
-
-
-class DBSecurityGroupNotFoundError(RDSClientError):
- def __init__(self, security_group_name):
- super().__init__(
- "DBSecurityGroupNotFound",
- "Security Group {0} not found.".format(security_group_name),
- )
-
-
-class DBSubnetGroupNotFoundError(RDSClientError):
- def __init__(self, subnet_group_name):
- super().__init__(
- "DBSubnetGroupNotFound",
- "Subnet Group {0} not found.".format(subnet_group_name),
- )
-
-
-class DBParameterGroupNotFoundError(RDSClientError):
- def __init__(self, db_parameter_group_name):
- super().__init__(
- "DBParameterGroupNotFound",
- "DB Parameter Group {0} not found.".format(db_parameter_group_name),
- )
-
-
-class OptionGroupNotFoundFaultError(RDSClientError):
- def __init__(self, option_group_name):
- super().__init__(
- "OptionGroupNotFoundFault",
- "Specified OptionGroupName: {0} not found.".format(option_group_name),
- )
-
-
-class InvalidDBClusterStateFaultError(RDSClientError):
- def __init__(self, database_identifier):
- super().__init__(
- "InvalidDBClusterStateFault",
- "Invalid DB type, when trying to perform StopDBInstance on {0}e. See AWS RDS documentation on rds.stop_db_instance".format(
- database_identifier
- ),
- )
-
-
-class InvalidDBInstanceStateError(RDSClientError):
- def __init__(self, database_identifier, istate):
- estate = (
- "in available state"
- if istate == "stop"
- else "stopped, it cannot be started"
- )
- super().__init__(
- "InvalidDBInstanceState",
- "Instance {} is not {}.".format(database_identifier, estate),
- )
-
-
-class SnapshotQuotaExceededError(RDSClientError):
- def __init__(self):
- super().__init__(
- "SnapshotQuotaExceeded",
- "The request cannot be processed because it would exceed the maximum number of snapshots.",
- )
-
-
-class DBSnapshotAlreadyExistsError(RDSClientError):
- def __init__(self, database_snapshot_identifier):
- super().__init__(
- "DBSnapshotAlreadyExists",
- "Cannot create the snapshot because a snapshot with the identifier {} already exists.".format(
- database_snapshot_identifier
- ),
- )
-
-
-class InvalidParameterValue(RDSClientError):
- def __init__(self, message):
- super().__init__("InvalidParameterValue", message)
-
-
-class InvalidParameterCombination(RDSClientError):
- def __init__(self, message):
- super().__init__("InvalidParameterCombination", message)
-
-
-class InvalidDBClusterStateFault(RDSClientError):
- def __init__(self, message):
- super().__init__("InvalidDBClusterStateFault", message)
-
-
-class DBClusterNotFoundError(RDSClientError):
- def __init__(self, cluster_identifier):
- super().__init__(
- "DBClusterNotFoundFault",
- "DBCluster {} not found.".format(cluster_identifier),
- )
-
-
-class DBClusterSnapshotNotFoundError(RDSClientError):
- def __init__(self, snapshot_identifier):
- super().__init__(
- "DBClusterSnapshotNotFoundFault",
- "DBClusterSnapshot {} not found.".format(snapshot_identifier),
- )
-
-
-class DBClusterSnapshotAlreadyExistsError(RDSClientError):
- def __init__(self, database_snapshot_identifier):
- super().__init__(
- "DBClusterSnapshotAlreadyExistsFault",
- "Cannot create the snapshot because a snapshot with the identifier {} already exists.".format(
- database_snapshot_identifier
- ),
- )
-
-
-class ExportTaskAlreadyExistsError(RDSClientError):
- def __init__(self, export_task_identifier):
- super().__init__(
- "ExportTaskAlreadyExistsFault",
- "Cannot start export task because a task with the identifier {} already exists.".format(
- export_task_identifier
- ),
- )
-
-
-class ExportTaskNotFoundError(RDSClientError):
- def __init__(self, export_task_identifier):
- super().__init__(
- "ExportTaskNotFoundFault",
- "Cannot cancel export task because a task with the identifier {} is not exist.".format(
- export_task_identifier
- ),
- )
-
-
-class InvalidExportSourceStateError(RDSClientError):
- def __init__(self, status):
- super().__init__(
- "InvalidExportSourceStateFault",
- "Export source should be 'available' but current status is {}.".format(
- status
- ),
- )
-
-
-class SubscriptionAlreadyExistError(RDSClientError):
- def __init__(self, subscription_name):
- super().__init__(
- "SubscriptionAlreadyExistFault",
- "Subscription {} already exists.".format(subscription_name),
- )
-
-
-class SubscriptionNotFoundError(RDSClientError):
- def __init__(self, subscription_name):
- super().__init__(
- "SubscriptionNotFoundFault",
- "Subscription {} not found.".format(subscription_name),
- )
diff --git a/moto/rds2/models.py b/moto/rds2/models.py
deleted file mode 100644
index beccfdfeb..000000000
--- a/moto/rds2/models.py
+++ /dev/null
@@ -1,2291 +0,0 @@
-import copy
-import datetime
-import os
-import random
-import string
-
-from collections import defaultdict
-from jinja2 import Template
-from re import compile as re_compile
-from collections import OrderedDict
-from moto.core import BaseBackend, BaseModel, CloudFormationModel, ACCOUNT_ID
-
-from moto.core.utils import iso_8601_datetime_with_milliseconds, BackendDict
-from moto.ec2.models import ec2_backends
-from .exceptions import (
- RDSClientError,
- DBClusterNotFoundError,
- DBClusterSnapshotAlreadyExistsError,
- DBClusterSnapshotNotFoundError,
- DBInstanceNotFoundError,
- DBSnapshotNotFoundError,
- DBSecurityGroupNotFoundError,
- DBSubnetGroupNotFoundError,
- DBParameterGroupNotFoundError,
- OptionGroupNotFoundFaultError,
- InvalidDBClusterStateFaultError,
- InvalidDBInstanceStateError,
- SnapshotQuotaExceededError,
- DBSnapshotAlreadyExistsError,
- InvalidParameterValue,
- InvalidParameterCombination,
- InvalidDBClusterStateFault,
- ExportTaskNotFoundError,
- ExportTaskAlreadyExistsError,
- InvalidExportSourceStateError,
- SubscriptionNotFoundError,
- SubscriptionAlreadyExistError,
-)
-from .utils import FilterDef, apply_filter, merge_filters, validate_filters
-
-
-class Cluster:
- def __init__(self, **kwargs):
- self.db_name = kwargs.get("db_name")
- self.db_cluster_identifier = kwargs.get("db_cluster_identifier")
- self.db_cluster_instance_class = kwargs.get("db_cluster_instance_class")
- self.deletion_protection = kwargs.get("deletion_protection")
- self.engine = kwargs.get("engine")
- self.engine_version = kwargs.get("engine_version")
- if not self.engine_version:
- self.engine_version = Cluster.default_engine_version(self.engine)
- self.engine_mode = kwargs.get("engine_mode") or "provisioned"
- self.iops = kwargs.get("iops")
- self.status = "active"
- self.region = kwargs.get("region")
- self.cluster_create_time = iso_8601_datetime_with_milliseconds(
- datetime.datetime.now()
- )
- self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
- if self.copy_tags_to_snapshot is None:
- self.copy_tags_to_snapshot = True
- self.storage_type = kwargs.get("storage_type")
- if self.storage_type is None:
- self.storage_type = Cluster.default_storage_type(iops=self.iops)
- self.allocated_storage = kwargs.get("allocated_storage")
- if self.allocated_storage is None:
- self.allocated_storage = Cluster.default_allocated_storage(
- engine=self.engine, storage_type=self.storage_type
- )
- self.master_username = kwargs.get("master_username")
- if not self.master_username:
- raise InvalidParameterValue(
- "The parameter MasterUsername must be provided and must not be blank."
- )
- self.master_user_password = kwargs.get("master_user_password")
- if not self.master_user_password:
- raise InvalidParameterValue(
- "The parameter MasterUserPassword must be provided and must not be blank."
- )
- if len(self.master_user_password) < 8:
- raise InvalidParameterValue(
- "The parameter MasterUserPassword is not a valid password because it is shorter than 8 characters."
- )
- self.availability_zones = kwargs.get("availability_zones")
- if not self.availability_zones:
- self.availability_zones = [
- f"{self.region}a",
- f"{self.region}b",
- f"{self.region}c",
- ]
- self.parameter_group = kwargs.get("parameter_group") or "default.aurora8.0"
- self.subnet_group = "default"
- self.status = "creating"
- self.url_identifier = "".join(
- random.choice(string.ascii_lowercase + string.digits) for _ in range(12)
- )
- self.endpoint = f"{self.db_cluster_identifier}.cluster-{self.url_identifier}.{self.region}.rds.amazonaws.com"
- self.reader_endpoint = f"{self.db_cluster_identifier}.cluster-ro-{self.url_identifier}.{self.region}.rds.amazonaws.com"
- self.port = kwargs.get("port")
- if self.port is None:
- self.port = Cluster.default_port(self.engine)
- self.preferred_backup_window = "01:37-02:07"
- self.preferred_maintenance_window = "wed:02:40-wed:03:10"
- # This should default to the default security group
- self.vpc_security_groups = []
- self.hosted_zone_id = "".join(
- random.choice(string.ascii_uppercase + string.digits) for _ in range(14)
- )
- self.resource_id = "cluster-" + "".join(
- random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
- )
- self.tags = kwargs.get("tags", [])
-
- @property
- def db_cluster_arn(self):
- return "arn:aws:rds:{0}:{1}:cluster:{2}".format(
- self.region, ACCOUNT_ID, self.db_cluster_identifier
- )
-
- def to_xml(self):
- template = Template(
- """
- 1
-
- {% for zone in cluster.availability_zones %}
- {{ zone }}
- {% endfor %}
-
- 1
- {{ cluster.status }}
- {% if cluster.db_name %}{{ cluster.db_name }}{% endif %}
- {{ cluster.db_cluster_identifier }}
- {{ cluster.parameter_group }}
- {{ cluster.subnet_group }}
- {{ cluster.cluster_create_time }}
- {{ cluster.engine }}
- {{ cluster.status }}
- {{ cluster.endpoint }}
- {{ cluster.reader_endpoint }}
- false
- {{ cluster.engine_version }}
- {{ cluster.port }}
- {% if cluster.iops %}
- {{ cluster.iops }}
- io1
- {% else %}
- {{ cluster.storage_type }}
- {% endif %}
- {{ cluster.db_cluster_instance_class }}
- {{ cluster.master_username }}
- {{ cluster.preferred_backup_window }}
- {{ cluster.preferred_maintenance_window }}
-
-
-
- {% for id in cluster.vpc_security_groups %}
-
- {{ id }}
- active
-
- {% endfor %}
-
- {{ cluster.hosted_zone_id }}
- false
- {{ cluster.resource_id }}
- {{ cluster.db_cluster_arn }}
-
- false
- {{ cluster.engine_mode }}
- {{ 'true' if cluster.deletion_protection else 'false' }}
- false
- {{ cluster.copy_tags_to_snapshot }}
- false
-
-
- {%- for tag in cluster.tags -%}
-
- {{ tag['Key'] }}
- {{ tag['Value'] }}
-
- {%- endfor -%}
-
- """
- )
- return template.render(cluster=self)
-
- @staticmethod
- def default_engine_version(engine):
- return {
- "aurora": "5.6.mysql_aurora.1.22.5",
- "aurora-mysql": "5.7.mysql_aurora.2.07.2",
- "aurora-postgresql": "12.7",
- "mysql": "8.0.23",
- "postgres": "13.4",
- }[engine]
-
- @staticmethod
- def default_port(engine):
- return {
- "aurora": 3306,
- "aurora-mysql": 3306,
- "aurora-postgresql": 5432,
- "mysql": 3306,
- "postgres": 5432,
- }[engine]
-
- @staticmethod
- def default_storage_type(iops):
- if iops is None:
- return "gp2"
- else:
- return "io1"
-
- @staticmethod
- def default_allocated_storage(engine, storage_type):
- return {
- "aurora": {"gp2": 0, "io1": 0, "standard": 0},
- "aurora-mysql": {"gp2": 20, "io1": 100, "standard": 10},
- "aurora-postgresql": {"gp2": 20, "io1": 100, "standard": 10},
- "mysql": {"gp2": 20, "io1": 100, "standard": 5},
- "postgres": {"gp2": 20, "io1": 100, "standard": 5},
- }[engine][storage_type]
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
-
-class ClusterSnapshot(BaseModel):
-
- SUPPORTED_FILTERS = {
- "db-cluster-id": FilterDef(
- ["cluster.db_cluster_arn", "cluster.db_cluster_identifier"],
- "DB Cluster Identifiers",
- ),
- "db-cluster-snapshot-id": FilterDef(
- ["snapshot_id"], "DB Cluster Snapshot Identifiers"
- ),
- "snapshot-type": FilterDef(None, "Snapshot Types"),
- "engine": FilterDef(["cluster.engine"], "Engine Names"),
- }
-
- def __init__(self, cluster, snapshot_id, tags):
- self.cluster = cluster
- self.snapshot_id = snapshot_id
- self.tags = tags
- self.status = "available"
- self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
-
- @property
- def snapshot_arn(self):
- return "arn:aws:rds:{0}:{1}:cluster-snapshot:{2}".format(
- self.cluster.region, ACCOUNT_ID, self.snapshot_id
- )
-
- def to_xml(self):
- template = Template(
- """
-
- {{ snapshot.snapshot_id }}
- {{ snapshot.created_at }}
- {{ cluster.db_cluster_identifier }}
- {{ snapshot.created_at }}
- {{ 100 }}
- {{ cluster.allocated_storage }}
- {{ cluster.master_username }}
- {{ cluster.port }}
- {{ cluster.engine }}
- {{ snapshot.status }}
- manual
- {{ snapshot.snapshot_arn }}
- {{ cluster.region }}
- {% if cluster.iops %}
- {{ cluster.iops }}
- io1
- {% else %}
- {{ cluster.storage_type }}
- {% endif %}
-
- {{ cluster.license_model }}
-
- """
- )
- return template.render(snapshot=self, cluster=self.cluster)
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
-
-class Database(CloudFormationModel):
-
- SUPPORTED_FILTERS = {
- "db-cluster-id": FilterDef(None, "DB Cluster Identifiers"),
- "db-instance-id": FilterDef(
- ["db_instance_arn", "db_instance_identifier"], "DB Instance Identifiers"
- ),
- "dbi-resource-id": FilterDef(["dbi_resource_id"], "Dbi Resource Ids"),
- "domain": FilterDef(None, ""),
- "engine": FilterDef(["engine"], "Engine Names"),
- }
-
- default_engine_versions = {
- "MySQL": "5.6.21",
- "mysql": "5.6.21",
- "oracle-se1": "11.2.0.4.v3",
- "oracle-se": "11.2.0.4.v3",
- "oracle-ee": "11.2.0.4.v3",
- "sqlserver-ee": "11.00.2100.60.v1",
- "sqlserver-se": "11.00.2100.60.v1",
- "sqlserver-ex": "11.00.2100.60.v1",
- "sqlserver-web": "11.00.2100.60.v1",
- "postgres": "9.3.3",
- }
-
- def __init__(self, **kwargs):
- self.status = "available"
- self.is_replica = False
- self.replicas = []
- self.region = kwargs.get("region")
- self.engine = kwargs.get("engine")
- self.engine_version = kwargs.get("engine_version", None)
- if not self.engine_version and self.engine in self.default_engine_versions:
- self.engine_version = self.default_engine_versions[self.engine]
- self.iops = kwargs.get("iops")
- self.storage_encrypted = kwargs.get("storage_encrypted", False)
- if self.storage_encrypted:
- self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id")
- else:
- self.kms_key_id = kwargs.get("kms_key_id")
- self.storage_type = kwargs.get("storage_type")
- if self.storage_type is None:
- self.storage_type = Database.default_storage_type(iops=self.iops)
- self.master_username = kwargs.get("master_username")
- self.master_user_password = kwargs.get("master_user_password")
- self.auto_minor_version_upgrade = kwargs.get("auto_minor_version_upgrade")
- if self.auto_minor_version_upgrade is None:
- self.auto_minor_version_upgrade = True
- self.allocated_storage = kwargs.get("allocated_storage")
- if self.allocated_storage is None:
- self.allocated_storage = Database.default_allocated_storage(
- engine=self.engine, storage_type=self.storage_type
- )
- self.db_instance_identifier = kwargs.get("db_instance_identifier")
- self.source_db_identifier = kwargs.get("source_db_identifier")
- self.db_instance_class = kwargs.get("db_instance_class")
- self.port = kwargs.get("port")
- if self.port is None:
- self.port = Database.default_port(self.engine)
- self.db_instance_identifier = kwargs.get("db_instance_identifier")
- self.db_name = kwargs.get("db_name")
- self.instance_create_time = iso_8601_datetime_with_milliseconds(
- datetime.datetime.now()
- )
- self.publicly_accessible = kwargs.get("publicly_accessible")
- if self.publicly_accessible is None:
- self.publicly_accessible = True
- self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
- if self.copy_tags_to_snapshot is None:
- self.copy_tags_to_snapshot = False
- self.backup_retention_period = kwargs.get("backup_retention_period")
- if self.backup_retention_period is None:
- self.backup_retention_period = 1
- self.availability_zone = kwargs.get("availability_zone")
- self.multi_az = kwargs.get("multi_az")
- self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
- if self.db_subnet_group_name:
- self.db_subnet_group = rds2_backends[self.region].describe_subnet_groups(
- self.db_subnet_group_name
- )[0]
- else:
- self.db_subnet_group = None
- self.security_groups = kwargs.get("security_groups", [])
- self.vpc_security_group_ids = kwargs.get("vpc_security_group_ids", [])
- self.preferred_maintenance_window = kwargs.get(
- "preferred_maintenance_window", "wed:06:38-wed:07:08"
- )
- self.db_parameter_group_name = kwargs.get("db_parameter_group_name")
- if (
- self.db_parameter_group_name
- and not self.is_default_parameter_group(self.db_parameter_group_name)
- and self.db_parameter_group_name
- not in rds2_backends[self.region].db_parameter_groups
- ):
- raise DBParameterGroupNotFoundError(self.db_parameter_group_name)
-
- self.preferred_backup_window = kwargs.get(
- "preferred_backup_window", "13:14-13:44"
- )
- self.license_model = kwargs.get("license_model", "general-public-license")
- self.option_group_name = kwargs.get("option_group_name", None)
- self.option_group_supplied = self.option_group_name is not None
- if (
- self.option_group_name
- and self.option_group_name not in rds2_backends[self.region].option_groups
- ):
- raise OptionGroupNotFoundFaultError(self.option_group_name)
- self.default_option_groups = {
- "MySQL": "default.mysql5.6",
- "mysql": "default.mysql5.6",
- "postgres": "default.postgres9.3",
- }
- if not self.option_group_name and self.engine in self.default_option_groups:
- self.option_group_name = self.default_option_groups[self.engine]
- self.character_set_name = kwargs.get("character_set_name", None)
- self.enable_iam_database_authentication = kwargs.get(
- "enable_iam_database_authentication", False
- )
- self.dbi_resource_id = "db-M5ENSHXFPU6XHZ4G4ZEI5QIO2U"
- self.tags = kwargs.get("tags", [])
- self.deletion_protection = kwargs.get("deletion_protection", False)
-
- @property
- def db_instance_arn(self):
- return "arn:aws:rds:{0}:{1}:db:{2}".format(
- self.region, ACCOUNT_ID, self.db_instance_identifier
- )
-
- @property
- def physical_resource_id(self):
- return self.db_instance_identifier
-
- def db_parameter_groups(self):
- if not self.db_parameter_group_name or self.is_default_parameter_group(
- self.db_parameter_group_name
- ):
- (
- db_family,
- db_parameter_group_name,
- ) = self.default_db_parameter_group_details()
- description = "Default parameter group for {0}".format(db_family)
- return [
- DBParameterGroup(
- name=db_parameter_group_name,
- family=db_family,
- description=description,
- tags={},
- region=self.region,
- )
- ]
- else:
- if (
- self.db_parameter_group_name
- not in rds2_backends[self.region].db_parameter_groups
- ):
- raise DBParameterGroupNotFoundError(self.db_parameter_group_name)
-
- return [
- rds2_backends[self.region].db_parameter_groups[
- self.db_parameter_group_name
- ]
- ]
-
- def is_default_parameter_group(self, param_group_name):
- return param_group_name.startswith("default.%s" % self.engine.lower())
-
- def default_db_parameter_group_details(self):
- if not self.engine_version:
- return (None, None)
-
- minor_engine_version = ".".join(str(self.engine_version).rsplit(".")[:-1])
- db_family = "{0}{1}".format(self.engine.lower(), minor_engine_version)
-
- return db_family, "default.{0}".format(db_family)
-
- def to_xml(self):
- template = Template(
- """
- {{ database.backup_retention_period }}
- {{ database.status }}
- {% if database.db_name %}{{ database.db_name }}{% endif %}
- {{ database.multi_az }}
-
- {% for vpc_security_group_id in database.vpc_security_group_ids %}
-
- active
- {{ vpc_security_group_id }}
-
- {% endfor %}
-
- {{ database.db_instance_identifier }}
- {{ database.dbi_resource_id }}
- {{ database.instance_create_time }}
- 03:50-04:20
- wed:06:38-wed:07:08
-
- {% for replica_id in database.replicas %}
- {{ replica_id }}
- {% endfor %}
-
-
- {% if database.is_replica %}
-
- read replication
- replicating
- true
-
-
- {% endif %}
-
- {% if database.is_replica %}
- {{ database.source_db_identifier }}
- {% endif %}
- {{ database.engine }}
- {{database.enable_iam_database_authentication|lower }}
- {{ database.license_model }}
- {{ database.engine_version }}
-
-
- {{ database.option_group_name }}
- in-sync
-
-
-
- {% for db_parameter_group in database.db_parameter_groups() %}
-
- in-sync
- {{ db_parameter_group.name }}
-
- {% endfor %}
-
-
- {% for security_group in database.security_groups %}
-
- active
- {{ security_group }}
-
- {% endfor %}
-
- {% if database.db_subnet_group %}
-
- {{ database.db_subnet_group.subnet_name }}
- {{ database.db_subnet_group.description }}
- {{ database.db_subnet_group.status }}
-
- {% for subnet in database.db_subnet_group.subnets %}
-
- Active
- {{ subnet.id }}
-
- {{ subnet.availability_zone }}
- false
-
-
- {% endfor %}
-
- {{ database.db_subnet_group.vpc_id }}
-
- {% endif %}
- {{ database.publicly_accessible }}
- {{ database.copy_tags_to_snapshot }}
- {{ database.auto_minor_version_upgrade }}
- {{ database.allocated_storage }}
- {{ database.storage_encrypted }}
- {% if database.kms_key_id %}
- {{ database.kms_key_id }}
- {% endif %}
- {% if database.iops %}
- {{ database.iops }}
- io1
- {% else %}
- {{ database.storage_type }}
- {% endif %}
- {{ database.db_instance_class }}
- {{ database.master_username }}
-
- {{ database.address }}
- {{ database.port }}
-
- {{ database.db_instance_arn }}
-
- {%- for tag in database.tags -%}
-
- {{ tag['Key'] }}
- {{ tag['Value'] }}
-
- {%- endfor -%}
-
- {{ 'true' if database.deletion_protection else 'false' }}
- """
- )
- return template.render(database=self)
-
- @property
- def address(self):
- return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(
- self.db_instance_identifier, self.region
- )
-
- def add_replica(self, replica):
- self.replicas.append(replica.db_instance_identifier)
-
- def remove_replica(self, replica):
- self.replicas.remove(replica.db_instance_identifier)
-
- def set_as_replica(self):
- self.is_replica = True
- self.replicas = []
-
- def update(self, db_kwargs):
- for key, value in db_kwargs.items():
- if value is not None:
- setattr(self, key, value)
-
- @classmethod
- def has_cfn_attr(cls, attribute):
- return attribute in ["Endpoint.Address", "Endpoint.Port"]
-
- def get_cfn_attribute(self, attribute_name):
- # Local import to avoid circular dependency with cloudformation.parsing
- from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
-
- if attribute_name == "Endpoint.Address":
- return self.address
- elif attribute_name == "Endpoint.Port":
- return self.port
- raise UnformattedGetAttTemplateException()
-
- @staticmethod
- def default_port(engine):
- return {
- "mysql": 3306,
- "mariadb": 3306,
- "postgres": 5432,
- "oracle-ee": 1521,
- "oracle-se2": 1521,
- "oracle-se1": 1521,
- "oracle-se": 1521,
- "sqlserver-ee": 1433,
- "sqlserver-ex": 1433,
- "sqlserver-se": 1433,
- "sqlserver-web": 1433,
- }[engine]
-
- @staticmethod
- def default_storage_type(iops):
- if iops is None:
- return "gp2"
- else:
- return "io1"
-
- @staticmethod
- def default_allocated_storage(engine, storage_type):
- return {
- "aurora": {"gp2": 0, "io1": 0, "standard": 0},
- "mysql": {"gp2": 20, "io1": 100, "standard": 5},
- "mariadb": {"gp2": 20, "io1": 100, "standard": 5},
- "postgres": {"gp2": 20, "io1": 100, "standard": 5},
- "oracle-ee": {"gp2": 20, "io1": 100, "standard": 10},
- "oracle-se2": {"gp2": 20, "io1": 100, "standard": 10},
- "oracle-se1": {"gp2": 20, "io1": 100, "standard": 10},
- "oracle-se": {"gp2": 20, "io1": 100, "standard": 10},
- "sqlserver-ee": {"gp2": 200, "io1": 200, "standard": 200},
- "sqlserver-ex": {"gp2": 20, "io1": 100, "standard": 20},
- "sqlserver-se": {"gp2": 200, "io1": 200, "standard": 200},
- "sqlserver-web": {"gp2": 20, "io1": 100, "standard": 20},
- }[engine][storage_type]
-
- @staticmethod
- def cloudformation_name_type():
- return "DBInstanceIdentifier"
-
- @staticmethod
- def cloudformation_type():
- # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html
- return "AWS::RDS::DBInstance"
-
- @classmethod
- def create_from_cloudformation_json(
- cls, resource_name, cloudformation_json, region_name, **kwargs
- ):
- properties = cloudformation_json["Properties"]
-
- db_security_groups = properties.get("DBSecurityGroups")
- if not db_security_groups:
- db_security_groups = []
- security_groups = [group.group_name for group in db_security_groups]
- db_subnet_group = properties.get("DBSubnetGroupName")
- db_subnet_group_name = db_subnet_group.subnet_name if db_subnet_group else None
- db_kwargs = {
- "auto_minor_version_upgrade": properties.get("AutoMinorVersionUpgrade"),
- "allocated_storage": properties.get("AllocatedStorage"),
- "availability_zone": properties.get("AvailabilityZone"),
- "backup_retention_period": properties.get("BackupRetentionPeriod"),
- "db_instance_class": properties.get("DBInstanceClass"),
- "db_instance_identifier": resource_name,
- "db_name": properties.get("DBName"),
- "db_subnet_group_name": db_subnet_group_name,
- "engine": properties.get("Engine"),
- "engine_version": properties.get("EngineVersion"),
- "iops": properties.get("Iops"),
- "kms_key_id": properties.get("KmsKeyId"),
- "master_user_password": properties.get("MasterUserPassword"),
- "master_username": properties.get("MasterUsername"),
- "multi_az": properties.get("MultiAZ"),
- "db_parameter_group_name": properties.get("DBParameterGroupName"),
- "port": properties.get("Port", 3306),
- "publicly_accessible": properties.get("PubliclyAccessible"),
- "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"),
- "region": region_name,
- "security_groups": security_groups,
- "storage_encrypted": properties.get("StorageEncrypted"),
- "storage_type": properties.get("StorageType"),
- "tags": properties.get("Tags"),
- "vpc_security_group_ids": properties.get("VpcSecurityGroupIds", []),
- }
-
- rds2_backend = rds2_backends[region_name]
- source_db_identifier = properties.get("SourceDBInstanceIdentifier")
- if source_db_identifier:
- # Replica
- db_kwargs["source_db_identifier"] = source_db_identifier
- database = rds2_backend.create_database_replica(db_kwargs)
- else:
- database = rds2_backend.create_database(db_kwargs)
- return database
-
- def to_json(self):
- template = Template(
- """{
- "AllocatedStorage": 10,
- "AutoMinorVersionUpgrade": "{{ database.auto_minor_version_upgrade }}",
- "AvailabilityZone": "{{ database.availability_zone }}",
- "BackupRetentionPeriod": "{{ database.backup_retention_period }}",
- "CharacterSetName": {%- if database.character_set_name -%}{{ database.character_set_name }}{%- else %} null{%- endif -%},
- "DBInstanceClass": "{{ database.db_instance_class }}",
- "DBInstanceIdentifier": "{{ database.db_instance_identifier }}",
- "DBInstanceStatus": "{{ database.status }}",
- "DBName": {%- if database.db_name -%}"{{ database.db_name }}"{%- else %} null{%- endif -%},
- {% if database.db_parameter_group_name -%}"DBParameterGroups": {
- "DBParameterGroup": {
- "ParameterApplyStatus": "in-sync",
- "DBParameterGroupName": "{{ database.db_parameter_group_name }}"
- }
- },{%- endif %}
- "DBSecurityGroups": [
- {% for security_group in database.security_groups -%}{%- if loop.index != 1 -%},{%- endif -%}
- {"DBSecurityGroup": {
- "Status": "active",
- "DBSecurityGroupName": "{{ security_group }}"
- }}{% endfor %}
- ],
- {%- if database.db_subnet_group -%}{{ database.db_subnet_group.to_json() }},{%- endif %}
- "Engine": "{{ database.engine }}",
- "EngineVersion": "{{ database.engine_version }}",
- "LatestRestorableTime": null,
- "LicenseModel": "{{ database.license_model }}",
- "MasterUsername": "{{ database.master_username }}",
- "MultiAZ": "{{ database.multi_az }}",{% if database.option_group_name %}
- "OptionGroupMemberships": [{
- "OptionGroupMembership": {
- "OptionGroupName": "{{ database.option_group_name }}",
- "Status": "in-sync"
- }
- }],{%- endif %}
- "PendingModifiedValues": { "MasterUserPassword": "****" },
- "PreferredBackupWindow": "{{ database.preferred_backup_window }}",
- "PreferredMaintenanceWindow": "{{ database.preferred_maintenance_window }}",
- "PubliclyAccessible": "{{ database.publicly_accessible }}",
- "CopyTagsToSnapshot": "{{ database.copy_tags_to_snapshot }}",
- "AllocatedStorage": "{{ database.allocated_storage }}",
- "Endpoint": {
- "Address": "{{ database.address }}",
- "Port": "{{ database.port }}"
- },
- "InstanceCreateTime": "{{ database.instance_create_time }}",
- "Iops": null,
- "ReadReplicaDBInstanceIdentifiers": [{%- for replica in database.replicas -%}
- {%- if not loop.first -%},{%- endif -%}
- "{{ replica }}"
- {%- endfor -%}
- ],
- {%- if database.source_db_identifier -%}
- "ReadReplicaSourceDBInstanceIdentifier": "{{ database.source_db_identifier }}",
- {%- else -%}
- "ReadReplicaSourceDBInstanceIdentifier": null,
- {%- endif -%}
- "SecondaryAvailabilityZone": null,
- "StatusInfos": null,
- "VpcSecurityGroups": [
- {% for vpc_security_group_id in database.vpc_security_group_ids %}
- {
- "Status": "active",
- "VpcSecurityGroupId": "{{ vpc_security_group_id }}"
- }
- {% endfor %}
- ],
- "DBInstanceArn": "{{ database.db_instance_arn }}"
- }"""
- )
- return template.render(database=self)
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
- def delete(self, region_name):
- backend = rds2_backends[region_name]
- backend.delete_database(self.db_instance_identifier)
-
-
-class DatabaseSnapshot(BaseModel):
-
- SUPPORTED_FILTERS = {
- "db-instance-id": FilterDef(
- ["database.db_instance_arn", "database.db_instance_identifier"],
- "DB Instance Identifiers",
- ),
- "db-snapshot-id": FilterDef(["snapshot_id"], "DB Snapshot Identifiers"),
- "dbi-resource-id": FilterDef(["database.dbi_resource_id"], "Dbi Resource Ids"),
- "snapshot-type": FilterDef(None, "Snapshot Types"),
- "engine": FilterDef(["database.engine"], "Engine Names"),
- }
-
- def __init__(self, database, snapshot_id, tags):
- self.database = database
- self.snapshot_id = snapshot_id
- self.tags = tags
- self.status = "available"
- self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
-
- @property
- def snapshot_arn(self):
- return "arn:aws:rds:{0}:{1}:snapshot:{2}".format(
- self.database.region, ACCOUNT_ID, self.snapshot_id
- )
-
- def to_xml(self):
- template = Template(
- """
- {{ snapshot.snapshot_id }}
- {{ database.db_instance_identifier }}
- {{ database.dbi_resource_id }}
- {{ snapshot.created_at }}
- {{ database.engine }}
- {{ database.allocated_storage }}
- {{ snapshot.status }}
- {{ database.port }}
- {{ database.availability_zone }}
- {{ database.db_subnet_group.vpc_id }}
- {{ snapshot.created_at }}
- {{ database.master_username }}
- {{ database.engine_version }}
- {{ database.license_model }}
- manual
- {% if database.iops %}
- {{ database.iops }}
- io1
- {% else %}
- {{ database.storage_type }}
- {% endif %}
- {{ database.option_group_name }}
- {{ 100 }}
- {{ database.region }}
-
-
- {{ database.storage_encrypted }}
- {{ database.kms_key_id }}
- {{ snapshot.snapshot_arn }}
-
- {{ database.enable_iam_database_authentication|lower }}
- """
- )
- return template.render(snapshot=self, database=self.database)
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
-
-class ExportTask(BaseModel):
- def __init__(self, snapshot, kwargs):
- self.snapshot = snapshot
-
- self.export_task_identifier = kwargs.get("export_task_identifier")
- self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id")
- self.source_arn = kwargs.get("source_arn")
- self.iam_role_arn = kwargs.get("iam_role_arn")
- self.s3_bucket_name = kwargs.get("s3_bucket_name")
- self.s3_prefix = kwargs.get("s3_prefix", "")
- self.export_only = kwargs.get("export_only", [])
-
- self.status = "available"
- self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
-
- def to_xml(self):
- template = Template(
- """
- {{ task.export_task_identifier }}
- {{ snapshot.snapshot_arn }}
- {{ task.created_at }}
- {{ task.created_at }}
- {{ snapshot.created_at }}
- {{ task.s3_bucket_name }}
- {{ task.s3_prefix }}
- {{ task.iam_role_arn }}
- {{ task.kms_key_id }}
- {%- if task.export_only -%}
-
- {%- for table in task.export_only -%}
- {{ table }}
- {%- endfor -%}
-
- {%- endif -%}
- {{ task.status }}
- {{ 100 }}
- {{ 1 }}
-
-
- """
- )
- return template.render(task=self, snapshot=self.snapshot)
-
-
-class EventSubscription(BaseModel):
- def __init__(self, kwargs):
- self.subscription_name = kwargs.get("subscription_name")
- self.sns_topic_arn = kwargs.get("sns_topic_arn")
- self.source_type = kwargs.get("source_type")
- self.event_categories = kwargs.get("event_categories", [])
- self.source_ids = kwargs.get("source_ids", [])
- self.enabled = kwargs.get("enabled", True)
- self.tags = kwargs.get("tags", True)
-
- self.region = ""
- self.customer_aws_id = copy.copy(ACCOUNT_ID)
- self.status = "available"
- self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now())
-
- @property
- def es_arn(self):
- return "arn:aws:rds:{0}:{1}:es:{2}".format(
- self.region, ACCOUNT_ID, self.subscription_name
- )
-
- def to_xml(self):
- template = Template(
- """
-
- {{ subscription.customer_aws_id }}
- {{ subscription.subscription_name }}
- {{ subscription.sns_topic_arn }}
- {{ subscription.created_at }}
- {{ subscription.source_type }}
-
- {%- for source_id in subscription.source_ids -%}
- {{ source_id }}
- {%- endfor -%}
-
-
- {%- for category in subscription.event_categories -%}
- {{ category }}
- {%- endfor -%}
-
- {{ subscription.status }}
- {{ subscription.enabled }}
- {{ subscription.es_arn }}
-
- {%- for tag in subscription.tags -%}
- {{ tag['Key'] }}{{ tag['Value'] }}
- {%- endfor -%}
-
-
- """
- )
- return template.render(subscription=self)
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
-
-class SecurityGroup(CloudFormationModel):
- def __init__(self, group_name, description, tags):
- self.group_name = group_name
- self.description = description
- self.status = "authorized"
- self.ip_ranges = []
- self.ec2_security_groups = []
- self.tags = tags
- self.owner_id = ACCOUNT_ID
- self.vpc_id = None
-
- def to_xml(self):
- template = Template(
- """
-
- {% for security_group in security_group.ec2_security_groups %}
-
- {{ security_group.id }}
- {{ security_group.name }}
- {{ security_group.owner_id }}
- authorized
-
- {% endfor %}
-
-
- {{ security_group.description }}
-
- {% for ip_range in security_group.ip_ranges %}
-
- {{ ip_range }}
- authorized
-
- {% endfor %}
-
- {{ security_group.ownder_id }}
- {{ security_group.group_name }}
- """
- )
- return template.render(security_group=self)
-
- def to_json(self):
- template = Template(
- """{
- "DBSecurityGroupDescription": "{{ security_group.description }}",
- "DBSecurityGroupName": "{{ security_group.group_name }}",
- "EC2SecurityGroups": {{ security_group.ec2_security_groups }},
- "IPRanges": [{%- for ip in security_group.ip_ranges -%}
- {%- if loop.index != 1 -%},{%- endif -%}
- "{{ ip }}"
- {%- endfor -%}
- ],
- "OwnerId": "{{ security_group.owner_id }}",
- "VpcId": "{{ security_group.vpc_id }}"
- }"""
- )
- return template.render(security_group=self)
-
- def authorize_cidr(self, cidr_ip):
- self.ip_ranges.append(cidr_ip)
-
- def authorize_security_group(self, security_group):
- self.ec2_security_groups.append(security_group)
-
- @staticmethod
- def cloudformation_name_type():
- return None
-
- @staticmethod
- def cloudformation_type():
- # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsecuritygroup.html
- return "AWS::RDS::DBSecurityGroup"
-
- @classmethod
- def create_from_cloudformation_json(
- cls, resource_name, cloudformation_json, region_name, **kwargs
- ):
- properties = cloudformation_json["Properties"]
- group_name = resource_name.lower()
- description = properties["GroupDescription"]
- security_group_ingress_rules = properties.get("DBSecurityGroupIngress", [])
- tags = properties.get("Tags")
-
- ec2_backend = ec2_backends[region_name]
- rds2_backend = rds2_backends[region_name]
- security_group = rds2_backend.create_security_group(
- group_name, description, tags
- )
- for security_group_ingress in security_group_ingress_rules:
- for ingress_type, ingress_value in security_group_ingress.items():
- if ingress_type == "CIDRIP":
- security_group.authorize_cidr(ingress_value)
- elif ingress_type == "EC2SecurityGroupName":
- subnet = ec2_backend.get_security_group_from_name(ingress_value)
- security_group.authorize_security_group(subnet)
- elif ingress_type == "EC2SecurityGroupId":
- subnet = ec2_backend.get_security_group_from_id(ingress_value)
- security_group.authorize_security_group(subnet)
- return security_group
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
- def delete(self, region_name):
- backend = rds2_backends[region_name]
- backend.delete_security_group(self.group_name)
-
-
-class SubnetGroup(CloudFormationModel):
- def __init__(self, subnet_name, description, subnets, tags):
- self.subnet_name = subnet_name
- self.description = description
- self.subnets = subnets
- self.status = "Complete"
- self.tags = tags
- self.vpc_id = self.subnets[0].vpc_id
-
- def to_xml(self):
- template = Template(
- """
- {{ subnet_group.vpc_id }}
- {{ subnet_group.status }}
- {{ subnet_group.description }}
- {{ subnet_group.subnet_name }}
-
- {% for subnet in subnet_group.subnets %}
-
- Active
- {{ subnet.id }}
-
- {{ subnet.availability_zone }}
- false
-
-
- {% endfor %}
-
- """
- )
- return template.render(subnet_group=self)
-
- def to_json(self):
- template = Template(
- """"DBSubnetGroup": {
- "VpcId": "{{ subnet_group.vpc_id }}",
- "SubnetGroupStatus": "{{ subnet_group.status }}",
- "DBSubnetGroupDescription": "{{ subnet_group.description }}",
- "DBSubnetGroupName": "{{ subnet_group.subnet_name }}",
- "Subnets": {
- "Subnet": [
- {% for subnet in subnet_group.subnets %}{
- "SubnetStatus": "Active",
- "SubnetIdentifier": "{{ subnet.id }}",
- "SubnetAvailabilityZone": {
- "Name": "{{ subnet.availability_zone }}",
- "ProvisionedIopsCapable": "false"
- }
- }{%- if not loop.last -%},{%- endif -%}{% endfor %}
- ]
- }
- }"""
- )
- return template.render(subnet_group=self)
-
- @staticmethod
- def cloudformation_name_type():
- return "DBSubnetGroupName"
-
- @staticmethod
- def cloudformation_type():
- # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbsubnetgroup.html
- return "AWS::RDS::DBSubnetGroup"
-
- @classmethod
- def create_from_cloudformation_json(
- cls, resource_name, cloudformation_json, region_name, **kwargs
- ):
- properties = cloudformation_json["Properties"]
-
- description = properties["DBSubnetGroupDescription"]
- subnet_ids = properties["SubnetIds"]
- tags = properties.get("Tags")
-
- ec2_backend = ec2_backends[region_name]
- subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids]
- rds2_backend = rds2_backends[region_name]
- subnet_group = rds2_backend.create_subnet_group(
- resource_name, description, subnets, tags
- )
- return subnet_group
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
- def delete(self, region_name):
- backend = rds2_backends[region_name]
- backend.delete_subnet_group(self.subnet_name)
-
-
-class RDS2Backend(BaseBackend):
- def __init__(self, region):
- self.region = region
- self.arn_regex = re_compile(
- r"^arn:aws:rds:.*:[0-9]*:(db|cluster|es|og|pg|ri|secgrp|snapshot|cluster-snapshot|subgrp):.*$"
- )
- self.clusters = OrderedDict()
- self.databases = OrderedDict()
- self.database_snapshots = OrderedDict()
- self.cluster_snapshots = OrderedDict()
- self.export_tasks = OrderedDict()
- self.event_subscriptions = OrderedDict()
- self.db_parameter_groups = {}
- self.option_groups = {}
- self.security_groups = {}
- self.subnet_groups = {}
-
- def reset(self):
- # preserve region
- region = self.region
- self.__dict__ = {}
- self.__init__(region)
-
- @staticmethod
- def default_vpc_endpoint_service(service_region, zones):
- """Default VPC endpoint service."""
- return BaseBackend.default_vpc_endpoint_service_factory(
- service_region, zones, "rds"
- ) + BaseBackend.default_vpc_endpoint_service_factory(
- service_region, zones, "rds-data"
- )
-
- def create_database(self, db_kwargs):
- database_id = db_kwargs["db_instance_identifier"]
- database = Database(**db_kwargs)
- self.databases[database_id] = database
- return database
-
- def create_database_snapshot(
- self, db_instance_identifier, db_snapshot_identifier, tags=None
- ):
- database = self.databases.get(db_instance_identifier)
- if not database:
- raise DBInstanceNotFoundError(db_instance_identifier)
- if db_snapshot_identifier in self.database_snapshots:
- raise DBSnapshotAlreadyExistsError(db_snapshot_identifier)
- if len(self.database_snapshots) >= int(
- os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
- ):
- raise SnapshotQuotaExceededError()
- if tags is None:
- tags = list()
- if database.copy_tags_to_snapshot and not tags:
- tags = database.get_tags()
- snapshot = DatabaseSnapshot(database, db_snapshot_identifier, tags)
- self.database_snapshots[db_snapshot_identifier] = snapshot
- return snapshot
-
- def copy_database_snapshot(
- self, source_snapshot_identifier, target_snapshot_identifier, tags=None,
- ):
- if source_snapshot_identifier not in self.database_snapshots:
- raise DBSnapshotNotFoundError(source_snapshot_identifier)
- if target_snapshot_identifier in self.database_snapshots:
- raise DBSnapshotAlreadyExistsError(target_snapshot_identifier)
- if len(self.database_snapshots) >= int(
- os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
- ):
- raise SnapshotQuotaExceededError()
-
- source_snapshot = self.database_snapshots[source_snapshot_identifier]
- if tags is None:
- tags = source_snapshot.tags
- else:
- tags = self._merge_tags(source_snapshot.tags, tags)
- target_snapshot = DatabaseSnapshot(
- source_snapshot.database, target_snapshot_identifier, tags
- )
- self.database_snapshots[target_snapshot_identifier] = target_snapshot
-
- return target_snapshot
-
- def delete_database_snapshot(self, db_snapshot_identifier):
- if db_snapshot_identifier not in self.database_snapshots:
- raise DBSnapshotNotFoundError(db_snapshot_identifier)
-
- return self.database_snapshots.pop(db_snapshot_identifier)
-
- def create_database_replica(self, db_kwargs):
- database_id = db_kwargs["db_instance_identifier"]
- source_database_id = db_kwargs["source_db_identifier"]
- primary = self.find_db_from_id(source_database_id)
- if self.arn_regex.match(source_database_id):
- db_kwargs["region"] = self.region
-
- # Shouldn't really copy here as the instance is duplicated. RDS replicas have different instances.
- replica = copy.copy(primary)
- replica.update(db_kwargs)
- replica.set_as_replica()
- self.databases[database_id] = replica
- primary.add_replica(replica)
- return replica
-
- def describe_databases(self, db_instance_identifier=None, filters=None):
- databases = self.databases
- if db_instance_identifier:
- filters = merge_filters(
- filters, {"db-instance-id": [db_instance_identifier]}
- )
- if filters:
- databases = self._filter_resources(databases, filters, Database)
- if db_instance_identifier and not databases:
- raise DBInstanceNotFoundError(db_instance_identifier)
- return list(databases.values())
-
- def describe_database_snapshots(
- self, db_instance_identifier, db_snapshot_identifier, filters=None
- ):
- snapshots = self.database_snapshots
- if db_instance_identifier:
- filters = merge_filters(
- filters, {"db-instance-id": [db_instance_identifier]}
- )
- if db_snapshot_identifier:
- filters = merge_filters(
- filters, {"db-snapshot-id": [db_snapshot_identifier]}
- )
- if filters:
- snapshots = self._filter_resources(snapshots, filters, DatabaseSnapshot)
- if db_snapshot_identifier and not snapshots and not db_instance_identifier:
- raise DBSnapshotNotFoundError(db_snapshot_identifier)
- return list(snapshots.values())
-
- def modify_database(self, db_instance_identifier, db_kwargs):
- database = self.describe_databases(db_instance_identifier)[0]
- if "new_db_instance_identifier" in db_kwargs:
- del self.databases[db_instance_identifier]
- db_instance_identifier = db_kwargs[
- "db_instance_identifier"
- ] = db_kwargs.pop("new_db_instance_identifier")
- self.databases[db_instance_identifier] = database
- database.update(db_kwargs)
- return database
-
- def reboot_db_instance(self, db_instance_identifier):
- database = self.describe_databases(db_instance_identifier)[0]
- return database
-
- def restore_db_instance_from_db_snapshot(self, from_snapshot_id, overrides):
- snapshot = self.describe_database_snapshots(
- db_instance_identifier=None, db_snapshot_identifier=from_snapshot_id
- )[0]
- original_database = snapshot.database
- new_instance_props = copy.deepcopy(original_database.__dict__)
- if not original_database.option_group_supplied:
- # If the option group is not supplied originally, the 'option_group_name' will receive a default value
- # Force this reconstruction, and prevent any validation on the default value
- del new_instance_props["option_group_name"]
-
- for key, value in overrides.items():
- if value:
- new_instance_props[key] = value
-
- return self.create_database(new_instance_props)
-
- def stop_database(self, db_instance_identifier, db_snapshot_identifier=None):
- database = self.describe_databases(db_instance_identifier)[0]
- # todo: certain rds types not allowed to be stopped at this time.
- # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html#USER_StopInstance.Limitations
- if database.is_replica or (
- database.multi_az and database.engine.lower().startswith("sqlserver")
- ):
- # todo: more db types not supported by stop/start instance api
- raise InvalidDBClusterStateFaultError(db_instance_identifier)
- if database.status != "available":
- raise InvalidDBInstanceStateError(db_instance_identifier, "stop")
- if db_snapshot_identifier:
- self.create_database_snapshot(
- db_instance_identifier, db_snapshot_identifier
- )
- database.status = "stopped"
- return database
-
- def start_database(self, db_instance_identifier):
- database = self.describe_databases(db_instance_identifier)[0]
- # todo: bunch of different error messages to be generated from this api call
- if database.status != "stopped":
- raise InvalidDBInstanceStateError(db_instance_identifier, "start")
- database.status = "available"
- return database
-
- def find_db_from_id(self, db_id):
- if self.arn_regex.match(db_id):
- arn_breakdown = db_id.split(":")
- region = arn_breakdown[3]
- backend = rds2_backends[region]
- db_name = arn_breakdown[-1]
- else:
- backend = self
- db_name = db_id
-
- return backend.describe_databases(db_name)[0]
-
- def delete_database(self, db_instance_identifier, db_snapshot_name=None):
- if db_instance_identifier in self.databases:
- if self.databases[db_instance_identifier].deletion_protection:
- raise InvalidParameterValue(
- "Can't delete Instance with protection enabled"
- )
- if db_snapshot_name:
- self.create_database_snapshot(db_instance_identifier, db_snapshot_name)
- database = self.databases.pop(db_instance_identifier)
- if database.is_replica:
- primary = self.find_db_from_id(database.source_db_identifier)
- primary.remove_replica(database)
- database.status = "deleting"
- return database
- else:
- raise DBInstanceNotFoundError(db_instance_identifier)
-
- def create_security_group(self, group_name, description, tags):
- security_group = SecurityGroup(group_name, description, tags)
- self.security_groups[group_name] = security_group
- return security_group
-
- def describe_security_groups(self, security_group_name):
- if security_group_name:
- if security_group_name in self.security_groups:
- return [self.security_groups[security_group_name]]
- else:
- raise DBSecurityGroupNotFoundError(security_group_name)
- return self.security_groups.values()
-
- def delete_security_group(self, security_group_name):
- if security_group_name in self.security_groups:
- return self.security_groups.pop(security_group_name)
- else:
- raise DBSecurityGroupNotFoundError(security_group_name)
-
- def delete_db_parameter_group(self, db_parameter_group_name):
- if db_parameter_group_name in self.db_parameter_groups:
- return self.db_parameter_groups.pop(db_parameter_group_name)
- else:
- raise DBParameterGroupNotFoundError(db_parameter_group_name)
-
- def authorize_security_group(self, security_group_name, cidr_ip):
- security_group = self.describe_security_groups(security_group_name)[0]
- security_group.authorize_cidr(cidr_ip)
- return security_group
-
- def create_subnet_group(self, subnet_name, description, subnets, tags):
- subnet_group = SubnetGroup(subnet_name, description, subnets, tags)
- self.subnet_groups[subnet_name] = subnet_group
- return subnet_group
-
- def describe_subnet_groups(self, subnet_group_name):
- if subnet_group_name:
- if subnet_group_name in self.subnet_groups:
- return [self.subnet_groups[subnet_group_name]]
- else:
- raise DBSubnetGroupNotFoundError(subnet_group_name)
- return self.subnet_groups.values()
-
- def modify_db_subnet_group(self, subnet_name, description, subnets):
- subnet_group = self.subnet_groups.pop(subnet_name)
- if not subnet_group:
- raise DBSubnetGroupNotFoundError(subnet_name)
- subnet_group.subnet_name = subnet_name
- subnet_group.subnets = subnets
- if description is not None:
- subnet_group.description = description
- return subnet_group
-
- def delete_subnet_group(self, subnet_name):
- if subnet_name in self.subnet_groups:
- return self.subnet_groups.pop(subnet_name)
- else:
- raise DBSubnetGroupNotFoundError(subnet_name)
-
- def create_option_group(self, option_group_kwargs):
- option_group_id = option_group_kwargs["name"]
- valid_option_group_engines = {
- "mariadb": ["10.0", "10.1", "10.2", "10.3"],
- "mysql": ["5.5", "5.6", "5.7", "8.0"],
- "oracle-se2": ["11.2", "12.1", "12.2"],
- "oracle-se1": ["11.2", "12.1", "12.2"],
- "oracle-se": ["11.2", "12.1", "12.2"],
- "oracle-ee": ["11.2", "12.1", "12.2"],
- "sqlserver-se": ["10.50", "11.00"],
- "sqlserver-ee": ["10.50", "11.00"],
- "sqlserver-ex": ["10.50", "11.00"],
- "sqlserver-web": ["10.50", "11.00"],
- }
- if option_group_kwargs["name"] in self.option_groups:
- raise RDSClientError(
- "OptionGroupAlreadyExistsFault",
- "An option group named {0} already exists.".format(
- option_group_kwargs["name"]
- ),
- )
- if (
- "description" not in option_group_kwargs
- or not option_group_kwargs["description"]
- ):
- raise RDSClientError(
- "InvalidParameterValue",
- "The parameter OptionGroupDescription must be provided and must not be blank.",
- )
- if option_group_kwargs["engine_name"] not in valid_option_group_engines.keys():
- raise RDSClientError(
- "InvalidParameterValue", "Invalid DB engine: non-existent"
- )
- if (
- option_group_kwargs["major_engine_version"]
- not in valid_option_group_engines[option_group_kwargs["engine_name"]]
- ):
- raise RDSClientError(
- "InvalidParameterCombination",
- "Cannot find major version {0} for {1}".format(
- option_group_kwargs["major_engine_version"],
- option_group_kwargs["engine_name"],
- ),
- )
- option_group = OptionGroup(**option_group_kwargs)
- self.option_groups[option_group_id] = option_group
- return option_group
-
- def delete_option_group(self, option_group_name):
- if option_group_name in self.option_groups:
- return self.option_groups.pop(option_group_name)
- else:
- raise OptionGroupNotFoundFaultError(option_group_name)
-
- def describe_option_groups(self, option_group_kwargs):
- option_group_list = []
-
- if option_group_kwargs["marker"]:
- marker = option_group_kwargs["marker"]
- else:
- marker = 0
- if option_group_kwargs["max_records"]:
- if (
- option_group_kwargs["max_records"] < 20
- or option_group_kwargs["max_records"] > 100
- ):
- raise RDSClientError(
- "InvalidParameterValue",
- "Invalid value for max records. Must be between 20 and 100",
- )
- max_records = option_group_kwargs["max_records"]
- else:
- max_records = 100
-
- for option_group in self.option_groups.values():
- if (
- option_group_kwargs["name"]
- and option_group.name != option_group_kwargs["name"]
- ):
- continue
- elif (
- option_group_kwargs["engine_name"]
- and option_group.engine_name != option_group_kwargs["engine_name"]
- ):
- continue
- elif (
- option_group_kwargs["major_engine_version"]
- and option_group.major_engine_version
- != option_group_kwargs["major_engine_version"]
- ):
- continue
- else:
- option_group_list.append(option_group)
- if not len(option_group_list):
- raise OptionGroupNotFoundFaultError(option_group_kwargs["name"])
- return option_group_list[marker : max_records + marker]
-
- @staticmethod
- def describe_option_group_options(engine_name, major_engine_version=None):
- default_option_group_options = {
- "mysql": {
- "5.6": '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- "all": '\n \n \n \n 5.611211TrueInnodb Memcached for MySQLMEMCACHED1-4294967295STATIC1TrueSpecifies how many memcached read operations (get) to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_R_BATCH_SIZE1-4294967295STATIC1TrueSpecifies how many memcached write operations, such as add, set, or incr, to perform before doing a COMMIT to start a new transactionDAEMON_MEMCACHED_W_BATCH_SIZE1-1073741824DYNAMIC5TrueSpecifies how often to auto-commit idle connections that use the InnoDB memcached interface.INNODB_API_BK_COMMIT_INTERVAL0,1STATIC0TrueDisables the use of row locks when using the InnoDB memcached interface.INNODB_API_DISABLE_ROWLOCK0,1STATIC0TrueLocks the table used by the InnoDB memcached plugin, so that it cannot be dropped or altered by DDL through the SQL interface.INNODB_API_ENABLE_MDL0-3STATIC0TrueLets you control the transaction isolation level on queries processed by the memcached interface.INNODB_API_TRX_LEVELauto,ascii,binarySTATICautoTrueThe binding protocol to use which can be either auto, ascii, or binary. The default is auto which means the server automatically negotiates the protocol with the client.BINDING_PROTOCOL1-2048STATIC1024TrueThe backlog queue configures how many network connections can be waiting to be processed by memcachedBACKLOG_QUEUE_LIMIT0,1STATIC0TrueDisable the use of compare and swap (CAS) which reduces the per-item size by 8 bytes.CAS_DISABLED1-48STATIC48TrueMinimum chunk size in bytes to allocate for the smallest item\'s key, value, and flags. 
The default is 48 and you can get a significant memory efficiency gain with a lower value.CHUNK_SIZE1-2STATIC1.25TrueChunk size growth factor that controls the size of each successive chunk with each chunk growing times this amount larger than the previous chunk.CHUNK_SIZE_GROWTH_FACTOR0,1STATIC0TrueIf enabled when there is no more memory to store items, memcached will return an error rather than evicting items.ERROR_ON_MEMORY_EXHAUSTED10-1024STATIC1024TrueMaximum number of concurrent connections. Setting this value to anything less than 10 prevents MySQL from starting.MAX_SIMULTANEOUS_CONNECTIONSv,vv,vvvSTATICvTrueVerbose level for memcached.VERBOSITYmysql\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- },
- "oracle-ee": {
- "11.2": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- "all": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- },
- "oracle-sa": {
- "11.2": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- "all": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- },
- "oracle-sa1": {
- "11.2": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- "all": '\n \n \n \n 11.2XMLDBOracle Application Express Runtime EnvironmentAPEXoracle-ee\n \n 11.2APEXOracle Application Express Development EnvironmentAPEX-DEVoracle-ee\n \n 11.2Oracle Advanced Security - Native Network EncryptionNATIVE_NETWORK_ENCRYPTIONACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired encryption behaviorSQLNET.ENCRYPTION_SERVERACCEPTED,REJECTED,REQUESTED,REQUIREDSTATICREQUESTEDTrueSpecifies the desired data integrity behaviorSQLNET.CRYPTO_CHECKSUM_SERVERRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40STATICRC4_256,AES256,AES192,3DES168,RC4_128,AES128,3DES112,RC4_56,DES,RC4_40,DES40TrueSpecifies list of encryption algorithms in order of intended useSQLNET.ENCRYPTION_TYPES_SERVERSHA1,MD5STATICSHA1,MD5TrueSpecifies list of checksumming algorithms in order of intended useSQLNET.CRYPTO_CHECKSUM_TYPES_SERVERoracle-ee\n \n 11.21158TrueOracle Enterprise Manager (Database Control only)OEMoracle-ee\n \n 11.2Oracle StatspackSTATSPACKoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - Transparent Data EncryptionTDEoracle-ee\n \n 11.2TrueTrueOracle Advanced Security - TDE with HSMTDE_HSMoracle-ee\n \n 11.2TrueTrueChange time 
zoneTimezoneAfrica/Cairo,Africa/Casablanca,Africa/Harare,Africa/Monrovia,Africa/Nairobi,Africa/Tripoli,Africa/Windhoek,America/Araguaina,America/Asuncion,America/Bogota,America/Caracas,America/Chihuahua,America/Cuiaba,America/Denver,America/Fortaleza,America/Guatemala,America/Halifax,America/Manaus,America/Matamoros,America/Monterrey,America/Montevideo,America/Phoenix,America/Santiago,America/Tijuana,Asia/Amman,Asia/Ashgabat,Asia/Baghdad,Asia/Baku,Asia/Bangkok,Asia/Beirut,Asia/Calcutta,Asia/Damascus,Asia/Dhaka,Asia/Irkutsk,Asia/Jerusalem,Asia/Kabul,Asia/Karachi,Asia/Kathmandu,Asia/Krasnoyarsk,Asia/Magadan,Asia/Muscat,Asia/Novosibirsk,Asia/Riyadh,Asia/Seoul,Asia/Shanghai,Asia/Singapore,Asia/Taipei,Asia/Tehran,Asia/Tokyo,Asia/Ulaanbaatar,Asia/Vladivostok,Asia/Yakutsk,Asia/Yerevan,Atlantic/Azores,Australia/Adelaide,Australia/Brisbane,Australia/Darwin,Australia/Hobart,Australia/Perth,Australia/Sydney,Brazil/East,Canada/Newfoundland,Canada/Saskatchewan,Europe/Amsterdam,Europe/Athens,Europe/Dublin,Europe/Helsinki,Europe/Istanbul,Europe/Kaliningrad,Europe/Moscow,Europe/Paris,Europe/Prague,Europe/Sarajevo,Pacific/Auckland,Pacific/Fiji,Pacific/Guam,Pacific/Honolulu,Pacific/Samoa,US/Alaska,US/Central,US/Eastern,US/East-Indiana,US/Pacific,UTCDYNAMICUTCTrueSpecifies the timezone the user wants to change the system time toTIME_ZONEoracle-ee\n \n 11.2Oracle XMLDB RepositoryXMLDBoracle-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- },
- "sqlserver-ee": {
- "10.50": '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- "11.00": '\n \n \n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- "all": '\n \n \n \n 10.50SQLServer Database MirroringMirroringsqlserver-ee\n \n 10.50TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n 11.00SQLServer Database MirroringMirroringsqlserver-ee\n \n 11.00TrueSQL Server - Transparent Data EncryptionTDEsqlserver-ee\n \n \n \n \n 457f7bb8-9fbf-11e4-9084-5754f80d5144\n \n',
- },
- }
-
- if engine_name not in default_option_group_options:
- raise RDSClientError(
- "InvalidParameterValue", "Invalid DB engine: {0}".format(engine_name)
- )
- if (
- major_engine_version
- and major_engine_version not in default_option_group_options[engine_name]
- ):
- raise RDSClientError(
- "InvalidParameterCombination",
- "Cannot find major version {0} for {1}".format(
- major_engine_version, engine_name
- ),
- )
- if major_engine_version:
- return default_option_group_options[engine_name][major_engine_version]
- return default_option_group_options[engine_name]["all"]
-
- def modify_option_group(
- self,
- option_group_name,
- options_to_include=None,
- options_to_remove=None,
- apply_immediately=None,
- ):
- if option_group_name not in self.option_groups:
- raise OptionGroupNotFoundFaultError(option_group_name)
- if not options_to_include and not options_to_remove:
- raise RDSClientError(
- "InvalidParameterValue",
- "At least one option must be added, modified, or removed.",
- )
- if options_to_remove:
- self.option_groups[option_group_name].remove_options(options_to_remove)
- if options_to_include:
- self.option_groups[option_group_name].add_options(options_to_include)
- return self.option_groups[option_group_name]
-
- def create_db_parameter_group(self, db_parameter_group_kwargs):
- db_parameter_group_id = db_parameter_group_kwargs["name"]
- if db_parameter_group_kwargs["name"] in self.db_parameter_groups:
- raise RDSClientError(
- "DBParameterGroupAlreadyExistsFault",
- "A DB parameter group named {0} already exists.".format(
- db_parameter_group_kwargs["name"]
- ),
- )
- if not db_parameter_group_kwargs.get("description"):
- raise RDSClientError(
- "InvalidParameterValue",
- "The parameter Description must be provided and must not be blank.",
- )
- if not db_parameter_group_kwargs.get("family"):
- raise RDSClientError(
- "InvalidParameterValue",
- "The parameter DBParameterGroupName must be provided and must not be blank.",
- )
- db_parameter_group_kwargs["region"] = self.region
- db_parameter_group = DBParameterGroup(**db_parameter_group_kwargs)
- self.db_parameter_groups[db_parameter_group_id] = db_parameter_group
- return db_parameter_group
-
- def describe_db_parameter_groups(self, db_parameter_group_kwargs):
- db_parameter_group_list = []
-
- if db_parameter_group_kwargs.get("marker"):
- marker = db_parameter_group_kwargs["marker"]
- else:
- marker = 0
- if db_parameter_group_kwargs.get("max_records"):
- if (
- db_parameter_group_kwargs["max_records"] < 20
- or db_parameter_group_kwargs["max_records"] > 100
- ):
- raise RDSClientError(
- "InvalidParameterValue",
- "Invalid value for max records. Must be between 20 and 100",
- )
- max_records = db_parameter_group_kwargs["max_records"]
- else:
- max_records = 100
-
- for db_parameter_group in self.db_parameter_groups.values():
- if not db_parameter_group_kwargs.get(
- "name"
- ) or db_parameter_group.name == db_parameter_group_kwargs.get("name"):
- db_parameter_group_list.append(db_parameter_group)
- else:
- continue
-
- return db_parameter_group_list[marker : max_records + marker]
-
- def modify_db_parameter_group(
- self, db_parameter_group_name, db_parameter_group_parameters
- ):
- if db_parameter_group_name not in self.db_parameter_groups:
- raise DBParameterGroupNotFoundError(db_parameter_group_name)
-
- db_parameter_group = self.db_parameter_groups[db_parameter_group_name]
- db_parameter_group.update_parameters(db_parameter_group_parameters)
-
- return db_parameter_group
-
- def create_db_cluster(self, kwargs):
- cluster_id = kwargs["db_cluster_identifier"]
- cluster = Cluster(**kwargs)
- self.clusters[cluster_id] = cluster
- initial_state = copy.deepcopy(cluster) # Return status=creating
- cluster.status = "available" # Already set the final status in the background
- return initial_state
-
- def create_db_cluster_snapshot(
- self, db_cluster_identifier, db_snapshot_identifier, tags=None
- ):
- cluster = self.clusters.get(db_cluster_identifier)
- if cluster is None:
- raise DBClusterNotFoundError(db_cluster_identifier)
- if db_snapshot_identifier in self.cluster_snapshots:
- raise DBClusterSnapshotAlreadyExistsError(db_snapshot_identifier)
- if len(self.cluster_snapshots) >= int(
- os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
- ):
- raise SnapshotQuotaExceededError()
- if tags is None:
- tags = list()
- if cluster.copy_tags_to_snapshot:
- tags += cluster.get_tags()
- snapshot = ClusterSnapshot(cluster, db_snapshot_identifier, tags)
- self.cluster_snapshots[db_snapshot_identifier] = snapshot
- return snapshot
-
- def copy_cluster_snapshot(
- self, source_snapshot_identifier, target_snapshot_identifier, tags=None
- ):
- if source_snapshot_identifier not in self.cluster_snapshots:
- raise DBClusterSnapshotNotFoundError(source_snapshot_identifier)
- if target_snapshot_identifier in self.cluster_snapshots:
- raise DBClusterSnapshotAlreadyExistsError(target_snapshot_identifier)
- if len(self.cluster_snapshots) >= int(
- os.environ.get("MOTO_RDS_SNAPSHOT_LIMIT", "100")
- ):
- raise SnapshotQuotaExceededError()
- source_snapshot = self.cluster_snapshots[source_snapshot_identifier]
- if tags is None:
- tags = source_snapshot.tags
- else:
- tags = self._merge_tags(source_snapshot.tags, tags)
- target_snapshot = ClusterSnapshot(
- source_snapshot.cluster, target_snapshot_identifier, tags
- )
- self.cluster_snapshots[target_snapshot_identifier] = target_snapshot
- return target_snapshot
-
- def delete_db_cluster_snapshot(self, db_snapshot_identifier):
- if db_snapshot_identifier not in self.cluster_snapshots:
- raise DBClusterSnapshotNotFoundError(db_snapshot_identifier)
-
- return self.cluster_snapshots.pop(db_snapshot_identifier)
-
- def describe_db_clusters(self, cluster_identifier):
- if cluster_identifier:
- return [self.clusters[cluster_identifier]]
- return self.clusters.values()
-
- def describe_db_cluster_snapshots(
- self, db_cluster_identifier, db_snapshot_identifier, filters=None
- ):
- snapshots = self.cluster_snapshots
- if db_cluster_identifier:
- filters = merge_filters(filters, {"db-cluster-id": [db_cluster_identifier]})
- if db_snapshot_identifier:
- filters = merge_filters(
- filters, {"db-cluster-snapshot-id": [db_snapshot_identifier]}
- )
- if filters:
- snapshots = self._filter_resources(snapshots, filters, ClusterSnapshot)
- if db_snapshot_identifier and not snapshots and not db_cluster_identifier:
- raise DBClusterSnapshotNotFoundError(db_snapshot_identifier)
- return list(snapshots.values())
-
- def delete_db_cluster(self, cluster_identifier):
- if cluster_identifier in self.clusters:
- if self.clusters[cluster_identifier].deletion_protection:
- raise InvalidParameterValue(
- "Can't delete Cluster with protection enabled"
- )
- return self.clusters.pop(cluster_identifier)
- raise DBClusterNotFoundError(cluster_identifier)
-
- def start_db_cluster(self, cluster_identifier):
- if cluster_identifier not in self.clusters:
- raise DBClusterNotFoundError(cluster_identifier)
- cluster = self.clusters[cluster_identifier]
- if cluster.status != "stopped":
- raise InvalidDBClusterStateFault(
- "DbCluster cluster-id is not in stopped state."
- )
- temp_state = copy.deepcopy(cluster)
- temp_state.status = "started"
- cluster.status = "available" # This is the final status - already setting it in the background
- return temp_state
-
- def restore_db_cluster_from_snapshot(self, from_snapshot_id, overrides):
- snapshot = self.describe_db_cluster_snapshots(
- db_cluster_identifier=None, db_snapshot_identifier=from_snapshot_id
- )[0]
- original_cluster = snapshot.cluster
- new_cluster_props = copy.deepcopy(original_cluster.__dict__)
- for key, value in overrides.items():
- if value:
- new_cluster_props[key] = value
-
- return self.create_db_cluster(new_cluster_props)
-
- def stop_db_cluster(self, cluster_identifier):
- if cluster_identifier not in self.clusters:
- raise DBClusterNotFoundError(cluster_identifier)
- cluster = self.clusters[cluster_identifier]
- if cluster.status not in ["available"]:
- raise InvalidDBClusterStateFault(
- "DbCluster cluster-id is not in available state."
- )
- previous_state = copy.deepcopy(cluster)
- cluster.status = "stopped"
- return previous_state
-
- def start_export_task(self, kwargs):
- export_task_id = kwargs["export_task_identifier"]
- source_arn = kwargs["source_arn"]
- snapshot_id = source_arn.split(":")[-1]
- snapshot_type = source_arn.split(":")[-2]
-
- if export_task_id in self.export_tasks:
- raise ExportTaskAlreadyExistsError(export_task_id)
- if snapshot_type == "snapshot" and snapshot_id not in self.database_snapshots:
- raise DBSnapshotNotFoundError(snapshot_id)
- elif (
- snapshot_type == "cluster-snapshot"
- and snapshot_id not in self.cluster_snapshots
- ):
- raise DBClusterSnapshotNotFoundError(snapshot_id)
-
- if snapshot_type == "snapshot":
- snapshot = self.database_snapshots[snapshot_id]
- else:
- snapshot = self.cluster_snapshots[snapshot_id]
-
- if snapshot.status not in ["available"]:
- raise InvalidExportSourceStateError(snapshot.status)
-
- export_task = ExportTask(snapshot, kwargs)
- self.export_tasks[export_task_id] = export_task
-
- return export_task
-
- def cancel_export_task(self, export_task_identifier):
- if export_task_identifier in self.export_tasks:
- export_task = self.export_tasks[export_task_identifier]
- export_task.status = "canceled"
- self.export_tasks[export_task_identifier] = export_task
- return export_task
- raise ExportTaskNotFoundError(export_task_identifier)
-
- def describe_export_tasks(self, export_task_identifier):
- if export_task_identifier:
- if export_task_identifier in self.export_tasks:
- return [self.export_tasks[export_task_identifier]]
- else:
- raise ExportTaskNotFoundError(export_task_identifier)
- return self.export_tasks.values()
-
- def create_event_subscription(self, kwargs):
- subscription_name = kwargs["subscription_name"]
-
- if subscription_name in self.event_subscriptions:
- raise SubscriptionAlreadyExistError(subscription_name)
-
- subscription = EventSubscription(kwargs)
- self.event_subscriptions[subscription_name] = subscription
-
- return subscription
-
- def delete_event_subscription(self, subscription_name):
- if subscription_name in self.event_subscriptions:
- return self.event_subscriptions.pop(subscription_name)
- raise SubscriptionNotFoundError(subscription_name)
-
- def describe_event_subscriptions(self, subscription_name):
- if subscription_name:
- if subscription_name in self.event_subscriptions:
- return [self.event_subscriptions[subscription_name]]
- else:
- raise SubscriptionNotFoundError(subscription_name)
- return self.event_subscriptions.values()
-
- def list_tags_for_resource(self, arn):
- if self.arn_regex.match(arn):
- arn_breakdown = arn.split(":")
- resource_type = arn_breakdown[len(arn_breakdown) - 2]
- resource_name = arn_breakdown[len(arn_breakdown) - 1]
- if resource_type == "db": # Database
- if resource_name in self.databases:
- return self.databases[resource_name].get_tags()
- elif resource_type == "cluster": # Cluster
- if resource_name in self.clusters:
- return self.clusters[resource_name].get_tags()
- elif resource_type == "es": # Event Subscription
- if resource_name in self.event_subscriptions:
- return self.event_subscriptions[resource_name].get_tags()
- elif resource_type == "og": # Option Group
- if resource_name in self.option_groups:
- return self.option_groups[resource_name].get_tags()
- elif resource_type == "pg": # Parameter Group
- if resource_name in self.db_parameter_groups:
- return self.db_parameter_groups[resource_name].get_tags()
- elif resource_type == "ri": # Reserved DB instance
- # TODO: Complete call to tags on resource type Reserved DB
- # instance
- return []
- elif resource_type == "secgrp": # DB security group
- if resource_name in self.security_groups:
- return self.security_groups[resource_name].get_tags()
- elif resource_type == "snapshot": # DB Snapshot
- if resource_name in self.database_snapshots:
- return self.database_snapshots[resource_name].get_tags()
- elif resource_type == "cluster-snapshot": # DB Cluster Snapshot
- if resource_name in self.cluster_snapshots:
- return self.cluster_snapshots[resource_name].get_tags()
- elif resource_type == "subgrp": # DB subnet group
- if resource_name in self.subnet_groups:
- return self.subnet_groups[resource_name].get_tags()
- else:
- raise RDSClientError(
- "InvalidParameterValue", "Invalid resource name: {0}".format(arn)
- )
- return []
-
- def remove_tags_from_resource(self, arn, tag_keys):
- if self.arn_regex.match(arn):
- arn_breakdown = arn.split(":")
- resource_type = arn_breakdown[len(arn_breakdown) - 2]
- resource_name = arn_breakdown[len(arn_breakdown) - 1]
- if resource_type == "db": # Database
- if resource_name in self.databases:
- return self.databases[resource_name].remove_tags(tag_keys)
- elif resource_type == "es": # Event Subscription
- if resource_name in self.event_subscriptions:
- return self.event_subscriptions[resource_name].remove_tags(tag_keys)
- elif resource_type == "og": # Option Group
- if resource_name in self.option_groups:
- return self.option_groups[resource_name].remove_tags(tag_keys)
- elif resource_type == "pg": # Parameter Group
- if resource_name in self.db_parameter_groups:
- return self.db_parameter_groups[resource_name].remove_tags(tag_keys)
- elif resource_type == "ri": # Reserved DB instance
- return None
- elif resource_type == "secgrp": # DB security group
- if resource_name in self.security_groups:
- return self.security_groups[resource_name].remove_tags(tag_keys)
- elif resource_type == "snapshot": # DB Snapshot
- if resource_name in self.database_snapshots:
- return self.database_snapshots[resource_name].remove_tags(tag_keys)
- elif resource_type == "cluster-snapshot": # DB Cluster Snapshot
- if resource_name in self.cluster_snapshots:
- return self.cluster_snapshots[resource_name].remove_tags(tag_keys)
- elif resource_type == "subgrp": # DB subnet group
- if resource_name in self.subnet_groups:
- return self.subnet_groups[resource_name].remove_tags(tag_keys)
- else:
- raise RDSClientError(
- "InvalidParameterValue", "Invalid resource name: {0}".format(arn)
- )
-
- def add_tags_to_resource(self, arn, tags):
- if self.arn_regex.match(arn):
- arn_breakdown = arn.split(":")
- resource_type = arn_breakdown[len(arn_breakdown) - 2]
- resource_name = arn_breakdown[len(arn_breakdown) - 1]
- if resource_type == "db": # Database
- if resource_name in self.databases:
- return self.databases[resource_name].add_tags(tags)
- elif resource_type == "es": # Event Subscription
- if resource_name in self.event_subscriptions:
- return self.event_subscriptions[resource_name].add_tags(tags)
- elif resource_type == "og": # Option Group
- if resource_name in self.option_groups:
- return self.option_groups[resource_name].add_tags(tags)
- elif resource_type == "pg": # Parameter Group
- if resource_name in self.db_parameter_groups:
- return self.db_parameter_groups[resource_name].add_tags(tags)
- elif resource_type == "ri": # Reserved DB instance
- return []
- elif resource_type == "secgrp": # DB security group
- if resource_name in self.security_groups:
- return self.security_groups[resource_name].add_tags(tags)
- elif resource_type == "snapshot": # DB Snapshot
- if resource_name in self.database_snapshots:
- return self.database_snapshots[resource_name].add_tags(tags)
- elif resource_type == "cluster-snapshot": # DB Cluster Snapshot
- if resource_name in self.cluster_snapshots:
- return self.cluster_snapshots[resource_name].add_tags(tags)
- elif resource_type == "subgrp": # DB subnet group
- if resource_name in self.subnet_groups:
- return self.subnet_groups[resource_name].add_tags(tags)
- else:
- raise RDSClientError(
- "InvalidParameterValue", "Invalid resource name: {0}".format(arn)
- )
-
- @staticmethod
- def _filter_resources(resources, filters, resource_class):
- try:
- filter_defs = resource_class.SUPPORTED_FILTERS
- validate_filters(filters, filter_defs)
- return apply_filter(resources, filters, filter_defs)
- except KeyError as e:
- # https://stackoverflow.com/questions/24998968/why-does-strkeyerror-add-extra-quotes
- raise InvalidParameterValue(e.args[0])
- except ValueError as e:
- raise InvalidParameterCombination(str(e))
-
- @staticmethod
- def _merge_tags(old_tags: list, new_tags: list):
- tags_dict = dict()
- tags_dict.update({d["Key"]: d["Value"] for d in old_tags})
- tags_dict.update({d["Key"]: d["Value"] for d in new_tags})
- return [{"Key": k, "Value": v} for k, v in tags_dict.items()]
-
-
-class OptionGroup(object):
- def __init__(self, name, engine_name, major_engine_version, description=None):
- self.engine_name = engine_name
- self.major_engine_version = major_engine_version
- self.description = description
- self.name = name
- self.vpc_and_non_vpc_instance_memberships = False
- self.options = {}
- self.vpcId = "null"
- self.tags = []
-
- def to_json(self):
- template = Template(
- """{
- "VpcId": null,
- "MajorEngineVersion": "{{ option_group.major_engine_version }}",
- "OptionGroupDescription": "{{ option_group.description }}",
- "AllowsVpcAndNonVpcInstanceMemberships": "{{ option_group.vpc_and_non_vpc_instance_memberships }}",
- "EngineName": "{{ option_group.engine_name }}",
- "Options": [],
- "OptionGroupName": "{{ option_group.name }}"
-}"""
- )
- return template.render(option_group=self)
-
- def to_xml(self):
- template = Template(
- """
- {{ option_group.name }}
- {{ option_group.vpc_and_non_vpc_instance_memberships }}
- {{ option_group.major_engine_version }}
- {{ option_group.engine_name }}
- {{ option_group.description }}
-
- """
- )
- return template.render(option_group=self)
-
- def remove_options(self, options_to_remove):
- # TODO: Check for option in self.options and remove if exists. Raise
- # error otherwise
- return
-
- def add_options(self, options_to_add):
- # TODO: Validate option and add it to self.options. If invalid raise
- # error
- return
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
-
-class OptionGroupOption(object):
- def __init__(self, **kwargs):
- self.default_port = kwargs.get("default_port")
- self.description = kwargs.get("description")
- self.engine_name = kwargs.get("engine_name")
- self.major_engine_version = kwargs.get("major_engine_version")
- self.name = kwargs.get("name")
- self.option_group_option_settings = self._make_option_group_option_settings(
- kwargs.get("option_group_option_settings", [])
- )
- self.options_depended_on = kwargs.get("options_depended_on", [])
- self.permanent = kwargs.get("permanent")
- self.persistent = kwargs.get("persistent")
- self.port_required = kwargs.get("port_required")
-
- def _make_option_group_option_settings(self, option_group_option_settings_kwargs):
- return [
- OptionGroupOptionSetting(**setting_kwargs)
- for setting_kwargs in option_group_option_settings_kwargs
- ]
-
- def to_json(self):
- template = Template(
- """{ "MinimumRequiredMinorEngineVersion":
- "2789.0.v1",
- "OptionsDependedOn": [],
- "MajorEngineVersion": "10.50",
- "Persistent": false,
- "DefaultPort": null,
- "Permanent": false,
- "OptionGroupOptionSettings": [],
- "EngineName": "sqlserver-se",
- "Name": "Mirroring",
- "PortRequired": false,
- "Description": "SQLServer Database Mirroring"
- }"""
- )
- return template.render(option_group=self)
-
- def to_xml(self):
- template = Template(
- """
- {{ option_group.major_engine_version }}
- {{ option_group.default_port }}
- {{ option_group.port_required }}
- {{ option_group.persistent }}
-
- {%- for option_name in option_group.options_depended_on -%}
- {{ option_name }}
- {%- endfor -%}
-
- {{ option_group.permanent }}
- {{ option_group.description }}
- {{ option_group.name }}
-
- {%- for setting in option_group.option_group_option_settings -%}
- {{ setting.to_xml() }}
- {%- endfor -%}
-
- {{ option_group.engine_name }}
- {{ option_group.minimum_required_minor_engine_version }}
-"""
- )
- return template.render(option_group=self)
-
-
-class OptionGroupOptionSetting(object):
- def __init__(self, *kwargs):
- self.allowed_values = kwargs.get("allowed_values")
- self.apply_type = kwargs.get("apply_type")
- self.default_value = kwargs.get("default_value")
- self.is_modifiable = kwargs.get("is_modifiable")
- self.setting_description = kwargs.get("setting_description")
- self.setting_name = kwargs.get("setting_name")
-
- def to_xml(self):
- template = Template(
- """
- {{ option_group_option_setting.allowed_values }}
- {{ option_group_option_setting.apply_type }}
- {{ option_group_option_setting.default_value }}
- {{ option_group_option_setting.is_modifiable }}
- {{ option_group_option_setting.setting_description }}
- {{ option_group_option_setting.setting_name }}
-"""
- )
- return template.render(option_group_option_setting=self)
-
-
-def make_rds_arn(region, name):
- return "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, name)
-
-
-class DBParameterGroup(CloudFormationModel):
- def __init__(self, name, description, family, tags, region):
- self.name = name
- self.description = description
- self.family = family
- self.tags = tags
- self.parameters = defaultdict(dict)
- self.arn = make_rds_arn(region, name)
-
- def to_xml(self):
- template = Template(
- """
- {{ param_group.name }}
- {{ param_group.family }}
- {{ param_group.description }}
- {{ param_group.arn }}
- """
- )
- return template.render(param_group=self)
-
- def get_tags(self):
- return self.tags
-
- def add_tags(self, tags):
- new_keys = [tag_set["Key"] for tag_set in tags]
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in new_keys]
- self.tags.extend(tags)
- return self.tags
-
- def remove_tags(self, tag_keys):
- self.tags = [tag_set for tag_set in self.tags if tag_set["Key"] not in tag_keys]
-
- def update_parameters(self, new_parameters):
- for new_parameter in new_parameters:
- parameter = self.parameters[new_parameter["ParameterName"]]
- parameter.update(new_parameter)
-
- def delete(self, region_name):
- backend = rds2_backends[region_name]
- backend.delete_db_parameter_group(self.name)
-
- @staticmethod
- def cloudformation_name_type():
- return None
-
- @staticmethod
- def cloudformation_type():
- # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html
- return "AWS::RDS::DBParameterGroup"
-
- @classmethod
- def create_from_cloudformation_json(
- cls, resource_name, cloudformation_json, region_name, **kwargs
- ):
- properties = cloudformation_json["Properties"]
-
- db_parameter_group_kwargs = {
- "description": properties["Description"],
- "family": properties["Family"],
- "name": resource_name.lower(),
- "tags": properties.get("Tags"),
- }
- db_parameter_group_parameters = []
- for db_parameter, db_parameter_value in properties.get(
- "Parameters", {}
- ).items():
- db_parameter_group_parameters.append(
- {"ParameterName": db_parameter, "ParameterValue": db_parameter_value}
- )
-
- rds2_backend = rds2_backends[region_name]
- db_parameter_group = rds2_backend.create_db_parameter_group(
- db_parameter_group_kwargs
- )
- db_parameter_group.update_parameters(db_parameter_group_parameters)
- return db_parameter_group
-
-
-rds2_backends = BackendDict(RDS2Backend, "rds")
diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py
deleted file mode 100644
index 74495f5e6..000000000
--- a/moto/rds2/responses.py
+++ /dev/null
@@ -1,1130 +0,0 @@
-from collections import defaultdict
-
-from moto.core.responses import BaseResponse
-from moto.ec2.models import ec2_backends
-from .models import rds2_backends
-from .exceptions import DBParameterGroupNotFoundError
-from .utils import filters_from_querystring
-
-
-class RDS2Response(BaseResponse):
- @property
- def backend(self):
- return rds2_backends[self.region]
-
- def _get_db_kwargs(self):
- args = {
- "auto_minor_version_upgrade": self._get_param("AutoMinorVersionUpgrade"),
- "allocated_storage": self._get_int_param("AllocatedStorage"),
- "availability_zone": self._get_param("AvailabilityZone"),
- "backup_retention_period": self._get_param("BackupRetentionPeriod"),
- "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
- "db_instance_class": self._get_param("DBInstanceClass"),
- "db_instance_identifier": self._get_param("DBInstanceIdentifier"),
- "db_name": self._get_param("DBName"),
- "db_parameter_group_name": self._get_param("DBParameterGroupName"),
- "db_snapshot_identifier": self._get_param("DBSnapshotIdentifier"),
- "db_subnet_group_name": self._get_param("DBSubnetGroupName"),
- "engine": self._get_param("Engine"),
- "engine_version": self._get_param("EngineVersion"),
- "enable_iam_database_authentication": self._get_bool_param(
- "EnableIAMDatabaseAuthentication"
- ),
- "license_model": self._get_param("LicenseModel"),
- "iops": self._get_int_param("Iops"),
- "kms_key_id": self._get_param("KmsKeyId"),
- "master_user_password": self._get_param("MasterUserPassword"),
- "master_username": self._get_param("MasterUsername"),
- "multi_az": self._get_bool_param("MultiAZ"),
- "option_group_name": self._get_param("OptionGroupName"),
- "port": self._get_param("Port"),
- # PreferredBackupWindow
- # PreferredMaintenanceWindow
- "publicly_accessible": self._get_param("PubliclyAccessible"),
- "region": self.region,
- "security_groups": self._get_multi_param(
- "DBSecurityGroups.DBSecurityGroupName"
- ),
- "storage_encrypted": self._get_param("StorageEncrypted"),
- "storage_type": self._get_param("StorageType", None),
- "vpc_security_group_ids": self._get_multi_param(
- "VpcSecurityGroupIds.VpcSecurityGroupId"
- ),
- "tags": list(),
- "deletion_protection": self._get_bool_param("DeletionProtection"),
- }
- args["tags"] = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- return args
-
- def _get_db_replica_kwargs(self):
- return {
- "auto_minor_version_upgrade": self._get_param("AutoMinorVersionUpgrade"),
- "availability_zone": self._get_param("AvailabilityZone"),
- "db_instance_class": self._get_param("DBInstanceClass"),
- "db_instance_identifier": self._get_param("DBInstanceIdentifier"),
- "db_subnet_group_name": self._get_param("DBSubnetGroupName"),
- "iops": self._get_int_param("Iops"),
- # OptionGroupName
- "port": self._get_param("Port"),
- "publicly_accessible": self._get_param("PubliclyAccessible"),
- "source_db_identifier": self._get_param("SourceDBInstanceIdentifier"),
- "storage_type": self._get_param("StorageType"),
- }
-
- def _get_option_group_kwargs(self):
- return {
- "major_engine_version": self._get_param("MajorEngineVersion"),
- "description": self._get_param("OptionGroupDescription"),
- "engine_name": self._get_param("EngineName"),
- "name": self._get_param("OptionGroupName"),
- }
-
- def _get_db_parameter_group_kwargs(self):
- return {
- "description": self._get_param("Description"),
- "family": self._get_param("DBParameterGroupFamily"),
- "name": self._get_param("DBParameterGroupName"),
- "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
- }
-
- def _get_db_cluster_kwargs(self):
- return {
- "availability_zones": self._get_multi_param(
- "AvailabilityZones.AvailabilityZone"
- ),
- "db_name": self._get_param("DatabaseName"),
- "db_cluster_identifier": self._get_param("DBClusterIdentifier"),
- "deletion_protection": self._get_bool_param("DeletionProtection"),
- "engine": self._get_param("Engine"),
- "engine_version": self._get_param("EngineVersion"),
- "engine_mode": self._get_param("EngineMode"),
- "allocated_storage": self._get_param("AllocatedStorage"),
- "iops": self._get_param("Iops"),
- "storage_type": self._get_param("StorageType"),
- "master_username": self._get_param("MasterUsername"),
- "master_user_password": self._get_param("MasterUserPassword"),
- "port": self._get_param("Port"),
- "parameter_group": self._get_param("DBClusterParameterGroup"),
- "region": self.region,
- "db_cluster_instance_class": self._get_param("DBClusterInstanceClass"),
- "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
- "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
- }
-
- def _get_export_task_kwargs(self):
- return {
- "export_task_identifier": self._get_param("ExportTaskIdentifier"),
- "source_arn": self._get_param("SourceArn"),
- "s3_bucket_name": self._get_param("S3BucketName"),
- "iam_role_arn": self._get_param("IamRoleArn"),
- "kms_key_id": self._get_param("KmsKeyId"),
- "s3_prefix": self._get_param("S3Prefix"),
- "export_only": self.unpack_list_params("ExportOnly.member"),
- }
-
- def _get_event_subscription_kwargs(self):
- return {
- "subscription_name": self._get_param("SubscriptionName"),
- "sns_topic_arn": self._get_param("SnsTopicArn"),
- "source_type": self._get_param("SourceType"),
- "event_categories": self.unpack_list_params(
- "EventCategories.EventCategory"
- ),
- "source_ids": self.unpack_list_params("SourceIds.SourceId"),
- "enabled": self._get_param("Enabled"),
- "tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
- }
-
- def unpack_complex_list_params(self, label, names):
- unpacked_list = list()
- count = 1
- while self._get_param("{0}.{1}.{2}".format(label, count, names[0])):
- param = dict()
- for i in range(len(names)):
- param[names[i]] = self._get_param(
- "{0}.{1}.{2}".format(label, count, names[i])
- )
- unpacked_list.append(param)
- count += 1
- return unpacked_list
-
- def unpack_list_params(self, label):
- unpacked_list = list()
- count = 1
- while self._get_param("{0}.{1}".format(label, count)):
- unpacked_list.append(self._get_param("{0}.{1}".format(label, count)))
- count += 1
- return unpacked_list
-
- def create_db_instance(self):
- db_kwargs = self._get_db_kwargs()
- database = self.backend.create_database(db_kwargs)
- template = self.response_template(CREATE_DATABASE_TEMPLATE)
- return template.render(database=database)
-
- def create_db_instance_read_replica(self):
- db_kwargs = self._get_db_replica_kwargs()
-
- database = self.backend.create_database_replica(db_kwargs)
- template = self.response_template(CREATE_DATABASE_REPLICA_TEMPLATE)
- return template.render(database=database)
-
- def describe_db_instances(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- filters = filters_from_querystring(self.querystring)
- all_instances = list(
- self.backend.describe_databases(db_instance_identifier, filters=filters)
- )
- marker = self._get_param("Marker")
- all_ids = [instance.db_instance_identifier for instance in all_instances]
- if marker:
- start = all_ids.index(marker) + 1
- else:
- start = 0
- page_size = self._get_int_param(
- "MaxRecords", 50
- ) # the default is 100, but using 50 to make testing easier
- instances_resp = all_instances[start : start + page_size]
- next_marker = None
- if len(all_instances) > start + page_size:
- next_marker = instances_resp[-1].db_instance_identifier
-
- template = self.response_template(DESCRIBE_DATABASES_TEMPLATE)
- return template.render(databases=instances_resp, marker=next_marker)
-
- def modify_db_instance(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- db_kwargs = self._get_db_kwargs()
- new_db_instance_identifier = self._get_param("NewDBInstanceIdentifier")
- if new_db_instance_identifier:
- db_kwargs["new_db_instance_identifier"] = new_db_instance_identifier
- database = self.backend.modify_database(db_instance_identifier, db_kwargs)
- template = self.response_template(MODIFY_DATABASE_TEMPLATE)
- return template.render(database=database)
-
- def delete_db_instance(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- db_snapshot_name = self._get_param("FinalDBSnapshotIdentifier")
- database = self.backend.delete_database(
- db_instance_identifier, db_snapshot_name
- )
- template = self.response_template(DELETE_DATABASE_TEMPLATE)
- return template.render(database=database)
-
- def reboot_db_instance(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- database = self.backend.reboot_db_instance(db_instance_identifier)
- template = self.response_template(REBOOT_DATABASE_TEMPLATE)
- return template.render(database=database)
-
- def create_db_snapshot(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- snapshot = self.backend.create_database_snapshot(
- db_instance_identifier, db_snapshot_identifier, tags
- )
- template = self.response_template(CREATE_SNAPSHOT_TEMPLATE)
- return template.render(snapshot=snapshot)
-
- def copy_db_snapshot(self):
- source_snapshot_identifier = self._get_param("SourceDBSnapshotIdentifier")
- target_snapshot_identifier = self._get_param("TargetDBSnapshotIdentifier")
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- snapshot = self.backend.copy_database_snapshot(
- source_snapshot_identifier, target_snapshot_identifier, tags,
- )
- template = self.response_template(COPY_SNAPSHOT_TEMPLATE)
- return template.render(snapshot=snapshot)
-
- def describe_db_snapshots(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
- filters = filters_from_querystring(self.querystring)
- snapshots = self.backend.describe_database_snapshots(
- db_instance_identifier, db_snapshot_identifier, filters
- )
- template = self.response_template(DESCRIBE_SNAPSHOTS_TEMPLATE)
- return template.render(snapshots=snapshots)
-
- def delete_db_snapshot(self):
- db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
- snapshot = self.backend.delete_database_snapshot(db_snapshot_identifier)
- template = self.response_template(DELETE_SNAPSHOT_TEMPLATE)
- return template.render(snapshot=snapshot)
-
- def restore_db_instance_from_db_snapshot(self):
- db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
- db_kwargs = self._get_db_kwargs()
- new_instance = self.backend.restore_db_instance_from_db_snapshot(
- db_snapshot_identifier, db_kwargs
- )
- template = self.response_template(RESTORE_INSTANCE_FROM_SNAPSHOT_TEMPLATE)
- return template.render(database=new_instance)
-
- def list_tags_for_resource(self):
- arn = self._get_param("ResourceName")
- template = self.response_template(LIST_TAGS_FOR_RESOURCE_TEMPLATE)
- tags = self.backend.list_tags_for_resource(arn)
- return template.render(tags=tags)
-
- def add_tags_to_resource(self):
- arn = self._get_param("ResourceName")
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- tags = self.backend.add_tags_to_resource(arn, tags)
- template = self.response_template(ADD_TAGS_TO_RESOURCE_TEMPLATE)
- return template.render(tags=tags)
-
- def remove_tags_from_resource(self):
- arn = self._get_param("ResourceName")
- tag_keys = self.unpack_list_params("TagKeys.member")
- self.backend.remove_tags_from_resource(arn, tag_keys)
- template = self.response_template(REMOVE_TAGS_FROM_RESOURCE_TEMPLATE)
- return template.render()
-
- def stop_db_instance(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- db_snapshot_identifier = self._get_param("DBSnapshotIdentifier")
- database = self.backend.stop_database(
- db_instance_identifier, db_snapshot_identifier
- )
- template = self.response_template(STOP_DATABASE_TEMPLATE)
- return template.render(database=database)
-
- def start_db_instance(self):
- db_instance_identifier = self._get_param("DBInstanceIdentifier")
- database = self.backend.start_database(db_instance_identifier)
- template = self.response_template(START_DATABASE_TEMPLATE)
- return template.render(database=database)
-
- def create_db_security_group(self):
- group_name = self._get_param("DBSecurityGroupName")
- description = self._get_param("DBSecurityGroupDescription")
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- security_group = self.backend.create_security_group(
- group_name, description, tags
- )
- template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE)
- return template.render(security_group=security_group)
-
- def describe_db_security_groups(self):
- security_group_name = self._get_param("DBSecurityGroupName")
- security_groups = self.backend.describe_security_groups(security_group_name)
- template = self.response_template(DESCRIBE_SECURITY_GROUPS_TEMPLATE)
- return template.render(security_groups=security_groups)
-
- def delete_db_security_group(self):
- security_group_name = self._get_param("DBSecurityGroupName")
- security_group = self.backend.delete_security_group(security_group_name)
- template = self.response_template(DELETE_SECURITY_GROUP_TEMPLATE)
- return template.render(security_group=security_group)
-
- def authorize_db_security_group_ingress(self):
- security_group_name = self._get_param("DBSecurityGroupName")
- cidr_ip = self._get_param("CIDRIP")
- security_group = self.backend.authorize_security_group(
- security_group_name, cidr_ip
- )
- template = self.response_template(AUTHORIZE_SECURITY_GROUP_TEMPLATE)
- return template.render(security_group=security_group)
-
- def create_db_subnet_group(self):
- subnet_name = self._get_param("DBSubnetGroupName")
- description = self._get_param("DBSubnetGroupDescription")
- subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- subnets = [
- ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids
- ]
- subnet_group = self.backend.create_subnet_group(
- subnet_name, description, subnets, tags
- )
- template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE)
- return template.render(subnet_group=subnet_group)
-
- def describe_db_subnet_groups(self):
- subnet_name = self._get_param("DBSubnetGroupName")
- subnet_groups = self.backend.describe_subnet_groups(subnet_name)
- template = self.response_template(DESCRIBE_SUBNET_GROUPS_TEMPLATE)
- return template.render(subnet_groups=subnet_groups)
-
- def modify_db_subnet_group(self):
- subnet_name = self._get_param("DBSubnetGroupName")
- description = self._get_param("DBSubnetGroupDescription")
- subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
- subnets = [
- ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids
- ]
- subnet_group = self.backend.modify_db_subnet_group(
- subnet_name, description, subnets
- )
- template = self.response_template(MODIFY_SUBNET_GROUPS_TEMPLATE)
- return template.render(subnet_group=subnet_group)
-
- def delete_db_subnet_group(self):
- subnet_name = self._get_param("DBSubnetGroupName")
- subnet_group = self.backend.delete_subnet_group(subnet_name)
- template = self.response_template(DELETE_SUBNET_GROUP_TEMPLATE)
- return template.render(subnet_group=subnet_group)
-
- def create_option_group(self):
- kwargs = self._get_option_group_kwargs()
- option_group = self.backend.create_option_group(kwargs)
- template = self.response_template(CREATE_OPTION_GROUP_TEMPLATE)
- return template.render(option_group=option_group)
-
- def delete_option_group(self):
- kwargs = self._get_option_group_kwargs()
- option_group = self.backend.delete_option_group(kwargs["name"])
- template = self.response_template(DELETE_OPTION_GROUP_TEMPLATE)
- return template.render(option_group=option_group)
-
- def describe_option_groups(self):
- kwargs = self._get_option_group_kwargs()
- kwargs["max_records"] = self._get_int_param("MaxRecords")
- kwargs["marker"] = self._get_param("Marker")
- option_groups = self.backend.describe_option_groups(kwargs)
- template = self.response_template(DESCRIBE_OPTION_GROUP_TEMPLATE)
- return template.render(option_groups=option_groups)
-
- def describe_option_group_options(self):
- engine_name = self._get_param("EngineName")
- major_engine_version = self._get_param("MajorEngineVersion")
- option_group_options = self.backend.describe_option_group_options(
- engine_name, major_engine_version
- )
- return option_group_options
-
- def modify_option_group(self):
- option_group_name = self._get_param("OptionGroupName")
- count = 1
- options_to_include = []
- while self._get_param("OptionsToInclude.member.{0}.OptionName".format(count)):
- options_to_include.append(
- {
- "Port": self._get_param(
- "OptionsToInclude.member.{0}.Port".format(count)
- ),
- "OptionName": self._get_param(
- "OptionsToInclude.member.{0}.OptionName".format(count)
- ),
- "DBSecurityGroupMemberships": self._get_param(
- "OptionsToInclude.member.{0}.DBSecurityGroupMemberships".format(
- count
- )
- ),
- "OptionSettings": self._get_param(
- "OptionsToInclude.member.{0}.OptionSettings".format(count)
- ),
- "VpcSecurityGroupMemberships": self._get_param(
- "OptionsToInclude.member.{0}.VpcSecurityGroupMemberships".format(
- count
- )
- ),
- }
- )
- count += 1
-
- count = 1
- options_to_remove = []
- while self._get_param("OptionsToRemove.member.{0}".format(count)):
- options_to_remove.append(
- self._get_param("OptionsToRemove.member.{0}".format(count))
- )
- count += 1
- apply_immediately = self._get_param("ApplyImmediately")
- option_group = self.backend.modify_option_group(
- option_group_name, options_to_include, options_to_remove, apply_immediately
- )
- template = self.response_template(MODIFY_OPTION_GROUP_TEMPLATE)
- return template.render(option_group=option_group)
-
- def create_db_parameter_group(self):
- kwargs = self._get_db_parameter_group_kwargs()
- db_parameter_group = self.backend.create_db_parameter_group(kwargs)
- template = self.response_template(CREATE_DB_PARAMETER_GROUP_TEMPLATE)
- return template.render(db_parameter_group=db_parameter_group)
-
- def describe_db_parameter_groups(self):
- kwargs = self._get_db_parameter_group_kwargs()
- kwargs["max_records"] = self._get_int_param("MaxRecords")
- kwargs["marker"] = self._get_param("Marker")
- db_parameter_groups = self.backend.describe_db_parameter_groups(kwargs)
- template = self.response_template(DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE)
- return template.render(db_parameter_groups=db_parameter_groups)
-
- def modify_db_parameter_group(self):
- db_parameter_group_name = self._get_param("DBParameterGroupName")
- db_parameter_group_parameters = self._get_db_parameter_group_parameters()
- db_parameter_group = self.backend.modify_db_parameter_group(
- db_parameter_group_name, db_parameter_group_parameters
- )
- template = self.response_template(MODIFY_DB_PARAMETER_GROUP_TEMPLATE)
- return template.render(db_parameter_group=db_parameter_group)
-
- def _get_db_parameter_group_parameters(self):
- parameter_group_parameters = defaultdict(dict)
- for param_name, value in self.querystring.items():
- if not param_name.startswith("Parameters.Parameter"):
- continue
-
- split_param_name = param_name.split(".")
- param_id = split_param_name[2]
- param_setting = split_param_name[3]
-
- parameter_group_parameters[param_id][param_setting] = value[0]
-
- return parameter_group_parameters.values()
-
- def describe_db_parameters(self):
- db_parameter_group_name = self._get_param("DBParameterGroupName")
- db_parameter_groups = self.backend.describe_db_parameter_groups(
- {"name": db_parameter_group_name}
- )
- if not db_parameter_groups:
- raise DBParameterGroupNotFoundError(db_parameter_group_name)
-
- template = self.response_template(DESCRIBE_DB_PARAMETERS_TEMPLATE)
- return template.render(db_parameter_group=db_parameter_groups[0])
-
- def delete_db_parameter_group(self):
- kwargs = self._get_db_parameter_group_kwargs()
- db_parameter_group = self.backend.delete_db_parameter_group(kwargs["name"])
- template = self.response_template(DELETE_DB_PARAMETER_GROUP_TEMPLATE)
- return template.render(db_parameter_group=db_parameter_group)
-
- def create_db_cluster(self):
- kwargs = self._get_db_cluster_kwargs()
- cluster = self.backend.create_db_cluster(kwargs)
- template = self.response_template(CREATE_DB_CLUSTER_TEMPLATE)
- return template.render(cluster=cluster)
-
- def describe_db_clusters(self):
- _id = self._get_param("DBClusterIdentifier")
- clusters = self.backend.describe_db_clusters(cluster_identifier=_id)
- template = self.response_template(DESCRIBE_CLUSTERS_TEMPLATE)
- return template.render(clusters=clusters)
-
- def delete_db_cluster(self):
- _id = self._get_param("DBClusterIdentifier")
- cluster = self.backend.delete_db_cluster(cluster_identifier=_id)
- template = self.response_template(DELETE_CLUSTER_TEMPLATE)
- return template.render(cluster=cluster)
-
- def start_db_cluster(self):
- _id = self._get_param("DBClusterIdentifier")
- cluster = self.backend.start_db_cluster(cluster_identifier=_id)
- template = self.response_template(START_CLUSTER_TEMPLATE)
- return template.render(cluster=cluster)
-
- def stop_db_cluster(self):
- _id = self._get_param("DBClusterIdentifier")
- cluster = self.backend.stop_db_cluster(cluster_identifier=_id)
- template = self.response_template(STOP_CLUSTER_TEMPLATE)
- return template.render(cluster=cluster)
-
- def create_db_cluster_snapshot(self):
- db_cluster_identifier = self._get_param("DBClusterIdentifier")
- db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- snapshot = self.backend.create_db_cluster_snapshot(
- db_cluster_identifier, db_snapshot_identifier, tags
- )
- template = self.response_template(CREATE_CLUSTER_SNAPSHOT_TEMPLATE)
- return template.render(snapshot=snapshot)
-
- def copy_db_cluster_snapshot(self):
- source_snapshot_identifier = self._get_param(
- "SourceDBClusterSnapshotIdentifier"
- )
- target_snapshot_identifier = self._get_param(
- "TargetDBClusterSnapshotIdentifier"
- )
- tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
- snapshot = self.backend.copy_cluster_snapshot(
- source_snapshot_identifier, target_snapshot_identifier, tags,
- )
- template = self.response_template(COPY_CLUSTER_SNAPSHOT_TEMPLATE)
- return template.render(snapshot=snapshot)
-
- def describe_db_cluster_snapshots(self):
- db_cluster_identifier = self._get_param("DBClusterIdentifier")
- db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
- filters = filters_from_querystring(self.querystring)
- snapshots = self.backend.describe_db_cluster_snapshots(
- db_cluster_identifier, db_snapshot_identifier, filters
- )
- template = self.response_template(DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE)
- return template.render(snapshots=snapshots)
-
- def delete_db_cluster_snapshot(self):
- db_snapshot_identifier = self._get_param("DBClusterSnapshotIdentifier")
- snapshot = self.backend.delete_db_cluster_snapshot(db_snapshot_identifier)
- template = self.response_template(DELETE_CLUSTER_SNAPSHOT_TEMPLATE)
- return template.render(snapshot=snapshot)
-
- def restore_db_cluster_from_snapshot(self):
- db_snapshot_identifier = self._get_param("SnapshotIdentifier")
- db_kwargs = self._get_db_cluster_kwargs()
- new_cluster = self.backend.restore_db_cluster_from_snapshot(
- db_snapshot_identifier, db_kwargs
- )
- template = self.response_template(RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE)
- return template.render(cluster=new_cluster)
-
- def start_export_task(self):
- kwargs = self._get_export_task_kwargs()
- export_task = self.backend.start_export_task(kwargs)
- template = self.response_template(START_EXPORT_TASK_TEMPLATE)
- return template.render(task=export_task)
-
- def cancel_export_task(self):
- export_task_identifier = self._get_param("ExportTaskIdentifier")
- export_task = self.backend.cancel_export_task(export_task_identifier)
- template = self.response_template(CANCEL_EXPORT_TASK_TEMPLATE)
- return template.render(task=export_task)
-
- def describe_export_tasks(self):
- export_task_identifier = self._get_param("ExportTaskIdentifier")
- tasks = self.backend.describe_export_tasks(export_task_identifier,)
- template = self.response_template(DESCRIBE_EXPORT_TASKS_TEMPLATE)
- return template.render(tasks=tasks)
-
- def create_event_subscription(self):
- kwargs = self._get_event_subscription_kwargs()
- subscription = self.backend.create_event_subscription(kwargs)
- template = self.response_template(CREATE_EVENT_SUBSCRIPTION_TEMPLATE)
- return template.render(subscription=subscription)
-
- def delete_event_subscription(self):
- subscription_name = self._get_param("SubscriptionName")
- subscription = self.backend.delete_event_subscription(subscription_name)
- template = self.response_template(DELETE_EVENT_SUBSCRIPTION_TEMPLATE)
- return template.render(subscription=subscription)
-
- def describe_event_subscriptions(self):
- subscription_name = self._get_param("SubscriptionName")
- subscriptions = self.backend.describe_event_subscriptions(subscription_name)
- template = self.response_template(DESCRIBE_EVENT_SUBSCRIPTIONS_TEMPLATE)
- return template.render(subscriptions=subscriptions)
-
-
-CREATE_DATABASE_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-CREATE_DATABASE_REPLICA_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- 5e60c46d-a844-11e4-bb68-17f36418e58f
-
-"""
-
-DESCRIBE_DATABASES_TEMPLATE = """
-
-
- {%- for database in databases -%}
- {{ database.to_xml() }}
- {%- endfor -%}
-
- {% if marker %}
- {{ marker }}
- {% endif %}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-MODIFY_DATABASE_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- bb58476c-a1a8-11e4-99cf-55e92d4bbada
-
-"""
-
-REBOOT_DATABASE_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- d55711cb-a1ab-11e4-99cf-55e92d4bbada
-
-"""
-
-START_DATABASE_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab9
-
-"""
-
-STOP_DATABASE_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab8
-
-"""
-
-DELETE_DATABASE_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- 7369556f-b70d-11c3-faca-6ba18376ea1b
-
-"""
-
-DELETE_CLUSTER_TEMPLATE = """
-
- {{ cluster.to_xml() }}
-
-
- 7369556f-b70d-11c3-faca-6ba18376ea1b
-
-"""
-
-RESTORE_INSTANCE_FROM_SNAPSHOT_TEMPLATE = """
-
- {{ database.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-CREATE_SNAPSHOT_TEMPLATE = """
-
- {{ snapshot.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-COPY_SNAPSHOT_TEMPLATE = """
-
- {{ snapshot.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-DESCRIBE_SNAPSHOTS_TEMPLATE = """
-
-
- {%- for snapshot in snapshots -%}
- {{ snapshot.to_xml() }}
- {%- endfor -%}
-
- {% if marker %}
- {{ marker }}
- {% endif %}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-DELETE_SNAPSHOT_TEMPLATE = """
-
- {{ snapshot.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-CREATE_SECURITY_GROUP_TEMPLATE = """
-
- {{ security_group.to_xml() }}
-
-
- 462165d0-a77a-11e4-a5fa-75b30c556f97
-
-"""
-
-DESCRIBE_SECURITY_GROUPS_TEMPLATE = """
-
-
- {% for security_group in security_groups %}
- {{ security_group.to_xml() }}
- {% endfor %}
-
-
-
- 5df2014e-a779-11e4-bdb0-594def064d0c
-
-"""
-
-DELETE_SECURITY_GROUP_TEMPLATE = """
-
- 97e846bd-a77d-11e4-ac58-91351c0f3426
-
-"""
-
-AUTHORIZE_SECURITY_GROUP_TEMPLATE = """
-
- {{ security_group.to_xml() }}
-
-
- 75d32fd5-a77e-11e4-8892-b10432f7a87d
-
-"""
-
-CREATE_SUBNET_GROUP_TEMPLATE = """
-
- {{ subnet_group.to_xml() }}
-
-
- 3a401b3f-bb9e-11d3-f4c6-37db295f7674
-
-"""
-
-DESCRIBE_SUBNET_GROUPS_TEMPLATE = """
-
-
- {% for subnet_group in subnet_groups %}
- {{ subnet_group.to_xml() }}
- {% endfor %}
-
-
-
- b783db3b-b98c-11d3-fbc7-5c0aad74da7c
-
-"""
-
-MODIFY_SUBNET_GROUPS_TEMPLATE = """
-
- {{ subnet_group.to_xml() }}
-
-
- b783db3b-b98c-11d3-fbc7-5c0aad74da7c
-
-"""
-
-DELETE_SUBNET_GROUP_TEMPLATE = """
-
- 13785dd5-a7fc-11e4-bb9c-7f371d0859b0
-
-"""
-
-CREATE_OPTION_GROUP_TEMPLATE = """
-
- {{ option_group.to_xml() }}
-
-
- 1e38dad4-9f50-11e4-87ea-a31c60ed2e36
-
-"""
-
-DELETE_OPTION_GROUP_TEMPLATE = """
-
- e2590367-9fa2-11e4-99cf-55e92d41c60e
-
-"""
-
-DESCRIBE_OPTION_GROUP_TEMPLATE = """
-
-
- {%- for option_group in option_groups -%}
- {{ option_group.to_xml() }}
- {%- endfor -%}
-
-
-
- 4caf445d-9fbc-11e4-87ea-a31c60ed2e36
-
-"""
-
-DESCRIBE_OPTION_GROUP_OPTIONS_TEMPLATE = """
-
-
- {%- for option_group_option in option_group_options -%}
- {{ option_group_option.to_xml() }}
- {%- endfor -%}
-
-
-
- 457f7bb8-9fbf-11e4-9084-5754f80d5144
-
-"""
-
-MODIFY_OPTION_GROUP_TEMPLATE = """
-
- {{ option_group.to_xml() }}
-
-
- ce9284a5-a0de-11e4-b984-a11a53e1f328
-
-"""
-
-CREATE_DB_PARAMETER_GROUP_TEMPLATE = """
-
- {{ db_parameter_group.to_xml() }}
-
-
- 7805c127-af22-11c3-96ac-6999cc5f7e72
-
-"""
-
-DESCRIBE_DB_PARAMETER_GROUPS_TEMPLATE = """
-
-
- {%- for db_parameter_group in db_parameter_groups -%}
- {{ db_parameter_group.to_xml() }}
- {%- endfor -%}
-
-
-
- b75d527a-b98c-11d3-f272-7cd6cce12cc5
-
-"""
-
-MODIFY_DB_PARAMETER_GROUP_TEMPLATE = """
-
- {{ db_parameter_group.name }}
-
-
- 12d7435e-bba0-11d3-fe11-33d33a9bb7e3
-
-"""
-
-DELETE_DB_PARAMETER_GROUP_TEMPLATE = """
-
- cad6c267-ba25-11d3-fe11-33d33a9bb7e3
-
-"""
-
-DESCRIBE_DB_PARAMETERS_TEMPLATE = """
-
-
- {%- for db_parameter_name, db_parameter in db_parameter_group.parameters.items() -%}
-
- {%- for parameter_name, parameter_value in db_parameter.items() -%}
- <{{ parameter_name }}>{{ parameter_value }}{{ parameter_name }}>
- {%- endfor -%}
-
- {%- endfor -%}
-
-
-
- 8c40488f-b9ff-11d3-a15e-7ac49293f4fa
-
-
-"""
-
-LIST_TAGS_FOR_RESOURCE_TEMPLATE = """
-
-
- {%- for tag in tags -%}
-
- {{ tag['Key'] }}
- {{ tag['Value'] }}
-
- {%- endfor -%}
-
-
-
- 8c21ba39-a598-11e4-b688-194eaf8658fa
-
-"""
-
-ADD_TAGS_TO_RESOURCE_TEMPLATE = """
-
- b194d9ca-a664-11e4-b688-194eaf8658fa
-
-"""
-
-REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = """
-
- b194d9ca-a664-11e4-b688-194eaf8658fa
-
-"""
-
-CREATE_DB_CLUSTER_TEMPLATE = """
-
- {{ cluster.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-DESCRIBE_CLUSTERS_TEMPLATE = """
-
-
- {%- for cluster in clusters -%}
- {{ cluster.to_xml() }}
- {%- endfor -%}
-
- {% if marker %}
- {{ marker }}
- {% endif %}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-START_CLUSTER_TEMPLATE = """
-
- {{ cluster.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab9
-
-"""
-
-STOP_CLUSTER_TEMPLATE = """
-
- {{ cluster.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab8
-
-"""
-
-RESTORE_CLUSTER_FROM_SNAPSHOT_TEMPLATE = """
-
- {{ cluster.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-CREATE_CLUSTER_SNAPSHOT_TEMPLATE = """
-
- {{ snapshot.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-COPY_CLUSTER_SNAPSHOT_TEMPLATE = """
-
- {{ snapshot.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-DESCRIBE_CLUSTER_SNAPSHOTS_TEMPLATE = """
-
-
- {%- for snapshot in snapshots -%}
- {{ snapshot.to_xml() }}
- {%- endfor -%}
-
- {% if marker %}
- {{ marker }}
- {% endif %}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-"""
-
-DELETE_CLUSTER_SNAPSHOT_TEMPLATE = """
-
- {{ snapshot.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-START_EXPORT_TASK_TEMPLATE = """
-
- {{ task.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-CANCEL_EXPORT_TASK_TEMPLATE = """
-
- {{ task.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-DESCRIBE_EXPORT_TASKS_TEMPLATE = """
-
-
- {%- for task in tasks -%}
- {{ task.to_xml() }}
- {%- endfor -%}
-
- {% if marker %}
- {{ marker }}
- {% endif %}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-CREATE_EVENT_SUBSCRIPTION_TEMPLATE = """
-
- {{ subscription.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-DELETE_EVENT_SUBSCRIPTION_TEMPLATE = """
-
- {{ subscription.to_xml() }}
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
-
-DESCRIBE_EVENT_SUBSCRIPTIONS_TEMPLATE = """
-
-
- {%- for subscription in subscriptions -%}
- {{ subscription.to_xml() }}
- {%- endfor -%}
-
-
-
- 523e3218-afc7-11c3-90f5-f90431260ab4
-
-
-"""
diff --git a/moto/rds2/urls.py b/moto/rds2/urls.py
deleted file mode 100644
index 95ec8b4fb..000000000
--- a/moto/rds2/urls.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .responses import RDS2Response
-
-url_bases = [r"https?://rds\.(.+)\.amazonaws\.com", r"https?://rds\.amazonaws\.com"]
-
-url_paths = {"{0}/$": RDS2Response.dispatch}
diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py
index bd37ec0f0..2d2ed8deb 100644
--- a/moto/resourcegroupstaggingapi/models.py
+++ b/moto/resourcegroupstaggingapi/models.py
@@ -11,7 +11,7 @@ from moto.elb import elb_backends
from moto.elbv2 import elbv2_backends
from moto.kinesis import kinesis_backends
from moto.kms import kms_backends
-from moto.rds2 import rds2_backends
+from moto.rds import rds_backends
from moto.glacier import glacier_backends
from moto.redshift import redshift_backends
from moto.emr import emr_backends
@@ -83,9 +83,9 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
@property
def rds_backend(self):
"""
- :rtype: moto.rds2.models.RDS2Backend
+ :rtype: moto.rds.models.RDSBackend
"""
- return rds2_backends[self.region_name]
+ return rds_backends[self.region_name]
@property
def glacier_backend(self):
diff --git a/scripts/implementation_coverage.py b/scripts/implementation_coverage.py
index f8904aedf..39d48ad48 100755
--- a/scripts/implementation_coverage.py
+++ b/scripts/implementation_coverage.py
@@ -7,7 +7,7 @@ import boto3
script_dir = os.path.dirname(os.path.abspath(__file__))
-alternative_service_names = {"lambda": "awslambda", "dynamodb": "dynamodb2", "rds": "rds2"}
+alternative_service_names = {"lambda": "awslambda", "dynamodb": "dynamodb2"}
def get_moto_implementation(service_name):
diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py
index e27e8faf1..d6821010c 100644
--- a/tests/test_core/test_auth.py
+++ b/tests/test_core/test_auth.py
@@ -6,7 +6,7 @@ from botocore.exceptions import ClientError
import pytest
-from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds2
+from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds
from moto.core import set_initial_no_auth_action_count
from moto.core import ACCOUNT_ID
from uuid import uuid4
@@ -599,7 +599,7 @@ def test_allowed_with_temporary_credentials():
@set_initial_no_auth_action_count(3)
@mock_iam
@mock_sts
-@mock_rds2
+@mock_rds
def test_access_denied_with_temporary_credentials():
role_name = "test-role"
session_name = "test-session"
diff --git a/tests/test_rds2/test_filters.py b/tests/test_rds/test_filters.py
similarity index 98%
rename from tests/test_rds2/test_filters.py
rename to tests/test_rds/test_filters.py
index 6c56ad13b..87fac8f40 100644
--- a/tests/test_rds2/test_filters.py
+++ b/tests/test_rds/test_filters.py
@@ -3,16 +3,16 @@ import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
-from moto import mock_rds2
+from moto import mock_rds
class TestDBInstanceFilters(object):
- mock_rds = mock_rds2()
+ mock = mock_rds()
@classmethod
def setup_class(cls):
- cls.mock_rds.start()
+ cls.mock.start()
client = boto3.client("rds", region_name="us-west-2")
for i in range(10):
identifier = "db-instance-{}".format(i)
@@ -27,7 +27,7 @@ class TestDBInstanceFilters(object):
@classmethod
def teardown_class(cls):
try:
- cls.mock_rds.stop()
+ cls.mock.stop()
except RuntimeError:
pass
@@ -179,11 +179,11 @@ class TestDBInstanceFilters(object):
class TestDBSnapshotFilters(object):
- mock_rds = mock_rds2()
+ mock = mock_rds()
@classmethod
def setup_class(cls):
- cls.mock_rds.start()
+ cls.mock.start()
client = boto3.client("rds", region_name="us-west-2")
# We'll set up two instances (one postgres, one mysql)
# with two snapshots each.
@@ -205,7 +205,7 @@ class TestDBSnapshotFilters(object):
@classmethod
def teardown_class(cls):
try:
- cls.mock_rds.stop()
+ cls.mock.stop()
except RuntimeError:
pass
diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py
index 96ed6b6b6..d72826577 100644
--- a/tests/test_rds/test_rds.py
+++ b/tests/test_rds/test_rds.py
@@ -1,16 +1,339 @@
+from botocore.exceptions import ClientError
import boto3
-import sure # noqa # pylint: disable=unused-import
import pytest
-
-from moto import mock_rds
+import sure # noqa # pylint: disable=unused-import
+from moto import mock_ec2, mock_kms, mock_rds
+from moto.core import ACCOUNT_ID
-def test_deprecation_warning():
- with pytest.warns(None) as record:
- mock_rds()
- str(record[0].message).should.contain(
- "Module mock_rds has been deprecated, and will be repurposed in a later release"
+@mock_rds
+def test_create_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ LicenseModel="license-included",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ VpcSecurityGroupIds=["sg-123456"],
)
+ db_instance = database["DBInstance"]
+ db_instance["AllocatedStorage"].should.equal(10)
+ db_instance["DBInstanceClass"].should.equal("db.m1.small")
+ db_instance["LicenseModel"].should.equal("license-included")
+ db_instance["MasterUsername"].should.equal("root")
+ db_instance["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal("my_sg")
+ db_instance["DBInstanceArn"].should.equal(
+ "arn:aws:rds:us-west-2:{}:db:db-master-1".format(ACCOUNT_ID)
+ )
+ db_instance["DBInstanceStatus"].should.equal("available")
+ db_instance["DBName"].should.equal("staging-postgres")
+ db_instance["DBInstanceIdentifier"].should.equal("db-master-1")
+ db_instance["IAMDatabaseAuthenticationEnabled"].should.equal(False)
+ db_instance["DbiResourceId"].should.contain("db-")
+ db_instance["CopyTagsToSnapshot"].should.equal(False)
+ db_instance["InstanceCreateTime"].should.be.a("datetime.datetime")
+ db_instance["VpcSecurityGroups"][0]["VpcSecurityGroupId"].should.equal("sg-123456")
+ db_instance["DeletionProtection"].should.equal(False)
+
+
+@mock_rds
+def test_database_with_deletion_protection_cannot_be_deleted():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ DeletionProtection=True,
+ )
+ db_instance = database["DBInstance"]
+ db_instance["DBInstanceClass"].should.equal("db.m1.small")
+ db_instance["DeletionProtection"].should.equal(True)
+
+
+@mock_rds
+def test_create_database_no_allocated_storage():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ )
+ db_instance = database["DBInstance"]
+ db_instance["Engine"].should.equal("postgres")
+ db_instance["StorageType"].should.equal("gp2")
+ db_instance["AllocatedStorage"].should.equal(20)
+
+
+@mock_rds
+def test_create_database_non_existing_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance.when.called_with(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ OptionGroupName="non-existing",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_create_database_with_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="my-og",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ OptionGroupName="my-og",
+ )
+ db_instance = database["DBInstance"]
+ db_instance["AllocatedStorage"].should.equal(10)
+ db_instance["DBInstanceClass"].should.equal("db.m1.small")
+ db_instance["DBName"].should.equal("staging-postgres")
+ db_instance["OptionGroupMemberships"][0]["OptionGroupName"].should.equal("my-og")
+
+
+@mock_rds
+def test_stop_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ LicenseModel="license-included",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ mydb = conn.describe_db_instances(
+ DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
+ )["DBInstances"][0]
+ mydb["DBInstanceStatus"].should.equal("available")
+ # test stopping database should shutdown
+ response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
+ response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+ response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
+ # test rdsclient error when trying to stop an already stopped database
+ conn.stop_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
+ ).should.throw(ClientError)
+ # test stopping a stopped database with snapshot should error and no snapshot should exist for that call
+ conn.stop_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"],
+ DBSnapshotIdentifier="rocky4570-rds-snap",
+ ).should.throw(ClientError)
+ response = conn.describe_db_snapshots()
+ response["DBSnapshots"].should.equal([])
+
+
+@mock_rds
+def test_start_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ LicenseModel="license-included",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ mydb = conn.describe_db_instances(
+ DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
+ )["DBInstances"][0]
+ mydb["DBInstanceStatus"].should.equal("available")
+ # test starting an already started database should error
+ conn.start_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
+ ).should.throw(ClientError)
+ # stop and test start - should go from stopped to available, create snapshot and check snapshot
+ response = conn.stop_db_instance(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"],
+ DBSnapshotIdentifier="rocky4570-rds-snap",
+ )
+ response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+ response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
+ response = conn.describe_db_snapshots()
+ response["DBSnapshots"][0]["DBSnapshotIdentifier"].should.equal(
+ "rocky4570-rds-snap"
+ )
+ response = conn.start_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
+ response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+ response["DBInstance"]["DBInstanceStatus"].should.equal("available")
+ # starting database should not remove snapshot
+ response = conn.describe_db_snapshots()
+ response["DBSnapshots"][0]["DBSnapshotIdentifier"].should.equal(
+ "rocky4570-rds-snap"
+ )
+ # test stopping database, create snapshot with existing snapshot already created should throw error
+ conn.stop_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"],
+ DBSnapshotIdentifier="rocky4570-rds-snap",
+ ).should.throw(ClientError)
+ # test stopping database not invoking snapshot should succeed.
+ response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
+ response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+ response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
+
+
+@mock_rds
+def test_fail_to_stop_multi_az_and_sqlserver():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="sqlserver-ee",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ LicenseModel="license-included",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ MultiAZ=True,
+ )
+
+ mydb = conn.describe_db_instances(
+ DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
+ )["DBInstances"][0]
+ mydb["DBInstanceStatus"].should.equal("available")
+ # multi-az databases arent allowed to be shutdown at this time.
+ conn.stop_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
+ ).should.throw(ClientError)
+ # multi-az databases arent allowed to be started up at this time.
+ conn.start_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_stop_multi_az_postgres():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ LicenseModel="license-included",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ MultiAZ=True,
+ )
+
+ mydb = conn.describe_db_instances(
+ DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
+ )["DBInstances"][0]
+ mydb["DBInstanceStatus"].should.equal("available")
+
+ response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
+ response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+ response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
+
+
+@mock_rds
+def test_fail_to_stop_readreplica():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ LicenseModel="license-included",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+
+ replica = conn.create_db_instance_read_replica(
+ DBInstanceIdentifier="db-replica-1",
+ SourceDBInstanceIdentifier="db-master-1",
+ DBInstanceClass="db.m1.small",
+ )
+
+ mydb = conn.describe_db_instances(
+ DBInstanceIdentifier=replica["DBInstance"]["DBInstanceIdentifier"]
+ )["DBInstances"][0]
+ mydb["DBInstanceStatus"].should.equal("available")
+ # read-replicas are not allowed to be stopped at this time.
+ conn.stop_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
+ ).should.throw(ClientError)
+ # read-replicas are not allowed to be started at this time.
+ conn.start_db_instance.when.called_with(
+ DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_get_databases():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ instances = conn.describe_db_instances()
+ list(instances["DBInstances"]).should.have.length_of(0)
+
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ DBInstanceClass="postgres",
+ Engine="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-2",
+ AllocatedStorage=10,
+ DBInstanceClass="postgres",
+ Engine="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ DeletionProtection=True,
+ )
+ instances = conn.describe_db_instances()
+ list(instances["DBInstances"]).should.have.length_of(2)
+
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ list(instances["DBInstances"]).should.have.length_of(1)
+ instances["DBInstances"][0]["DBInstanceIdentifier"].should.equal("db-master-1")
+ instances["DBInstances"][0]["DeletionProtection"].should.equal(False)
+ instances["DBInstances"][0]["DBInstanceArn"].should.equal(
+ "arn:aws:rds:us-west-2:{}:db:db-master-1".format(ACCOUNT_ID)
+ )
+
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2")
+ instances["DBInstances"][0]["DeletionProtection"].should.equal(True)
@mock_rds
@@ -32,3 +355,1666 @@ def test_get_databases_paginated():
resp2 = conn.describe_db_instances(Marker=resp["Marker"])
resp2["DBInstances"].should.have.length_of(1)
+
+ resp3 = conn.describe_db_instances(MaxRecords=100)
+ resp3["DBInstances"].should.have.length_of(51)
+
+
+@mock_rds
+def test_describe_non_existent_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.describe_db_instances.when.called_with(
+ DBInstanceIdentifier="not-a-db"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_modify_db_instance():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ DBInstanceClass="postgres",
+ Engine="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ instances["DBInstances"][0]["AllocatedStorage"].should.equal(10)
+ conn.modify_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=20,
+ ApplyImmediately=True,
+ VpcSecurityGroupIds=["sg-123456"],
+ )
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ instances["DBInstances"][0]["AllocatedStorage"].should.equal(20)
+ instances["DBInstances"][0]["VpcSecurityGroups"][0][
+ "VpcSecurityGroupId"
+ ].should.equal("sg-123456")
+
+
+@mock_rds
+def test_modify_db_instance_not_existent_db_parameter_group_name():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ DBInstanceClass="postgres",
+ Engine="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ instances["DBInstances"][0]["AllocatedStorage"].should.equal(10)
+ conn.modify_db_instance.when.called_with(
+ DBInstanceIdentifier="db-master-1",
+ DBParameterGroupName="test-sqlserver-se-2017",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_rename_db_instance():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ DBInstanceClass="postgres",
+ Engine="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ list(instances["DBInstances"]).should.have.length_of(1)
+ conn.describe_db_instances.when.called_with(
+ DBInstanceIdentifier="db-master-2"
+ ).should.throw(ClientError)
+ conn.modify_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ NewDBInstanceIdentifier="db-master-2",
+ ApplyImmediately=True,
+ )
+ conn.describe_db_instances.when.called_with(
+ DBInstanceIdentifier="db-master-1"
+ ).should.throw(ClientError)
+ instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2")
+ list(instances["DBInstances"]).should.have.length_of(1)
+
+
+@mock_rds
+def test_modify_non_existent_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.modify_db_instance.when.called_with(
+ DBInstanceIdentifier="not-a-db", AllocatedStorage=20, ApplyImmediately=True
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_reboot_db_instance():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ DBInstanceClass="postgres",
+ Engine="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ database = conn.reboot_db_instance(DBInstanceIdentifier="db-master-1")
+ database["DBInstance"]["DBInstanceIdentifier"].should.equal("db-master-1")
+
+
+@mock_rds
+def test_reboot_non_existent_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.reboot_db_instance.when.called_with(
+ DBInstanceIdentifier="not-a-db"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_delete_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ instances = conn.describe_db_instances()
+ list(instances["DBInstances"]).should.have.length_of(0)
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ instances = conn.describe_db_instances()
+ list(instances["DBInstances"]).should.have.length_of(1)
+
+ conn.delete_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ FinalDBSnapshotIdentifier="primary-1-snapshot",
+ )
+
+ instances = conn.describe_db_instances()
+ list(instances["DBInstances"]).should.have.length_of(0)
+
+ # Saved the snapshot
+ snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get(
+ "DBSnapshots"
+ )
+ snapshots[0].get("Engine").should.equal("postgres")
+
+
+@mock_rds
+def test_create_db_snapshots():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_snapshot.when.called_with(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ ).should.throw(ClientError)
+
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+
+ snapshot = conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="g-1"
+ ).get("DBSnapshot")
+
+ snapshot.get("Engine").should.equal("postgres")
+ snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
+ snapshot.get("DBSnapshotIdentifier").should.equal("g-1")
+ result = conn.list_tags_for_resource(ResourceName=snapshot["DBSnapshotArn"])
+ result["TagList"].should.equal([])
+
+
+@mock_rds
+def test_create_db_snapshots_copy_tags():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_snapshot.when.called_with(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ ).should.throw(ClientError)
+
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ CopyTagsToSnapshot=True,
+ Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+ )
+
+ snapshot = conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="g-1"
+ ).get("DBSnapshot")
+
+ snapshot.get("Engine").should.equal("postgres")
+ snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
+ snapshot.get("DBSnapshotIdentifier").should.equal("g-1")
+ result = conn.list_tags_for_resource(ResourceName=snapshot["DBSnapshotArn"])
+ result["TagList"].should.equal(
+ [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+ )
+
+
+@mock_rds
+def test_copy_db_snapshots():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ ).get("DBSnapshot")
+
+ target_snapshot = conn.copy_db_snapshot(
+ SourceDBSnapshotIdentifier="snapshot-1", TargetDBSnapshotIdentifier="snapshot-2"
+ ).get("DBSnapshot")
+
+ target_snapshot.get("Engine").should.equal("postgres")
+ target_snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
+ target_snapshot.get("DBSnapshotIdentifier").should.equal("snapshot-2")
+ result = conn.list_tags_for_resource(ResourceName=target_snapshot["DBSnapshotArn"])
+ result["TagList"].should.equal([])
+
+
+@mock_rds
+def test_describe_db_snapshots():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+
+ created = conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ ).get("DBSnapshot")
+
+ created.get("Engine").should.equal("postgres")
+
+ by_database_id = conn.describe_db_snapshots(
+ DBInstanceIdentifier="db-primary-1"
+ ).get("DBSnapshots")
+ by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier="snapshot-1").get(
+ "DBSnapshots"
+ )
+ by_snapshot_id.should.equal(by_database_id)
+
+ snapshot = by_snapshot_id[0]
+ snapshot.should.equal(created)
+ snapshot.get("Engine").should.equal("postgres")
+
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-2"
+ )
+ snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get(
+ "DBSnapshots"
+ )
+ snapshots.should.have.length_of(2)
+
+
+@mock_rds
+def test_delete_db_snapshot():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ )
+
+ conn.describe_db_snapshots(DBSnapshotIdentifier="snapshot-1").get("DBSnapshots")[0]
+ conn.delete_db_snapshot(DBSnapshotIdentifier="snapshot-1")
+ conn.describe_db_snapshots.when.called_with(
+ DBSnapshotIdentifier="snapshot-1"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_restore_db_instance_from_db_snapshot():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ DBSecurityGroups=["my_sg"],
+ )
+ conn.describe_db_instances()["DBInstances"].should.have.length_of(1)
+
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ )
+
+ # restore
+ new_instance = conn.restore_db_instance_from_db_snapshot(
+ DBInstanceIdentifier="db-restore-1", DBSnapshotIdentifier="snapshot-1"
+ )["DBInstance"]
+ new_instance["DBInstanceIdentifier"].should.equal("db-restore-1")
+ new_instance["DBInstanceClass"].should.equal("db.m1.small")
+ new_instance["StorageType"].should.equal("gp2")
+ new_instance["Engine"].should.equal("postgres")
+ new_instance["DBName"].should.equal("staging-postgres")
+ new_instance["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
+ "default.postgres9.3"
+ )
+ new_instance["DBSecurityGroups"].should.equal(
+ [{"DBSecurityGroupName": "my_sg", "Status": "active"}]
+ )
+ new_instance["Endpoint"]["Port"].should.equal(5432)
+
+ # Verify it exists
+ conn.describe_db_instances()["DBInstances"].should.have.length_of(2)
+ conn.describe_db_instances(DBInstanceIdentifier="db-restore-1")[
+ "DBInstances"
+ ].should.have.length_of(1)
+
+
+@mock_rds
+def test_restore_db_instance_from_db_snapshot_and_override_params():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ conn.describe_db_instances()["DBInstances"].should.have.length_of(1)
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
+ )
+
+ # restore with some updated attributes
+ new_instance = conn.restore_db_instance_from_db_snapshot(
+ DBInstanceIdentifier="db-restore-1",
+ DBSnapshotIdentifier="snapshot-1",
+ Port=10000,
+ VpcSecurityGroupIds=["new_vpc"],
+ )["DBInstance"]
+ new_instance["DBInstanceIdentifier"].should.equal("db-restore-1")
+ new_instance["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
+ "default.postgres9.3"
+ )
+ new_instance["DBSecurityGroups"].should.equal(
+ [{"DBSecurityGroupName": "my_sg", "Status": "active"}]
+ )
+ new_instance["VpcSecurityGroups"].should.equal(
+ [{"VpcSecurityGroupId": "new_vpc", "Status": "active"}]
+ )
+ new_instance["Endpoint"]["Port"].should.equal(10000)
+
+
+@mock_rds
+def test_create_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ option_group = conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ option_group["OptionGroup"]["OptionGroupName"].should.equal("test")
+ option_group["OptionGroup"]["EngineName"].should.equal("mysql")
+ option_group["OptionGroup"]["OptionGroupDescription"].should.equal(
+ "test option group"
+ )
+ option_group["OptionGroup"]["MajorEngineVersion"].should.equal("5.6")
+
+
+@mock_rds
+def test_create_option_group_bad_engine_name():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group.when.called_with(
+ OptionGroupName="test",
+ EngineName="invalid_engine",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test invalid engine",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_create_option_group_bad_engine_major_version():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group.when.called_with(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="6.6.6",
+ OptionGroupDescription="test invalid engine version",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_create_option_group_empty_description():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group.when.called_with(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_create_option_group_duplicate():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ conn.create_option_group.when.called_with(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_describe_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ option_groups = conn.describe_option_groups(OptionGroupName="test")
+ option_groups["OptionGroupsList"][0]["OptionGroupName"].should.equal("test")
+
+
+@mock_rds
+def test_describe_non_existent_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.describe_option_groups.when.called_with(
+ OptionGroupName="not-a-option-group"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_delete_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ option_groups = conn.describe_option_groups(OptionGroupName="test")
+ option_groups["OptionGroupsList"][0]["OptionGroupName"].should.equal("test")
+ conn.delete_option_group(OptionGroupName="test")
+ conn.describe_option_groups.when.called_with(OptionGroupName="test").should.throw(
+ ClientError
+ )
+
+
+@mock_rds
+def test_delete_non_existent_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.delete_option_group.when.called_with(
+ OptionGroupName="non-existent"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_describe_option_group_options():
+ conn = boto3.client("rds", region_name="us-west-2")
+ option_group_options = conn.describe_option_group_options(EngineName="sqlserver-ee")
+ len(option_group_options["OptionGroupOptions"]).should.equal(4)
+ option_group_options = conn.describe_option_group_options(
+ EngineName="sqlserver-ee", MajorEngineVersion="11.00"
+ )
+ len(option_group_options["OptionGroupOptions"]).should.equal(2)
+ option_group_options = conn.describe_option_group_options(
+ EngineName="mysql", MajorEngineVersion="5.6"
+ )
+ len(option_group_options["OptionGroupOptions"]).should.equal(1)
+ conn.describe_option_group_options.when.called_with(
+ EngineName="non-existent"
+ ).should.throw(ClientError)
+ conn.describe_option_group_options.when.called_with(
+ EngineName="mysql", MajorEngineVersion="non-existent"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_modify_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ # TODO: create option and validate before deleting.
+ # if Someone can tell me how the hell to use this function
+ # to add options to an option_group, I can finish coding this.
+ result = conn.modify_option_group(
+ OptionGroupName="test",
+ OptionsToInclude=[],
+ OptionsToRemove=["MEMCACHED"],
+ ApplyImmediately=True,
+ )
+ result["OptionGroup"]["EngineName"].should.equal("mysql")
+ result["OptionGroup"]["Options"].should.equal([])
+ result["OptionGroup"]["OptionGroupName"].should.equal("test")
+
+
+@mock_rds
+def test_modify_option_group_no_options():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ conn.modify_option_group.when.called_with(OptionGroupName="test").should.throw(
+ ClientError
+ )
+
+
+@mock_rds
+def test_modify_non_existent_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.modify_option_group.when.called_with(
+ OptionGroupName="non-existent",
+ OptionsToInclude=[{"OptionName": "test-option"}],
+ ).should.throw(ClientError, "Specified OptionGroupName: non-existent not found.")
+
+
+@mock_rds
+def test_delete_database_with_protection():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBInstanceClass="db.m1.small",
+ DeletionProtection=True,
+ )
+
+ with pytest.raises(ClientError) as exc:
+ conn.delete_db_instance(DBInstanceIdentifier="db-primary-1")
+ err = exc.value.response["Error"]
+ err["Message"].should.equal("Can't delete Instance with protection enabled")
+
+
+@mock_rds
+def test_delete_non_existent_database():
+ conn = boto3.client("rds", region_name="us-west-2")
+ with pytest.raises(ClientError) as ex:
+ conn.delete_db_instance(DBInstanceIdentifier="non-existent")
+ ex.value.response["Error"]["Code"].should.equal("DBInstanceNotFound")
+ ex.value.response["Error"]["Message"].should.equal(
+ "DBInstance non-existent not found."
+ )
+
+
+@mock_rds
+def test_list_tags_invalid_arn():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.list_tags_for_resource.when.called_with(
+ ResourceName="arn:aws:rds:bad-arn"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_list_tags_db():
+    conn = boto3.client("rds", region_name="us-west-2")
+    result = conn.list_tags_for_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:foo"
+    )
+    result["TagList"].should.equal([])
+    test_instance = conn.create_db_instance(
+        DBInstanceIdentifier="db-with-tags",
+        AllocatedStorage=10,
+        DBInstanceClass="db.m1.small",  # instance class (values were swapped with Engine)
+        Engine="postgres",  # engine name (values were swapped with DBInstanceClass)
+        MasterUsername="root",
+        MasterUserPassword="hunter2",
+        Port=1234,
+        DBSecurityGroups=["my_sg"],
+        Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+    )
+    result = conn.list_tags_for_resource(
+        ResourceName=test_instance["DBInstance"]["DBInstanceArn"]
+    )
+    result["TagList"].should.equal(
+        [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+    )
+
+
+@mock_rds
+def test_add_tags_db():
+    conn = boto3.client("rds", region_name="us-west-2")
+    conn.create_db_instance(
+        DBInstanceIdentifier="db-without-tags",
+        AllocatedStorage=10,
+        DBInstanceClass="db.m1.small",  # instance class (values were swapped with Engine)
+        Engine="postgres",  # engine name (values were swapped with DBInstanceClass)
+        MasterUsername="root",
+        MasterUserPassword="hunter2",
+        Port=1234,
+        DBSecurityGroups=["my_sg"],
+        Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+    )
+    result = conn.list_tags_for_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-without-tags"
+    )
+    list(result["TagList"]).should.have.length_of(2)
+    conn.add_tags_to_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-without-tags",
+        Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
+    )
+    result = conn.list_tags_for_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-without-tags"
+    )
+    list(result["TagList"]).should.have.length_of(3)
+
+
+@mock_rds
+def test_remove_tags_db():
+    conn = boto3.client("rds", region_name="us-west-2")
+    conn.create_db_instance(
+        DBInstanceIdentifier="db-with-tags",
+        AllocatedStorage=10,
+        DBInstanceClass="db.m1.small",  # instance class (values were swapped with Engine)
+        Engine="postgres",  # engine name (values were swapped with DBInstanceClass)
+        MasterUsername="root",
+        MasterUserPassword="hunter2",
+        Port=1234,
+        DBSecurityGroups=["my_sg"],
+        Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+    )
+    result = conn.list_tags_for_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-with-tags"
+    )
+    list(result["TagList"]).should.have.length_of(2)
+    conn.remove_tags_from_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-with-tags", TagKeys=["foo"]
+    )
+    result = conn.list_tags_for_resource(
+        ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-with-tags"
+    )
+    len(result["TagList"]).should.equal(1)
+
+
+@mock_rds
+def test_list_tags_snapshot():
+ conn = boto3.client("rds", region_name="us-west-2")
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:foo"
+ )
+ result["TagList"].should.equal([])
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ snapshot = conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1",
+ DBSnapshotIdentifier="snapshot-with-tags",
+ Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName=snapshot["DBSnapshot"]["DBSnapshotArn"]
+ )
+ result["TagList"].should.equal(
+ [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+ )
+
+
+@mock_rds
+def test_add_tags_snapshot():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1",
+ DBSnapshotIdentifier="snapshot-without-tags",
+ Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags"
+ )
+ list(result["TagList"]).should.have.length_of(2)
+ conn.add_tags_to_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags",
+ Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags"
+ )
+ list(result["TagList"]).should.have.length_of(3)
+
+
+@mock_rds
+def test_remove_tags_snapshot():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-primary-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+ conn.create_db_snapshot(
+ DBInstanceIdentifier="db-primary-1",
+ DBSnapshotIdentifier="snapshot-with-tags",
+ Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags"
+ )
+ list(result["TagList"]).should.have.length_of(2)
+ conn.remove_tags_from_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags",
+ TagKeys=["foo"],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags"
+ )
+ len(result["TagList"]).should.equal(1)
+
+
+@mock_rds
+def test_add_tags_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
+ )
+ list(result["TagList"]).should.have.length_of(0)
+ conn.add_tags_to_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test",
+ Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
+ )
+ list(result["TagList"]).should.have.length_of(2)
+
+
+@mock_rds
+def test_remove_tags_option_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_option_group(
+ OptionGroupName="test",
+ EngineName="mysql",
+ MajorEngineVersion="5.6",
+ OptionGroupDescription="test option group",
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
+ )
+ conn.add_tags_to_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test",
+ Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
+ )
+ list(result["TagList"]).should.have.length_of(2)
+ conn.remove_tags_from_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test", TagKeys=["foo"]
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
+ )
+ list(result["TagList"]).should.have.length_of(1)
+
+
+@mock_rds
+def test_create_database_security_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ result = conn.create_db_security_group(
+ DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
+ )
+ result["DBSecurityGroup"]["DBSecurityGroupName"].should.equal("db_sg")
+ result["DBSecurityGroup"]["DBSecurityGroupDescription"].should.equal(
+ "DB Security Group"
+ )
+ result["DBSecurityGroup"]["IPRanges"].should.equal([])
+
+
+@mock_rds
+def test_get_security_groups():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ result = conn.describe_db_security_groups()
+ result["DBSecurityGroups"].should.have.length_of(0)
+
+ conn.create_db_security_group(
+ DBSecurityGroupName="db_sg1", DBSecurityGroupDescription="DB Security Group"
+ )
+ conn.create_db_security_group(
+ DBSecurityGroupName="db_sg2", DBSecurityGroupDescription="DB Security Group"
+ )
+
+ result = conn.describe_db_security_groups()
+ result["DBSecurityGroups"].should.have.length_of(2)
+
+ result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1")
+ result["DBSecurityGroups"].should.have.length_of(1)
+ result["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal("db_sg1")
+
+
+@mock_rds
+def test_get_non_existent_security_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.describe_db_security_groups.when.called_with(
+ DBSecurityGroupName="not-a-sg"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_delete_database_security_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_security_group(
+ DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
+ )
+
+ result = conn.describe_db_security_groups()
+ result["DBSecurityGroups"].should.have.length_of(1)
+
+ conn.delete_db_security_group(DBSecurityGroupName="db_sg")
+ result = conn.describe_db_security_groups()
+ result["DBSecurityGroups"].should.have.length_of(0)
+
+
+@mock_rds
+def test_delete_non_existent_security_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.delete_db_security_group.when.called_with(
+ DBSecurityGroupName="not-a-db"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_security_group_authorize():
+ conn = boto3.client("rds", region_name="us-west-2")
+ security_group = conn.create_db_security_group(
+ DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
+ )
+ security_group["DBSecurityGroup"]["IPRanges"].should.equal([])
+
+ conn.authorize_db_security_group_ingress(
+ DBSecurityGroupName="db_sg", CIDRIP="10.3.2.45/32"
+ )
+
+ result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
+ result["DBSecurityGroups"][0]["IPRanges"].should.have.length_of(1)
+ result["DBSecurityGroups"][0]["IPRanges"].should.equal(
+ [{"Status": "authorized", "CIDRIP": "10.3.2.45/32"}]
+ )
+
+ conn.authorize_db_security_group_ingress(
+ DBSecurityGroupName="db_sg", CIDRIP="10.3.2.46/32"
+ )
+ result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
+ result["DBSecurityGroups"][0]["IPRanges"].should.have.length_of(2)
+ result["DBSecurityGroups"][0]["IPRanges"].should.equal(
+ [
+ {"Status": "authorized", "CIDRIP": "10.3.2.45/32"},
+ {"Status": "authorized", "CIDRIP": "10.3.2.46/32"},
+ ]
+ )
+
+
+@mock_rds
+def test_add_security_group_to_database():
+    conn = boto3.client("rds", region_name="us-west-2")
+
+    conn.create_db_instance(
+        DBInstanceIdentifier="db-master-1",
+        AllocatedStorage=10,
+        DBInstanceClass="db.m1.small",  # instance class (values were swapped with Engine)
+        Engine="postgres",  # engine name (values were swapped with DBInstanceClass)
+        MasterUsername="root",
+        MasterUserPassword="hunter2",
+        Port=1234,
+    )
+
+    result = conn.describe_db_instances()
+    result["DBInstances"][0]["DBSecurityGroups"].should.equal([])
+    conn.create_db_security_group(
+        DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
+    )
+    conn.modify_db_instance(
+        DBInstanceIdentifier="db-master-1", DBSecurityGroups=["db_sg"]
+    )
+    result = conn.describe_db_instances()
+    result["DBInstances"][0]["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal(
+        "db_sg"
+    )
+
+
+@mock_rds
+def test_list_tags_security_group():
+    conn = boto3.client("rds", region_name="us-west-2")
+    result = conn.describe_db_security_groups()  # was describe_db_subnet_groups (copy-paste)
+    result["DBSecurityGroups"].should.have.length_of(0)
+
+    security_group = conn.create_db_security_group(
+        DBSecurityGroupName="db_sg",
+        DBSecurityGroupDescription="DB Security Group",
+        Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
+    )["DBSecurityGroup"]["DBSecurityGroupName"]
+    resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group)
+    result = conn.list_tags_for_resource(ResourceName=resource)
+    result["TagList"].should.equal(
+        [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+    )
+
+
+@mock_rds
+def test_add_tags_security_group():
+    conn = boto3.client("rds", region_name="us-west-2")
+    result = conn.describe_db_security_groups()  # was describe_db_subnet_groups (copy-paste)
+    result["DBSecurityGroups"].should.have.length_of(0)
+
+    security_group = conn.create_db_security_group(
+        DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
+    )["DBSecurityGroup"]["DBSecurityGroupName"]
+
+    resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group)
+    conn.add_tags_to_resource(
+        ResourceName=resource,
+        Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
+    )
+
+    result = conn.list_tags_for_resource(ResourceName=resource)
+    result["TagList"].should.equal(
+        [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+    )
+
+
+@mock_rds
+def test_remove_tags_security_group():
+    conn = boto3.client("rds", region_name="us-west-2")
+    result = conn.describe_db_security_groups()  # was describe_db_subnet_groups (copy-paste)
+    result["DBSecurityGroups"].should.have.length_of(0)
+
+    security_group = conn.create_db_security_group(
+        DBSecurityGroupName="db_sg",
+        DBSecurityGroupDescription="DB Security Group",
+        Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
+    )["DBSecurityGroup"]["DBSecurityGroupName"]
+
+    resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group)
+    conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["foo"])
+
+    result = conn.list_tags_for_resource(ResourceName=resource)
+    result["TagList"].should.equal([{"Value": "bar1", "Key": "foo1"}])
+
+
+@mock_ec2
+@mock_rds
+def test_create_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet1 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+ subnet2 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.2.0/24")[
+ "Subnet"
+ ]
+
+ subnet_ids = [subnet1["SubnetId"], subnet2["SubnetId"]]
+ conn = boto3.client("rds", region_name="us-west-2")
+ result = conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=subnet_ids,
+ )
+ result["DBSubnetGroup"]["DBSubnetGroupName"].should.equal("db_subnet")
+ result["DBSubnetGroup"]["DBSubnetGroupDescription"].should.equal("my db subnet")
+ subnets = result["DBSubnetGroup"]["Subnets"]
+ subnet_group_ids = [subnets[0]["SubnetIdentifier"], subnets[1]["SubnetIdentifier"]]
+ list(subnet_group_ids).should.equal(subnet_ids)
+
+
+@mock_ec2
+@mock_rds
+def test_modify_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet1 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+ subnet2 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.2.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet1["SubnetId"]],
+ )
+
+ conn.modify_db_subnet_group(
+ DBSubnetGroupName="db_subnet",
+ DBSubnetGroupDescription="my updated desc",
+ SubnetIds=[subnet1["SubnetId"], subnet2["SubnetId"]],
+ )
+
+ conn.describe_db_subnet_groups()["DBSubnetGroups"]
+ # FIXME: Group is deleted atm
+ # TODO: we should check whether all attrs are persisted
+
+
+@mock_ec2
+@mock_rds
+def test_create_database_in_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet1",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ )
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSubnetGroupName="db_subnet1",
+ )
+ result = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ result["DBInstances"][0]["DBSubnetGroup"]["DBSubnetGroupName"].should.equal(
+ "db_subnet1"
+ )
+
+
+@mock_ec2
+@mock_rds
+def test_describe_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet1",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ )
+ conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet2",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ )
+
+ resp = conn.describe_db_subnet_groups()
+ resp["DBSubnetGroups"].should.have.length_of(2)
+
+ subnets = resp["DBSubnetGroups"][0]["Subnets"]
+ subnets.should.have.length_of(1)
+
+ list(
+ conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1")["DBSubnetGroups"]
+ ).should.have.length_of(1)
+
+ conn.describe_db_subnet_groups.when.called_with(
+ DBSubnetGroupName="not-a-subnet"
+ ).should.throw(ClientError)
+
+
+@mock_ec2
+@mock_rds
+def test_delete_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ result = conn.describe_db_subnet_groups()
+ result["DBSubnetGroups"].should.have.length_of(0)
+
+ conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet1",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ )
+ result = conn.describe_db_subnet_groups()
+ result["DBSubnetGroups"].should.have.length_of(1)
+
+ conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1")
+ result = conn.describe_db_subnet_groups()
+ result["DBSubnetGroups"].should.have.length_of(0)
+
+ conn.delete_db_subnet_group.when.called_with(
+ DBSubnetGroupName="db_subnet1"
+ ).should.throw(ClientError)
+
+
+@mock_ec2
+@mock_rds
+def test_list_tags_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ result = conn.describe_db_subnet_groups()
+ result["DBSubnetGroups"].should.have.length_of(0)
+
+ subnet = conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet1",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
+ )["DBSubnetGroup"]["DBSubnetGroupName"]
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet)
+ )
+ result["TagList"].should.equal(
+ [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+ )
+
+
+@mock_rds
+def test_modify_tags_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ client_tags = [{"Key": "character_set_client", "Value": "utf-8"}]
+ result = conn.create_db_parameter_group(
+ DBParameterGroupName="test-sqlserver-2017",
+ DBParameterGroupFamily="mysql5.6",
+ Description="MySQL Group",
+ Tags=client_tags,
+ )
+ resource = result["DBParameterGroup"]["DBParameterGroupArn"]
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(client_tags)
+ server_tags = [{"Key": "character_set_server", "Value": "utf-8"}]
+ conn.add_tags_to_resource(ResourceName=resource, Tags=server_tags)
+ combined_tags = client_tags + server_tags
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(combined_tags)
+
+ conn.remove_tags_from_resource(
+ ResourceName=resource, TagKeys=["character_set_client"]
+ )
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(server_tags)
+
+
+@mock_rds
+def test_modify_tags_event_subscription():
+ conn = boto3.client("rds", region_name="us-west-2")
+ tags = [{"Key": "hello", "Value": "world"}]
+ result = conn.create_event_subscription(
+ SubscriptionName="my-instance-events",
+ SourceType="db-instance",
+ EventCategories=["backup", "recovery"],
+ SnsTopicArn="arn:aws:sns:us-east-1:123456789012:interesting-events",
+ Tags=tags,
+ )
+ resource = result["EventSubscription"]["EventSubscriptionArn"]
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(tags)
+ new_tags = [{"Key": "new_key", "Value": "new_value"}]
+ conn.add_tags_to_resource(ResourceName=resource, Tags=new_tags)
+ combined_tags = tags + new_tags
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(combined_tags)
+
+ conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["new_key"])
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(tags)
+
+
+@mock_ec2
+@mock_rds
+def test_add_tags_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ result = conn.describe_db_subnet_groups()
+ result["DBSubnetGroups"].should.have.length_of(0)
+
+ subnet = conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet1",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ Tags=[],
+ )["DBSubnetGroup"]["DBSubnetGroupName"]
+ resource = "arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet)
+
+ conn.add_tags_to_resource(
+ ResourceName=resource,
+ Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
+ )
+
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal(
+ [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
+ )
+
+
+@mock_ec2
+@mock_rds
+def test_remove_tags_database_subnet_group():
+ vpc_conn = boto3.client("ec2", "us-west-2")
+ vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
+ subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
+ "Subnet"
+ ]
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ result = conn.describe_db_subnet_groups()
+ result["DBSubnetGroups"].should.have.length_of(0)
+
+ subnet = conn.create_db_subnet_group(
+ DBSubnetGroupName="db_subnet1",
+ DBSubnetGroupDescription="my db subnet",
+ SubnetIds=[subnet["SubnetId"]],
+ Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
+ )["DBSubnetGroup"]["DBSubnetGroupName"]
+ resource = "arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet)
+
+ conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["foo"])
+
+ result = conn.list_tags_for_resource(ResourceName=resource)
+ result["TagList"].should.equal([{"Value": "bar1", "Key": "foo1"}])
+
+
+@mock_rds
+def test_create_database_replica():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ )
+
+ replica = conn.create_db_instance_read_replica(
+ DBInstanceIdentifier="db-replica-1",
+ SourceDBInstanceIdentifier="db-master-1",
+ DBInstanceClass="db.m1.small",
+ )
+ replica["DBInstance"]["ReadReplicaSourceDBInstanceIdentifier"].should.equal(
+ "db-master-1"
+ )
+ replica["DBInstance"]["DBInstanceClass"].should.equal("db.m1.small")
+ replica["DBInstance"]["DBInstanceIdentifier"].should.equal("db-replica-1")
+
+ master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ master["DBInstances"][0]["ReadReplicaDBInstanceIdentifiers"].should.equal(
+ ["db-replica-1"]
+ )
+
+ conn.delete_db_instance(DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True)
+
+ master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
+ master["DBInstances"][0]["ReadReplicaDBInstanceIdentifiers"].should.equal([])
+
+
+@mock_rds
+@mock_kms
+def test_create_database_with_encrypted_storage():
+ kms_conn = boto3.client("kms", region_name="us-west-2")
+ key = kms_conn.create_key(
+ Policy="my RDS encryption policy",
+ Description="RDS encryption key",
+ KeyUsage="ENCRYPT_DECRYPT",
+ )
+
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ DBSecurityGroups=["my_sg"],
+ StorageEncrypted=True,
+ KmsKeyId=key["KeyMetadata"]["KeyId"],
+ )
+
+ database["DBInstance"]["StorageEncrypted"].should.equal(True)
+ database["DBInstance"]["KmsKeyId"].should.equal(key["KeyMetadata"]["KeyId"])
+
+
+@mock_rds
+def test_create_db_parameter_group():
+    region = "us-west-2"
+    pg_name = "test"
+    conn = boto3.client("rds", region_name=region)
+    db_parameter_group = conn.create_db_parameter_group(
+        DBParameterGroupName=pg_name,  # use the local constant consistently
+        DBParameterGroupFamily="mysql5.6",
+        Description="test parameter group",
+    )
+
+    db_parameter_group["DBParameterGroup"]["DBParameterGroupName"].should.equal(pg_name)
+    db_parameter_group["DBParameterGroup"]["DBParameterGroupFamily"].should.equal(
+        "mysql5.6"
+    )
+    db_parameter_group["DBParameterGroup"]["Description"].should.equal(
+        "test parameter group"
+    )
+    db_parameter_group["DBParameterGroup"]["DBParameterGroupArn"].should.equal(
+        "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name)
+    )
+
+
+@mock_rds
+def test_create_db_instance_with_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_parameter_group(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ )
+
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="mysql",
+ DBInstanceClass="db.m1.small",
+ DBParameterGroupName="test",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ )
+
+ len(database["DBInstance"]["DBParameterGroups"]).should.equal(1)
+ database["DBInstance"]["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
+ "test"
+ )
+ database["DBInstance"]["DBParameterGroups"][0]["ParameterApplyStatus"].should.equal(
+ "in-sync"
+ )
+
+
+@mock_rds
+def test_create_database_with_default_port():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="postgres",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ DBSecurityGroups=["my_sg"],
+ )
+ database["DBInstance"]["Endpoint"]["Port"].should.equal(5432)
+
+
+@mock_rds
+def test_modify_db_instance_with_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ AllocatedStorage=10,
+ Engine="mysql",
+ DBInstanceClass="db.m1.small",
+ MasterUsername="root",
+ MasterUserPassword="hunter2",
+ Port=1234,
+ )
+
+ len(database["DBInstance"]["DBParameterGroups"]).should.equal(1)
+ database["DBInstance"]["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
+ "default.mysql5.6"
+ )
+ database["DBInstance"]["DBParameterGroups"][0]["ParameterApplyStatus"].should.equal(
+ "in-sync"
+ )
+
+ conn.create_db_parameter_group(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ )
+ conn.modify_db_instance(
+ DBInstanceIdentifier="db-master-1",
+ DBParameterGroupName="test",
+ ApplyImmediately=True,
+ )
+
+ database = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")[
+ "DBInstances"
+ ][0]
+ len(database["DBParameterGroups"]).should.equal(1)
+ database["DBParameterGroups"][0]["DBParameterGroupName"].should.equal("test")
+ database["DBParameterGroups"][0]["ParameterApplyStatus"].should.equal("in-sync")
+
+
+@mock_rds
+def test_create_db_parameter_group_empty_description():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_parameter_group.when.called_with(
+ DBParameterGroupName="test", DBParameterGroupFamily="mysql5.6", Description=""
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_create_db_parameter_group_duplicate():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_parameter_group(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ )
+ conn.create_db_parameter_group.when.called_with(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_describe_db_parameter_group():
+ region = "us-west-2"
+ pg_name = "test"
+ conn = boto3.client("rds", region_name=region)
+ conn.create_db_parameter_group(
+ DBParameterGroupName=pg_name,
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ )
+ db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
+ db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
+ "test"
+ )
+ db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupArn"].should.equal(
+ "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name)
+ )
+
+
+@mock_rds
+def test_describe_non_existent_db_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
+ len(db_parameter_groups["DBParameterGroups"]).should.equal(0)
+
+
+@mock_rds
+def test_delete_db_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_parameter_group(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ )
+ db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
+ db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
+ "test"
+ )
+ conn.delete_db_parameter_group(DBParameterGroupName="test")
+ db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
+ len(db_parameter_groups["DBParameterGroups"]).should.equal(0)
+
+
+@mock_rds
+def test_modify_db_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_parameter_group(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ )
+
+ modify_result = conn.modify_db_parameter_group(
+ DBParameterGroupName="test",
+ Parameters=[
+ {
+ "ParameterName": "foo",
+ "ParameterValue": "foo_val",
+ "Description": "test param",
+ "ApplyMethod": "immediate",
+ }
+ ],
+ )
+
+ modify_result["DBParameterGroupName"].should.equal("test")
+
+ db_parameters = conn.describe_db_parameters(DBParameterGroupName="test")
+ db_parameters["Parameters"][0]["ParameterName"].should.equal("foo")
+ db_parameters["Parameters"][0]["ParameterValue"].should.equal("foo_val")
+ db_parameters["Parameters"][0]["Description"].should.equal("test param")
+ db_parameters["Parameters"][0]["ApplyMethod"].should.equal("immediate")
+
+
+@mock_rds
+def test_delete_non_existent_db_parameter_group():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.delete_db_parameter_group.when.called_with(
+ DBParameterGroupName="non-existent"
+ ).should.throw(ClientError)
+
+
+@mock_rds
+def test_create_parameter_group_with_tags():
+ conn = boto3.client("rds", region_name="us-west-2")
+ conn.create_db_parameter_group(
+ DBParameterGroupName="test",
+ DBParameterGroupFamily="mysql5.6",
+ Description="test parameter group",
+ Tags=[{"Key": "foo", "Value": "bar"}],
+ )
+ result = conn.list_tags_for_resource(
+ ResourceName="arn:aws:rds:us-west-2:1234567890:pg:test"
+ )
+ result["TagList"].should.equal([{"Value": "bar", "Key": "foo"}])
+
+
+@mock_rds
+def test_create_db_with_iam_authentication():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ database = conn.create_db_instance(
+ DBInstanceIdentifier="rds",
+ DBInstanceClass="db.t1.micro",
+ Engine="postgres",
+ EnableIAMDatabaseAuthentication=True,
+ )
+
+ db_instance = database["DBInstance"]
+ db_instance["IAMDatabaseAuthenticationEnabled"].should.equal(True)
+
+
+@mock_rds
+def test_create_db_snapshot_with_iam_authentication():
+ conn = boto3.client("rds", region_name="us-west-2")
+
+ conn.create_db_instance(
+ DBInstanceIdentifier="rds",
+ DBInstanceClass="db.t1.micro",
+ Engine="postgres",
+ EnableIAMDatabaseAuthentication=True,
+ )
+
+ snapshot = conn.create_db_snapshot(
+ DBInstanceIdentifier="rds", DBSnapshotIdentifier="snapshot"
+ ).get("DBSnapshot")
+
+ snapshot.get("IAMDatabaseAuthenticationEnabled").should.equal(True)
+
+
+@mock_rds
+def test_create_db_instance_with_tags():
+ client = boto3.client("rds", region_name="us-west-2")
+ tags = [{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}]
+ db_instance_identifier = "test-db-instance"
+ resp = client.create_db_instance(
+ DBInstanceIdentifier=db_instance_identifier,
+ Engine="postgres",
+ DBName="staging-postgres",
+ DBInstanceClass="db.m1.small",
+ Tags=tags,
+ )
+ resp["DBInstance"]["TagList"].should.equal(tags)
+
+ resp = client.describe_db_instances(DBInstanceIdentifier=db_instance_identifier)
+ resp["DBInstances"][0]["TagList"].should.equal(tags)
diff --git a/tests/test_rds2/test_rds2_cloudformation.py b/tests/test_rds/test_rds_cloudformation.py
similarity index 98%
rename from tests/test_rds2/test_rds2_cloudformation.py
rename to tests/test_rds/test_rds_cloudformation.py
index 015ceb26f..32f8febc3 100644
--- a/tests/test_rds2/test_rds2_cloudformation.py
+++ b/tests/test_rds/test_rds_cloudformation.py
@@ -1,13 +1,13 @@
import boto3
import json
import sure # noqa # pylint: disable=unused-import
-from moto import mock_cloudformation, mock_ec2, mock_rds2
+from moto import mock_cloudformation, mock_ec2, mock_rds
from tests.test_cloudformation.fixtures import rds_mysql_with_db_parameter_group
from tests.test_cloudformation.fixtures import rds_mysql_with_read_replica
@mock_ec2
-@mock_rds2
+@mock_rds
@mock_cloudformation
def test_create_subnetgroup_via_cf():
vpc_conn = boto3.client("ec2", "us-west-2")
@@ -45,7 +45,7 @@ def test_create_subnetgroup_via_cf():
@mock_ec2
-@mock_rds2
+@mock_rds
@mock_cloudformation
def test_create_dbinstance_via_cf():
vpc_conn = boto3.client("ec2", "us-west-2")
@@ -87,7 +87,7 @@ def test_create_dbinstance_via_cf():
@mock_ec2
-@mock_rds2
+@mock_rds
@mock_cloudformation
def test_create_dbsecuritygroup_via_cf():
vpc_conn = boto3.client("ec2", "us-west-2")
@@ -118,7 +118,7 @@ def test_create_dbsecuritygroup_via_cf():
@mock_cloudformation
@mock_ec2
-@mock_rds2
+@mock_rds
def test_rds_db_parameter_groups():
ec2_conn = boto3.client("ec2", region_name="us-west-1")
ec2_conn.create_security_group(
@@ -168,7 +168,7 @@ def test_rds_db_parameter_groups():
@mock_cloudformation
@mock_ec2
-@mock_rds2
+@mock_rds
def test_rds_mysql_with_read_replica():
ec2_conn = boto3.client("ec2", region_name="us-west-1")
ec2_conn.create_security_group(
@@ -220,7 +220,7 @@ def test_rds_mysql_with_read_replica():
@mock_cloudformation
@mock_ec2
-@mock_rds2
+@mock_rds
def test_rds_mysql_with_read_replica_in_vpc():
template_json = json.dumps(rds_mysql_with_read_replica.template)
cf = boto3.client("cloudformation", "eu-central-1")
diff --git a/tests/test_rds2/test_rds2_clusters.py b/tests/test_rds/test_rds_clusters.py
similarity index 98%
rename from tests/test_rds2/test_rds2_clusters.py
rename to tests/test_rds/test_rds_clusters.py
index 7d49c78bb..5c4d66314 100644
--- a/tests/test_rds2/test_rds2_clusters.py
+++ b/tests/test_rds/test_rds_clusters.py
@@ -3,11 +3,11 @@ import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
-from moto import mock_rds2
+from moto import mock_rds
from moto.core import ACCOUNT_ID
-@mock_rds2
+@mock_rds
def test_describe_db_cluster_initial():
client = boto3.client("rds", region_name="eu-north-1")
@@ -15,7 +15,7 @@ def test_describe_db_cluster_initial():
resp.should.have.key("DBClusters").should.have.length_of(0)
-@mock_rds2
+@mock_rds
def test_create_db_cluster_needs_master_username():
client = boto3.client("rds", region_name="eu-north-1")
@@ -28,7 +28,7 @@ def test_create_db_cluster_needs_master_username():
)
-@mock_rds2
+@mock_rds
def test_create_db_cluster_needs_master_user_password():
client = boto3.client("rds", region_name="eu-north-1")
@@ -43,7 +43,7 @@ def test_create_db_cluster_needs_master_user_password():
)
-@mock_rds2
+@mock_rds
def test_create_db_cluster_needs_long_master_user_password():
client = boto3.client("rds", region_name="eu-north-1")
@@ -61,7 +61,7 @@ def test_create_db_cluster_needs_long_master_user_password():
)
-@mock_rds2
+@mock_rds
def test_create_db_cluster__verify_default_properties():
client = boto3.client("rds", region_name="eu-north-1")
@@ -126,7 +126,7 @@ def test_create_db_cluster__verify_default_properties():
cluster.should.have.key("ClusterCreateTime")
-@mock_rds2
+@mock_rds
def test_create_db_cluster_with_database_name():
client = boto3.client("rds", region_name="eu-north-1")
@@ -143,7 +143,7 @@ def test_create_db_cluster_with_database_name():
cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora8.0")
-@mock_rds2
+@mock_rds
def test_create_db_cluster_additional_parameters():
client = boto3.client("rds", region_name="eu-north-1")
@@ -169,7 +169,7 @@ def test_create_db_cluster_additional_parameters():
cluster.should.have.key("DeletionProtection").equal(True)
-@mock_rds2
+@mock_rds
def test_describe_db_cluster_after_creation():
client = boto3.client("rds", region_name="eu-north-1")
@@ -194,7 +194,7 @@ def test_describe_db_cluster_after_creation():
].should.have.length_of(1)
-@mock_rds2
+@mock_rds
def test_delete_db_cluster():
client = boto3.client("rds", region_name="eu-north-1")
@@ -210,7 +210,7 @@ def test_delete_db_cluster():
client.describe_db_clusters()["DBClusters"].should.have.length_of(0)
-@mock_rds2
+@mock_rds
def test_delete_db_cluster_that_is_protected():
client = boto3.client("rds", region_name="eu-north-1")
@@ -228,7 +228,7 @@ def test_delete_db_cluster_that_is_protected():
err["Message"].should.equal("Can't delete Cluster with protection enabled")
-@mock_rds2
+@mock_rds
def test_delete_db_cluster_unknown_cluster():
client = boto3.client("rds", region_name="eu-north-1")
@@ -239,7 +239,7 @@ def test_delete_db_cluster_unknown_cluster():
err["Message"].should.equal("DBCluster cluster-unknown not found.")
-@mock_rds2
+@mock_rds
def test_start_db_cluster_unknown_cluster():
client = boto3.client("rds", region_name="eu-north-1")
@@ -250,7 +250,7 @@ def test_start_db_cluster_unknown_cluster():
err["Message"].should.equal("DBCluster cluster-unknown not found.")
-@mock_rds2
+@mock_rds
def test_start_db_cluster_after_stopping():
client = boto3.client("rds", region_name="eu-north-1")
@@ -267,7 +267,7 @@ def test_start_db_cluster_after_stopping():
cluster["Status"].should.equal("available")
-@mock_rds2
+@mock_rds
def test_start_db_cluster_without_stopping():
client = boto3.client("rds", region_name="eu-north-1")
@@ -285,7 +285,7 @@ def test_start_db_cluster_without_stopping():
err["Message"].should.equal("DbCluster cluster-id is not in stopped state.")
-@mock_rds2
+@mock_rds
def test_stop_db_cluster():
client = boto3.client("rds", region_name="eu-north-1")
@@ -306,7 +306,7 @@ def test_stop_db_cluster():
cluster["Status"].should.equal("stopped")
-@mock_rds2
+@mock_rds
def test_stop_db_cluster_already_stopped():
client = boto3.client("rds", region_name="eu-north-1")
@@ -326,7 +326,7 @@ def test_stop_db_cluster_already_stopped():
err["Message"].should.equal("DbCluster cluster-id is not in available state.")
-@mock_rds2
+@mock_rds
def test_stop_db_cluster_unknown_cluster():
client = boto3.client("rds", region_name="eu-north-1")
@@ -337,7 +337,7 @@ def test_stop_db_cluster_unknown_cluster():
err["Message"].should.equal("DBCluster cluster-unknown not found.")
-@mock_rds2
+@mock_rds
def test_create_db_cluster_snapshot_fails_for_unknown_cluster():
conn = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as exc:
@@ -348,7 +348,7 @@ def test_create_db_cluster_snapshot_fails_for_unknown_cluster():
err["Message"].should.equal("DBCluster db-primary-1 not found.")
-@mock_rds2
+@mock_rds
def test_create_db_cluster_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
@@ -373,7 +373,7 @@ def test_create_db_cluster_snapshot():
result["TagList"].should.equal([])
-@mock_rds2
+@mock_rds
def test_create_db_cluster_snapshot_copy_tags():
conn = boto3.client("rds", region_name="us-west-2")
@@ -404,7 +404,7 @@ def test_create_db_cluster_snapshot_copy_tags():
)
-@mock_rds2
+@mock_rds
def test_copy_db_cluster_snapshot_fails_for_unknown_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
@@ -418,7 +418,7 @@ def test_copy_db_cluster_snapshot_fails_for_unknown_snapshot():
err["Message"].should.equal("DBClusterSnapshot snapshot-1 not found.")
-@mock_rds2
+@mock_rds
def test_copy_db_cluster_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
@@ -451,7 +451,7 @@ def test_copy_db_cluster_snapshot():
result["TagList"].should.equal([])
-@mock_rds2
+@mock_rds
def test_copy_db_cluster_snapshot_fails_for_existed_target_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
@@ -486,7 +486,7 @@ def test_copy_db_cluster_snapshot_fails_for_existed_target_snapshot():
)
-@mock_rds2
+@mock_rds
def test_describe_db_cluster_snapshots():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
@@ -527,7 +527,7 @@ def test_describe_db_cluster_snapshots():
snapshots.should.have.length_of(2)
-@mock_rds2
+@mock_rds
def test_delete_db_cluster_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
@@ -551,7 +551,7 @@ def test_delete_db_cluster_snapshot():
).should.throw(ClientError)
-@mock_rds2
+@mock_rds
def test_restore_db_cluster_from_snapshot():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
@@ -590,7 +590,7 @@ def test_restore_db_cluster_from_snapshot():
].should.have.length_of(1)
-@mock_rds2
+@mock_rds
def test_restore_db_cluster_from_snapshot_and_override_params():
conn = boto3.client("rds", region_name="us-west-2")
conn.create_db_cluster(
diff --git a/tests/test_rds2/test_rds2_event_subscriptions.py b/tests/test_rds/test_rds_event_subscriptions.py
similarity index 97%
rename from tests/test_rds2/test_rds2_event_subscriptions.py
rename to tests/test_rds/test_rds_event_subscriptions.py
index 1e780fcf9..814d6cf67 100644
--- a/tests/test_rds2/test_rds2_event_subscriptions.py
+++ b/tests/test_rds/test_rds_event_subscriptions.py
@@ -3,7 +3,7 @@ import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
-from moto import mock_rds2
+from moto import mock_rds
from moto.core import ACCOUNT_ID
DB_INSTANCE_IDENTIFIER = "db-primary-1"
@@ -24,7 +24,7 @@ def _prepare_db_instance(client):
return resp["DBInstance"]["DBInstanceIdentifier"]
-@mock_rds2
+@mock_rds
def test_create_event_subscription():
client = boto3.client("rds", region_name="us-west-2")
db_identifier = _prepare_db_instance(client)
@@ -56,7 +56,7 @@ def test_create_event_subscription():
es["Enabled"].should.equal(False)
-@mock_rds2
+@mock_rds
def test_create_event_fail_already_exists():
client = boto3.client("rds", region_name="us-west-2")
db_identifier = _prepare_db_instance(client)
@@ -79,7 +79,7 @@ def test_create_event_fail_already_exists():
err["Message"].should.equal("Subscription db-primary-1-events already exists.")
-@mock_rds2
+@mock_rds
def test_delete_event_subscription_fails_unknown_subscription():
client = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as ex:
@@ -90,7 +90,7 @@ def test_delete_event_subscription_fails_unknown_subscription():
err["Message"].should.equal("Subscription my-db-events not found.")
-@mock_rds2
+@mock_rds
def test_delete_event_subscription():
client = boto3.client("rds", region_name="us-west-2")
db_identifier = _prepare_db_instance(client)
@@ -110,7 +110,7 @@ def test_delete_event_subscription():
)
-@mock_rds2
+@mock_rds
def test_describe_event_subscriptions():
client = boto3.client("rds", region_name="us-west-2")
db_identifier = _prepare_db_instance(client)
@@ -126,7 +126,7 @@ def test_describe_event_subscriptions():
subscriptions[0]["CustSubscriptionId"].should.equal(f"{db_identifier}-events")
-@mock_rds2
+@mock_rds
def test_describe_event_subscriptions_fails_unknown_subscription():
client = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as ex:
diff --git a/tests/test_rds2/test_rds2_export_tasks.py b/tests/test_rds/test_rds_export_tasks.py
similarity index 98%
rename from tests/test_rds2/test_rds2_export_tasks.py
rename to tests/test_rds/test_rds_export_tasks.py
index 587abb30e..185bf6f73 100644
--- a/tests/test_rds2/test_rds2_export_tasks.py
+++ b/tests/test_rds/test_rds_export_tasks.py
@@ -3,7 +3,7 @@ import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError
-from moto import mock_rds2
+from moto import mock_rds
from moto.core import ACCOUNT_ID
@@ -25,7 +25,7 @@ def _prepare_db_snapshot(client, snapshot_name="snapshot-1"):
return resp["DBSnapshot"]["DBSnapshotArn"]
-@mock_rds2
+@mock_rds
def test_start_export_task_fails_unknown_snapshot():
client = boto3.client("rds", region_name="us-west-2")
@@ -43,7 +43,7 @@ def test_start_export_task_fails_unknown_snapshot():
err["Message"].should.equal("DBSnapshot snapshot-1 not found.")
-@mock_rds2
+@mock_rds
def test_start_export_task():
client = boto3.client("rds", region_name="us-west-2")
source_arn = _prepare_db_snapshot(client)
@@ -69,7 +69,7 @@ def test_start_export_task():
export["ExportOnly"].should.equal(["schema.table"])
-@mock_rds2
+@mock_rds
def test_start_export_task_fail_already_exists():
client = boto3.client("rds", region_name="us-west-2")
source_arn = _prepare_db_snapshot(client)
@@ -97,7 +97,7 @@ def test_start_export_task_fail_already_exists():
)
-@mock_rds2
+@mock_rds
def test_cancel_export_task_fails_unknown_task():
client = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as ex:
@@ -110,7 +110,7 @@ def test_cancel_export_task_fails_unknown_task():
)
-@mock_rds2
+@mock_rds
def test_cancel_export_task():
client = boto3.client("rds", region_name="us-west-2")
source_arn = _prepare_db_snapshot(client)
@@ -129,7 +129,7 @@ def test_cancel_export_task():
export["Status"].should.equal("canceled")
-@mock_rds2
+@mock_rds
def test_describe_export_tasks():
client = boto3.client("rds", region_name="us-west-2")
source_arn = _prepare_db_snapshot(client)
@@ -147,7 +147,7 @@ def test_describe_export_tasks():
exports[0]["ExportTaskIdentifier"].should.equal("export-snapshot-1")
-@mock_rds2
+@mock_rds
def test_describe_export_tasks_fails_unknown_task():
client = boto3.client("rds", region_name="us-west-2")
with pytest.raises(ClientError) as ex:
diff --git a/tests/test_rds/test_server.py b/tests/test_rds/test_server.py
index 8d81cbd44..8fe0ab6c0 100644
--- a/tests/test_rds/test_server.py
+++ b/tests/test_rds/test_server.py
@@ -1,14 +1,7 @@
+import moto.server as server
import sure # noqa # pylint: disable=unused-import
-import moto.server as server
-from moto import mock_rds
-"""
-Test the different server responses
-"""
-
-
-@mock_rds
def test_list_databases():
backend = server.create_backend_app("rds")
test_client = backend.test_client()
diff --git a/tests/test_rds2/test_utils.py b/tests/test_rds/test_utils.py
similarity index 99%
rename from tests/test_rds2/test_utils.py
rename to tests/test_rds/test_utils.py
index e6b4c0c50..15a3968b5 100644
--- a/tests/test_rds2/test_utils.py
+++ b/tests/test_rds/test_utils.py
@@ -1,6 +1,6 @@
import pytest
-from moto.rds2.utils import (
+from moto.rds.utils import (
FilterDef,
apply_filter,
merge_filters,
diff --git a/tests/test_rds2/__init__.py b/tests/test_rds2/__init__.py
deleted file mode 100644
index 08a1c1568..000000000
--- a/tests/test_rds2/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file is intentionally left blank.
diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py
index b70e70f1c..d33615884 100644
--- a/tests/test_rds2/test_rds2.py
+++ b/tests/test_rds2/test_rds2.py
@@ -1,2020 +1,80 @@
-from botocore.exceptions import ClientError
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
-from moto import mock_ec2, mock_kms, mock_rds2
+
+from moto import mock_rds2
from moto.core import ACCOUNT_ID
+def test_deprecation_warning():
+ with pytest.warns(None) as record:
+ mock_rds2()
+ str(record[0].message).should.contain(
+ "Module mock_rds2 has been deprecated, and will be removed in a later release."
+ )
+ str(record[0].message).should.contain("Please use mock_rds instead.")
+
+
@mock_rds2
-def test_create_database():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- LicenseModel="license-included",
+def test_create_db_cluster__verify_default_properties():
+ client = boto3.client("rds", region_name="eu-north-1")
+
+ resp = client.create_db_cluster(
+ DBClusterIdentifier="cluster-id",
+ Engine="aurora",
MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- VpcSecurityGroupIds=["sg-123456"],
- )
- db_instance = database["DBInstance"]
- db_instance["AllocatedStorage"].should.equal(10)
- db_instance["DBInstanceClass"].should.equal("db.m1.small")
- db_instance["LicenseModel"].should.equal("license-included")
- db_instance["MasterUsername"].should.equal("root")
- db_instance["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal("my_sg")
- db_instance["DBInstanceArn"].should.equal(
- "arn:aws:rds:us-west-2:{}:db:db-master-1".format(ACCOUNT_ID)
- )
- db_instance["DBInstanceStatus"].should.equal("available")
- db_instance["DBName"].should.equal("staging-postgres")
- db_instance["DBInstanceIdentifier"].should.equal("db-master-1")
- db_instance["IAMDatabaseAuthenticationEnabled"].should.equal(False)
- db_instance["DbiResourceId"].should.contain("db-")
- db_instance["CopyTagsToSnapshot"].should.equal(False)
- db_instance["InstanceCreateTime"].should.be.a("datetime.datetime")
- db_instance["VpcSecurityGroups"][0]["VpcSecurityGroupId"].should.equal("sg-123456")
- db_instance["DeletionProtection"].should.equal(False)
-
-
-@mock_rds2
-def test_database_with_deletion_protection_cannot_be_deleted():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- DeletionProtection=True,
- )
- db_instance = database["DBInstance"]
- db_instance["DBInstanceClass"].should.equal("db.m1.small")
- db_instance["DeletionProtection"].should.equal(True)
-
-
-@mock_rds2
-def test_create_database_no_allocated_storage():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- )
- db_instance = database["DBInstance"]
- db_instance["Engine"].should.equal("postgres")
- db_instance["StorageType"].should.equal("gp2")
- db_instance["AllocatedStorage"].should.equal(20)
-
-
-@mock_rds2
-def test_create_database_non_existing_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance.when.called_with(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- OptionGroupName="non-existing",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_create_database_with_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="my-og",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- OptionGroupName="my-og",
- )
- db_instance = database["DBInstance"]
- db_instance["AllocatedStorage"].should.equal(10)
- db_instance["DBInstanceClass"].should.equal("db.m1.small")
- db_instance["DBName"].should.equal("staging-postgres")
- db_instance["OptionGroupMemberships"][0]["OptionGroupName"].should.equal("my-og")
-
-
-@mock_rds2
-def test_stop_database():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- LicenseModel="license-included",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- mydb = conn.describe_db_instances(
- DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
- )["DBInstances"][0]
- mydb["DBInstanceStatus"].should.equal("available")
- # test stopping database should shutdown
- response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
- response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
- response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
- # test rdsclient error when trying to stop an already stopped database
- conn.stop_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
- ).should.throw(ClientError)
- # test stopping a stopped database with snapshot should error and no snapshot should exist for that call
- conn.stop_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"],
- DBSnapshotIdentifier="rocky4570-rds-snap",
- ).should.throw(ClientError)
- response = conn.describe_db_snapshots()
- response["DBSnapshots"].should.equal([])
-
-
-@mock_rds2
-def test_start_database():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- LicenseModel="license-included",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- mydb = conn.describe_db_instances(
- DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
- )["DBInstances"][0]
- mydb["DBInstanceStatus"].should.equal("available")
- # test starting an already started database should error
- conn.start_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
- ).should.throw(ClientError)
- # stop and test start - should go from stopped to available, create snapshot and check snapshot
- response = conn.stop_db_instance(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"],
- DBSnapshotIdentifier="rocky4570-rds-snap",
- )
- response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
- response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
- response = conn.describe_db_snapshots()
- response["DBSnapshots"][0]["DBSnapshotIdentifier"].should.equal(
- "rocky4570-rds-snap"
- )
- response = conn.start_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
- response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
- response["DBInstance"]["DBInstanceStatus"].should.equal("available")
- # starting database should not remove snapshot
- response = conn.describe_db_snapshots()
- response["DBSnapshots"][0]["DBSnapshotIdentifier"].should.equal(
- "rocky4570-rds-snap"
- )
- # test stopping database, create snapshot with existing snapshot already created should throw error
- conn.stop_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"],
- DBSnapshotIdentifier="rocky4570-rds-snap",
- ).should.throw(ClientError)
- # test stopping database not invoking snapshot should succeed.
- response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
- response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
- response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
-
-
-@mock_rds2
-def test_fail_to_stop_multi_az_and_sqlserver():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="sqlserver-ee",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- LicenseModel="license-included",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- MultiAZ=True,
- )
-
- mydb = conn.describe_db_instances(
- DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
- )["DBInstances"][0]
- mydb["DBInstanceStatus"].should.equal("available")
- # multi-az databases arent allowed to be shutdown at this time.
- conn.stop_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
- ).should.throw(ClientError)
- # multi-az databases arent allowed to be started up at this time.
- conn.start_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_stop_multi_az_postgres():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- LicenseModel="license-included",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- MultiAZ=True,
- )
-
- mydb = conn.describe_db_instances(
- DBInstanceIdentifier=database["DBInstance"]["DBInstanceIdentifier"]
- )["DBInstances"][0]
- mydb["DBInstanceStatus"].should.equal("available")
-
- response = conn.stop_db_instance(DBInstanceIdentifier=mydb["DBInstanceIdentifier"])
- response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
- response["DBInstance"]["DBInstanceStatus"].should.equal("stopped")
-
-
-@mock_rds2
-def test_fail_to_stop_readreplica():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- LicenseModel="license-included",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
-
- replica = conn.create_db_instance_read_replica(
- DBInstanceIdentifier="db-replica-1",
- SourceDBInstanceIdentifier="db-master-1",
- DBInstanceClass="db.m1.small",
- )
-
- mydb = conn.describe_db_instances(
- DBInstanceIdentifier=replica["DBInstance"]["DBInstanceIdentifier"]
- )["DBInstances"][0]
- mydb["DBInstanceStatus"].should.equal("available")
- # read-replicas are not allowed to be stopped at this time.
- conn.stop_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
- ).should.throw(ClientError)
- # read-replicas are not allowed to be started at this time.
- conn.start_db_instance.when.called_with(
- DBInstanceIdentifier=mydb["DBInstanceIdentifier"]
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_get_databases():
- conn = boto3.client("rds", region_name="us-west-2")
-
- instances = conn.describe_db_instances()
- list(instances["DBInstances"]).should.have.length_of(0)
-
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-2",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- DeletionProtection=True,
- )
- instances = conn.describe_db_instances()
- list(instances["DBInstances"]).should.have.length_of(2)
-
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- list(instances["DBInstances"]).should.have.length_of(1)
- instances["DBInstances"][0]["DBInstanceIdentifier"].should.equal("db-master-1")
- instances["DBInstances"][0]["DeletionProtection"].should.equal(False)
- instances["DBInstances"][0]["DBInstanceArn"].should.equal(
- "arn:aws:rds:us-west-2:{}:db:db-master-1".format(ACCOUNT_ID)
- )
-
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2")
- instances["DBInstances"][0]["DeletionProtection"].should.equal(True)
-
-
-@mock_rds2
-def test_get_databases_paginated():
- conn = boto3.client("rds", region_name="us-west-2")
-
- for i in range(51):
- conn.create_db_instance(
- AllocatedStorage=5,
- Port=5432,
- DBInstanceIdentifier="rds%d" % i,
- DBInstanceClass="db.t1.micro",
- Engine="postgres",
- )
-
- resp = conn.describe_db_instances()
- resp["DBInstances"].should.have.length_of(50)
- resp["Marker"].should.equal(resp["DBInstances"][-1]["DBInstanceIdentifier"])
-
- resp2 = conn.describe_db_instances(Marker=resp["Marker"])
- resp2["DBInstances"].should.have.length_of(1)
-
- resp3 = conn.describe_db_instances(MaxRecords=100)
- resp3["DBInstances"].should.have.length_of(51)
-
-
-@mock_rds2
-def test_describe_non_existent_database():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.describe_db_instances.when.called_with(
- DBInstanceIdentifier="not-a-db"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_modify_db_instance():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- instances["DBInstances"][0]["AllocatedStorage"].should.equal(10)
- conn.modify_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=20,
- ApplyImmediately=True,
- VpcSecurityGroupIds=["sg-123456"],
- )
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- instances["DBInstances"][0]["AllocatedStorage"].should.equal(20)
- instances["DBInstances"][0]["VpcSecurityGroups"][0][
- "VpcSecurityGroupId"
- ].should.equal("sg-123456")
-
-
-@mock_rds2
-def test_modify_db_instance_not_existent_db_parameter_group_name():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- instances["DBInstances"][0]["AllocatedStorage"].should.equal(10)
- conn.modify_db_instance.when.called_with(
- DBInstanceIdentifier="db-master-1",
- DBParameterGroupName="test-sqlserver-se-2017",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_rename_db_instance():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- list(instances["DBInstances"]).should.have.length_of(1)
- conn.describe_db_instances.when.called_with(
- DBInstanceIdentifier="db-master-2"
- ).should.throw(ClientError)
- conn.modify_db_instance(
- DBInstanceIdentifier="db-master-1",
- NewDBInstanceIdentifier="db-master-2",
- ApplyImmediately=True,
- )
- conn.describe_db_instances.when.called_with(
- DBInstanceIdentifier="db-master-1"
- ).should.throw(ClientError)
- instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2")
- list(instances["DBInstances"]).should.have.length_of(1)
-
-
-@mock_rds2
-def test_modify_non_existent_database():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.modify_db_instance.when.called_with(
- DBInstanceIdentifier="not-a-db", AllocatedStorage=20, ApplyImmediately=True
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_reboot_db_instance():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- database = conn.reboot_db_instance(DBInstanceIdentifier="db-master-1")
- database["DBInstance"]["DBInstanceIdentifier"].should.equal("db-master-1")
-
-
-@mock_rds2
-def test_reboot_non_existent_database():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.reboot_db_instance.when.called_with(
- DBInstanceIdentifier="not-a-db"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_delete_database():
- conn = boto3.client("rds", region_name="us-west-2")
- instances = conn.describe_db_instances()
- list(instances["DBInstances"]).should.have.length_of(0)
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- instances = conn.describe_db_instances()
- list(instances["DBInstances"]).should.have.length_of(1)
-
- conn.delete_db_instance(
- DBInstanceIdentifier="db-primary-1",
- FinalDBSnapshotIdentifier="primary-1-snapshot",
- )
-
- instances = conn.describe_db_instances()
- list(instances["DBInstances"]).should.have.length_of(0)
-
- # Saved the snapshot
- snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get(
- "DBSnapshots"
- )
- snapshots[0].get("Engine").should.equal("postgres")
-
-
-@mock_rds2
-def test_create_db_snapshots():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_snapshot.when.called_with(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- ).should.throw(ClientError)
-
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
-
- snapshot = conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="g-1"
- ).get("DBSnapshot")
-
- snapshot.get("Engine").should.equal("postgres")
- snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
- snapshot.get("DBSnapshotIdentifier").should.equal("g-1")
- result = conn.list_tags_for_resource(ResourceName=snapshot["DBSnapshotArn"])
- result["TagList"].should.equal([])
-
-
-@mock_rds2
-def test_create_db_snapshots_copy_tags():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_snapshot.when.called_with(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- ).should.throw(ClientError)
-
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- CopyTagsToSnapshot=True,
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
-
- snapshot = conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="g-1"
- ).get("DBSnapshot")
-
- snapshot.get("Engine").should.equal("postgres")
- snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
- snapshot.get("DBSnapshotIdentifier").should.equal("g-1")
- result = conn.list_tags_for_resource(ResourceName=snapshot["DBSnapshotArn"])
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_rds2
-def test_copy_db_snapshots():
- conn = boto3.client("rds", region_name="us-west-2")
-
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
-
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- ).get("DBSnapshot")
-
- target_snapshot = conn.copy_db_snapshot(
- SourceDBSnapshotIdentifier="snapshot-1", TargetDBSnapshotIdentifier="snapshot-2"
- ).get("DBSnapshot")
-
- target_snapshot.get("Engine").should.equal("postgres")
- target_snapshot.get("DBInstanceIdentifier").should.equal("db-primary-1")
- target_snapshot.get("DBSnapshotIdentifier").should.equal("snapshot-2")
- result = conn.list_tags_for_resource(ResourceName=target_snapshot["DBSnapshotArn"])
- result["TagList"].should.equal([])
-
-
-@mock_rds2
-def test_describe_db_snapshots():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
-
- created = conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- ).get("DBSnapshot")
-
- created.get("Engine").should.equal("postgres")
-
- by_database_id = conn.describe_db_snapshots(
- DBInstanceIdentifier="db-primary-1"
- ).get("DBSnapshots")
- by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier="snapshot-1").get(
- "DBSnapshots"
- )
- by_snapshot_id.should.equal(by_database_id)
-
- snapshot = by_snapshot_id[0]
- snapshot.should.equal(created)
- snapshot.get("Engine").should.equal("postgres")
-
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-2"
- )
- snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get(
- "DBSnapshots"
- )
- snapshots.should.have.length_of(2)
-
-
-@mock_rds2
-def test_delete_db_snapshot():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- )
-
- conn.describe_db_snapshots(DBSnapshotIdentifier="snapshot-1").get("DBSnapshots")[0]
- conn.delete_db_snapshot(DBSnapshotIdentifier="snapshot-1")
- conn.describe_db_snapshots.when.called_with(
- DBSnapshotIdentifier="snapshot-1"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_restore_db_instance_from_db_snapshot():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- DBSecurityGroups=["my_sg"],
- )
- conn.describe_db_instances()["DBInstances"].should.have.length_of(1)
-
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- )
-
- # restore
- new_instance = conn.restore_db_instance_from_db_snapshot(
- DBInstanceIdentifier="db-restore-1", DBSnapshotIdentifier="snapshot-1"
- )["DBInstance"]
- new_instance["DBInstanceIdentifier"].should.equal("db-restore-1")
- new_instance["DBInstanceClass"].should.equal("db.m1.small")
- new_instance["StorageType"].should.equal("gp2")
- new_instance["Engine"].should.equal("postgres")
- new_instance["DBName"].should.equal("staging-postgres")
- new_instance["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
- "default.postgres9.3"
- )
- new_instance["DBSecurityGroups"].should.equal(
- [{"DBSecurityGroupName": "my_sg", "Status": "active"}]
- )
- new_instance["Endpoint"]["Port"].should.equal(5432)
-
- # Verify it exists
- conn.describe_db_instances()["DBInstances"].should.have.length_of(2)
- conn.describe_db_instances(DBInstanceIdentifier="db-restore-1")[
- "DBInstances"
- ].should.have.length_of(1)
-
-
-@mock_rds2
-def test_restore_db_instance_from_db_snapshot_and_override_params():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- conn.describe_db_instances()["DBInstances"].should.have.length_of(1)
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1", DBSnapshotIdentifier="snapshot-1"
- )
-
- # restore with some updated attributes
- new_instance = conn.restore_db_instance_from_db_snapshot(
- DBInstanceIdentifier="db-restore-1",
- DBSnapshotIdentifier="snapshot-1",
- Port=10000,
- VpcSecurityGroupIds=["new_vpc"],
- )["DBInstance"]
- new_instance["DBInstanceIdentifier"].should.equal("db-restore-1")
- new_instance["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
- "default.postgres9.3"
- )
- new_instance["DBSecurityGroups"].should.equal(
- [{"DBSecurityGroupName": "my_sg", "Status": "active"}]
- )
- new_instance["VpcSecurityGroups"].should.equal(
- [{"VpcSecurityGroupId": "new_vpc", "Status": "active"}]
- )
- new_instance["Endpoint"]["Port"].should.equal(10000)
-
-
-@mock_rds2
-def test_create_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- option_group = conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- option_group["OptionGroup"]["OptionGroupName"].should.equal("test")
- option_group["OptionGroup"]["EngineName"].should.equal("mysql")
- option_group["OptionGroup"]["OptionGroupDescription"].should.equal(
- "test option group"
- )
- option_group["OptionGroup"]["MajorEngineVersion"].should.equal("5.6")
-
-
-@mock_rds2
-def test_create_option_group_bad_engine_name():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group.when.called_with(
- OptionGroupName="test",
- EngineName="invalid_engine",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test invalid engine",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_create_option_group_bad_engine_major_version():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group.when.called_with(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="6.6.6",
- OptionGroupDescription="test invalid engine version",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_create_option_group_empty_description():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group.when.called_with(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_create_option_group_duplicate():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- conn.create_option_group.when.called_with(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_describe_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- option_groups = conn.describe_option_groups(OptionGroupName="test")
- option_groups["OptionGroupsList"][0]["OptionGroupName"].should.equal("test")
-
-
-@mock_rds2
-def test_describe_non_existent_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.describe_option_groups.when.called_with(
- OptionGroupName="not-a-option-group"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_delete_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- option_groups = conn.describe_option_groups(OptionGroupName="test")
- option_groups["OptionGroupsList"][0]["OptionGroupName"].should.equal("test")
- conn.delete_option_group(OptionGroupName="test")
- conn.describe_option_groups.when.called_with(OptionGroupName="test").should.throw(
- ClientError
- )
-
-
-@mock_rds2
-def test_delete_non_existent_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.delete_option_group.when.called_with(
- OptionGroupName="non-existent"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_describe_option_group_options():
- conn = boto3.client("rds", region_name="us-west-2")
- option_group_options = conn.describe_option_group_options(EngineName="sqlserver-ee")
- len(option_group_options["OptionGroupOptions"]).should.equal(4)
- option_group_options = conn.describe_option_group_options(
- EngineName="sqlserver-ee", MajorEngineVersion="11.00"
- )
- len(option_group_options["OptionGroupOptions"]).should.equal(2)
- option_group_options = conn.describe_option_group_options(
- EngineName="mysql", MajorEngineVersion="5.6"
- )
- len(option_group_options["OptionGroupOptions"]).should.equal(1)
- conn.describe_option_group_options.when.called_with(
- EngineName="non-existent"
- ).should.throw(ClientError)
- conn.describe_option_group_options.when.called_with(
- EngineName="mysql", MajorEngineVersion="non-existent"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_modify_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- # TODO: create option and validate before deleting.
- # if Someone can tell me how the hell to use this function
- # to add options to an option_group, I can finish coding this.
- result = conn.modify_option_group(
- OptionGroupName="test",
- OptionsToInclude=[],
- OptionsToRemove=["MEMCACHED"],
- ApplyImmediately=True,
- )
- result["OptionGroup"]["EngineName"].should.equal("mysql")
- result["OptionGroup"]["Options"].should.equal([])
- result["OptionGroup"]["OptionGroupName"].should.equal("test")
-
-
-@mock_rds2
-def test_modify_option_group_no_options():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- conn.modify_option_group.when.called_with(OptionGroupName="test").should.throw(
- ClientError
- )
-
-
-@mock_rds2
-def test_modify_non_existent_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.modify_option_group.when.called_with(
- OptionGroupName="non-existent",
- OptionsToInclude=[{"OptionName": "test-option"}],
- ).should.throw(ClientError, "Specified OptionGroupName: non-existent not found.")
-
-
-@mock_rds2
-def test_delete_database_with_protection():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBInstanceClass="db.m1.small",
- DeletionProtection=True,
- )
-
- with pytest.raises(ClientError) as exc:
- conn.delete_db_instance(DBInstanceIdentifier="db-primary-1")
- err = exc.value.response["Error"]
- err["Message"].should.equal("Can't delete Instance with protection enabled")
-
-
-@mock_rds2
-def test_delete_non_existent_database():
- conn = boto3.client("rds", region_name="us-west-2")
- with pytest.raises(ClientError) as ex:
- conn.delete_db_instance(DBInstanceIdentifier="non-existent")
- ex.value.response["Error"]["Code"].should.equal("DBInstanceNotFound")
- ex.value.response["Error"]["Message"].should.equal(
- "DBInstance non-existent not found."
- )
-
-
-@mock_rds2
-def test_list_tags_invalid_arn():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.list_tags_for_resource.when.called_with(
- ResourceName="arn:aws:rds:bad-arn"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_list_tags_db():
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:foo"
- )
- result["TagList"].should.equal([])
- test_instance = conn.create_db_instance(
- DBInstanceIdentifier="db-with-tags",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName=test_instance["DBInstance"]["DBInstanceArn"]
- )
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_rds2
-def test_add_tags_db():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-without-tags",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-without-tags"
- )
- list(result["TagList"]).should.have.length_of(2)
- conn.add_tags_to_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-without-tags",
- Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-without-tags"
- )
- list(result["TagList"]).should.have.length_of(3)
-
-
-@mock_rds2
-def test_remove_tags_db():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-with-tags",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-with-tags"
- )
- list(result["TagList"]).should.have.length_of(2)
- conn.remove_tags_from_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-with-tags", TagKeys=["foo"]
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:db:db-with-tags"
- )
- len(result["TagList"]).should.equal(1)
-
-
-@mock_rds2
-def test_list_tags_snapshot():
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:foo"
- )
- result["TagList"].should.equal([])
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- snapshot = conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1",
- DBSnapshotIdentifier="snapshot-with-tags",
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName=snapshot["DBSnapshot"]["DBSnapshotArn"]
- )
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_rds2
-def test_add_tags_snapshot():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1",
- DBSnapshotIdentifier="snapshot-without-tags",
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags"
- )
- list(result["TagList"]).should.have.length_of(2)
- conn.add_tags_to_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags",
- Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags"
- )
- list(result["TagList"]).should.have.length_of(3)
-
-
-@mock_rds2
-def test_remove_tags_snapshot():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_instance(
- DBInstanceIdentifier="db-primary-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
- conn.create_db_snapshot(
- DBInstanceIdentifier="db-primary-1",
- DBSnapshotIdentifier="snapshot-with-tags",
- Tags=[{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags"
- )
- list(result["TagList"]).should.have.length_of(2)
- conn.remove_tags_from_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags",
- TagKeys=["foo"],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags"
- )
- len(result["TagList"]).should.equal(1)
-
-
-@mock_rds2
-def test_add_tags_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
- )
- list(result["TagList"]).should.have.length_of(0)
- conn.add_tags_to_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test",
- Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
- )
- list(result["TagList"]).should.have.length_of(2)
-
-
-@mock_rds2
-def test_remove_tags_option_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_option_group(
- OptionGroupName="test",
- EngineName="mysql",
- MajorEngineVersion="5.6",
- OptionGroupDescription="test option group",
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
- )
- conn.add_tags_to_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test",
- Tags=[{"Key": "foo", "Value": "fish"}, {"Key": "foo2", "Value": "bar2"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
- )
- list(result["TagList"]).should.have.length_of(2)
- conn.remove_tags_from_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test", TagKeys=["foo"]
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:og:test"
- )
- list(result["TagList"]).should.have.length_of(1)
-
-
-@mock_rds2
-def test_create_database_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
-
- result = conn.create_db_security_group(
- DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
- )
- result["DBSecurityGroup"]["DBSecurityGroupName"].should.equal("db_sg")
- result["DBSecurityGroup"]["DBSecurityGroupDescription"].should.equal(
- "DB Security Group"
- )
- result["DBSecurityGroup"]["IPRanges"].should.equal([])
-
-
-@mock_rds2
-def test_get_security_groups():
- conn = boto3.client("rds", region_name="us-west-2")
-
- result = conn.describe_db_security_groups()
- result["DBSecurityGroups"].should.have.length_of(0)
-
- conn.create_db_security_group(
- DBSecurityGroupName="db_sg1", DBSecurityGroupDescription="DB Security Group"
- )
- conn.create_db_security_group(
- DBSecurityGroupName="db_sg2", DBSecurityGroupDescription="DB Security Group"
- )
-
- result = conn.describe_db_security_groups()
- result["DBSecurityGroups"].should.have.length_of(2)
-
- result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1")
- result["DBSecurityGroups"].should.have.length_of(1)
- result["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal("db_sg1")
-
-
-@mock_rds2
-def test_get_non_existent_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.describe_db_security_groups.when.called_with(
- DBSecurityGroupName="not-a-sg"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_delete_database_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_security_group(
- DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
- )
-
- result = conn.describe_db_security_groups()
- result["DBSecurityGroups"].should.have.length_of(1)
-
- conn.delete_db_security_group(DBSecurityGroupName="db_sg")
- result = conn.describe_db_security_groups()
- result["DBSecurityGroups"].should.have.length_of(0)
-
-
-@mock_rds2
-def test_delete_non_existent_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.delete_db_security_group.when.called_with(
- DBSecurityGroupName="not-a-db"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_security_group_authorize():
- conn = boto3.client("rds", region_name="us-west-2")
- security_group = conn.create_db_security_group(
- DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
- )
- security_group["DBSecurityGroup"]["IPRanges"].should.equal([])
-
- conn.authorize_db_security_group_ingress(
- DBSecurityGroupName="db_sg", CIDRIP="10.3.2.45/32"
- )
-
- result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
- result["DBSecurityGroups"][0]["IPRanges"].should.have.length_of(1)
- result["DBSecurityGroups"][0]["IPRanges"].should.equal(
- [{"Status": "authorized", "CIDRIP": "10.3.2.45/32"}]
- )
-
- conn.authorize_db_security_group_ingress(
- DBSecurityGroupName="db_sg", CIDRIP="10.3.2.46/32"
- )
- result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
- result["DBSecurityGroups"][0]["IPRanges"].should.have.length_of(2)
- result["DBSecurityGroups"][0]["IPRanges"].should.equal(
- [
- {"Status": "authorized", "CIDRIP": "10.3.2.45/32"},
- {"Status": "authorized", "CIDRIP": "10.3.2.46/32"},
- ]
- )
-
-
-@mock_rds2
-def test_add_security_group_to_database():
- conn = boto3.client("rds", region_name="us-west-2")
-
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- DBInstanceClass="postgres",
- Engine="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- )
-
- result = conn.describe_db_instances()
- result["DBInstances"][0]["DBSecurityGroups"].should.equal([])
- conn.create_db_security_group(
- DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
- )
- conn.modify_db_instance(
- DBInstanceIdentifier="db-master-1", DBSecurityGroups=["db_sg"]
- )
- result = conn.describe_db_instances()
- result["DBInstances"][0]["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal(
- "db_sg"
- )
-
-
-@mock_rds2
-def test_list_tags_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- security_group = conn.create_db_security_group(
- DBSecurityGroupName="db_sg",
- DBSecurityGroupDescription="DB Security Group",
- Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
- )["DBSecurityGroup"]["DBSecurityGroupName"]
- resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group)
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_rds2
-def test_add_tags_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- security_group = conn.create_db_security_group(
- DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group"
- )["DBSecurityGroup"]["DBSecurityGroupName"]
-
- resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group)
- conn.add_tags_to_resource(
- ResourceName=resource,
- Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
- )
-
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_rds2
-def test_remove_tags_security_group():
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- security_group = conn.create_db_security_group(
- DBSecurityGroupName="db_sg",
- DBSecurityGroupDescription="DB Security Group",
- Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
- )["DBSecurityGroup"]["DBSecurityGroupName"]
-
- resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group)
- conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["foo"])
-
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal([{"Value": "bar1", "Key": "foo1"}])
-
-
-@mock_ec2
-@mock_rds2
-def test_create_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet1 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
- subnet2 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.2.0/24")[
- "Subnet"
- ]
-
- subnet_ids = [subnet1["SubnetId"], subnet2["SubnetId"]]
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=subnet_ids,
- )
- result["DBSubnetGroup"]["DBSubnetGroupName"].should.equal("db_subnet")
- result["DBSubnetGroup"]["DBSubnetGroupDescription"].should.equal("my db subnet")
- subnets = result["DBSubnetGroup"]["Subnets"]
- subnet_group_ids = [subnets[0]["SubnetIdentifier"], subnets[1]["SubnetIdentifier"]]
- list(subnet_group_ids).should.equal(subnet_ids)
-
-
-@mock_ec2
-@mock_rds2
-def test_modify_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet1 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
- subnet2 = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.2.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet1["SubnetId"]],
- )
-
- conn.modify_db_subnet_group(
- DBSubnetGroupName="db_subnet",
- DBSubnetGroupDescription="my updated desc",
- SubnetIds=[subnet1["SubnetId"], subnet2["SubnetId"]],
- )
-
- conn.describe_db_subnet_groups()["DBSubnetGroups"]
- # FIXME: Group is deleted atm
- # TODO: we should check whether all attrs are persisted
-
-
-@mock_ec2
-@mock_rds2
-def test_create_database_in_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet1",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- )
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSubnetGroupName="db_subnet1",
- )
- result = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- result["DBInstances"][0]["DBSubnetGroup"]["DBSubnetGroupName"].should.equal(
- "db_subnet1"
- )
-
-
-@mock_ec2
-@mock_rds2
-def test_describe_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet1",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- )
- conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet2",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- )
-
- resp = conn.describe_db_subnet_groups()
- resp["DBSubnetGroups"].should.have.length_of(2)
-
- subnets = resp["DBSubnetGroups"][0]["Subnets"]
- subnets.should.have.length_of(1)
-
- list(
- conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1")["DBSubnetGroups"]
- ).should.have.length_of(1)
-
- conn.describe_db_subnet_groups.when.called_with(
- DBSubnetGroupName="not-a-subnet"
- ).should.throw(ClientError)
-
-
-@mock_ec2
-@mock_rds2
-def test_delete_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet1",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- )
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(1)
-
- conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- conn.delete_db_subnet_group.when.called_with(
- DBSubnetGroupName="db_subnet1"
- ).should.throw(ClientError)
-
-
-@mock_ec2
-@mock_rds2
-def test_list_tags_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- subnet = conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet1",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
- )["DBSubnetGroup"]["DBSubnetGroupName"]
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet)
- )
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_rds2
-def test_modify_tags_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- client_tags = [{"Key": "character_set_client", "Value": "utf-8"}]
- result = conn.create_db_parameter_group(
- DBParameterGroupName="test-sqlserver-2017",
- DBParameterGroupFamily="mysql5.6",
- Description="MySQL Group",
- Tags=client_tags,
- )
- resource = result["DBParameterGroup"]["DBParameterGroupArn"]
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(client_tags)
- server_tags = [{"Key": "character_set_server", "Value": "utf-8"}]
- conn.add_tags_to_resource(ResourceName=resource, Tags=server_tags)
- combined_tags = client_tags + server_tags
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(combined_tags)
-
- conn.remove_tags_from_resource(
- ResourceName=resource, TagKeys=["character_set_client"]
- )
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(server_tags)
-
-
-@mock_rds2
-def test_modify_tags_event_subscription():
- conn = boto3.client("rds", region_name="us-west-2")
- tags = [{"Key": "hello", "Value": "world"}]
- result = conn.create_event_subscription(
- SubscriptionName="my-instance-events",
- SourceType="db-instance",
- EventCategories=["backup", "recovery"],
- SnsTopicArn="arn:aws:sns:us-east-1:123456789012:interesting-events",
- Tags=tags,
- )
- resource = result["EventSubscription"]["EventSubscriptionArn"]
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(tags)
- new_tags = [{"Key": "new_key", "Value": "new_value"}]
- conn.add_tags_to_resource(ResourceName=resource, Tags=new_tags)
- combined_tags = tags + new_tags
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(combined_tags)
-
- conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["new_key"])
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(tags)
-
-
-@mock_ec2
-@mock_rds2
-def test_add_tags_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- subnet = conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet1",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- Tags=[],
- )["DBSubnetGroup"]["DBSubnetGroupName"]
- resource = "arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet)
-
- conn.add_tags_to_resource(
- ResourceName=resource,
- Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
- )
-
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal(
- [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}]
- )
-
-
-@mock_ec2
-@mock_rds2
-def test_remove_tags_database_subnet_group():
- vpc_conn = boto3.client("ec2", "us-west-2")
- vpc = vpc_conn.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
- subnet = vpc_conn.create_subnet(VpcId=vpc["VpcId"], CidrBlock="10.0.1.0/24")[
- "Subnet"
- ]
-
- conn = boto3.client("rds", region_name="us-west-2")
- result = conn.describe_db_subnet_groups()
- result["DBSubnetGroups"].should.have.length_of(0)
-
- subnet = conn.create_db_subnet_group(
- DBSubnetGroupName="db_subnet1",
- DBSubnetGroupDescription="my db subnet",
- SubnetIds=[subnet["SubnetId"]],
- Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}],
- )["DBSubnetGroup"]["DBSubnetGroupName"]
- resource = "arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet)
-
- conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["foo"])
-
- result = conn.list_tags_for_resource(ResourceName=resource)
- result["TagList"].should.equal([{"Value": "bar1", "Key": "foo1"}])
-
-
-@mock_rds2
-def test_create_database_replica():
- conn = boto3.client("rds", region_name="us-west-2")
-
- conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- )
-
- replica = conn.create_db_instance_read_replica(
- DBInstanceIdentifier="db-replica-1",
- SourceDBInstanceIdentifier="db-master-1",
- DBInstanceClass="db.m1.small",
- )
- replica["DBInstance"]["ReadReplicaSourceDBInstanceIdentifier"].should.equal(
- "db-master-1"
- )
- replica["DBInstance"]["DBInstanceClass"].should.equal("db.m1.small")
- replica["DBInstance"]["DBInstanceIdentifier"].should.equal("db-replica-1")
-
- master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- master["DBInstances"][0]["ReadReplicaDBInstanceIdentifiers"].should.equal(
- ["db-replica-1"]
- )
-
- conn.delete_db_instance(DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True)
-
- master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")
- master["DBInstances"][0]["ReadReplicaDBInstanceIdentifiers"].should.equal([])
-
-
-@mock_rds2
-@mock_kms
-def test_create_database_with_encrypted_storage():
- kms_conn = boto3.client("kms", region_name="us-west-2")
- key = kms_conn.create_key(
- Policy="my RDS encryption policy",
- Description="RDS encryption key",
- KeyUsage="ENCRYPT_DECRYPT",
- )
-
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- DBSecurityGroups=["my_sg"],
- StorageEncrypted=True,
- KmsKeyId=key["KeyMetadata"]["KeyId"],
- )
-
- database["DBInstance"]["StorageEncrypted"].should.equal(True)
- database["DBInstance"]["KmsKeyId"].should.equal(key["KeyMetadata"]["KeyId"])
-
-
-@mock_rds2
-def test_create_db_parameter_group():
- region = "us-west-2"
- pg_name = "test"
- conn = boto3.client("rds", region_name=region)
- db_parameter_group = conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
-
- db_parameter_group["DBParameterGroup"]["DBParameterGroupName"].should.equal("test")
- db_parameter_group["DBParameterGroup"]["DBParameterGroupFamily"].should.equal(
- "mysql5.6"
- )
- db_parameter_group["DBParameterGroup"]["Description"].should.equal(
- "test parameter group"
- )
- db_parameter_group["DBParameterGroup"]["DBParameterGroupArn"].should.equal(
- "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name)
- )
-
-
-@mock_rds2
-def test_create_db_instance_with_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
-
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="mysql",
- DBInstanceClass="db.m1.small",
- DBParameterGroupName="test",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- )
-
- len(database["DBInstance"]["DBParameterGroups"]).should.equal(1)
- database["DBInstance"]["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
- "test"
- )
- database["DBInstance"]["DBParameterGroups"][0]["ParameterApplyStatus"].should.equal(
- "in-sync"
- )
-
-
-@mock_rds2
-def test_create_database_with_default_port():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="postgres",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- DBSecurityGroups=["my_sg"],
- )
- database["DBInstance"]["Endpoint"]["Port"].should.equal(5432)
-
-
-@mock_rds2
-def test_modify_db_instance_with_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- database = conn.create_db_instance(
- DBInstanceIdentifier="db-master-1",
- AllocatedStorage=10,
- Engine="mysql",
- DBInstanceClass="db.m1.small",
- MasterUsername="root",
- MasterUserPassword="hunter2",
- Port=1234,
- )
-
- len(database["DBInstance"]["DBParameterGroups"]).should.equal(1)
- database["DBInstance"]["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
- "default.mysql5.6"
- )
- database["DBInstance"]["DBParameterGroups"][0]["ParameterApplyStatus"].should.equal(
- "in-sync"
- )
-
- conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
- conn.modify_db_instance(
- DBInstanceIdentifier="db-master-1",
- DBParameterGroupName="test",
- ApplyImmediately=True,
- )
-
- database = conn.describe_db_instances(DBInstanceIdentifier="db-master-1")[
- "DBInstances"
- ][0]
- len(database["DBParameterGroups"]).should.equal(1)
- database["DBParameterGroups"][0]["DBParameterGroupName"].should.equal("test")
- database["DBParameterGroups"][0]["ParameterApplyStatus"].should.equal("in-sync")
-
-
-@mock_rds2
-def test_create_db_parameter_group_empty_description():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_parameter_group.when.called_with(
- DBParameterGroupName="test", DBParameterGroupFamily="mysql5.6", Description=""
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_create_db_parameter_group_duplicate():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
- conn.create_db_parameter_group.when.called_with(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_describe_db_parameter_group():
- region = "us-west-2"
- pg_name = "test"
- conn = boto3.client("rds", region_name=region)
- conn.create_db_parameter_group(
- DBParameterGroupName=pg_name,
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
- db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
- db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
- "test"
- )
- db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupArn"].should.equal(
- "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name)
- )
-
-
-@mock_rds2
-def test_describe_non_existent_db_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
- len(db_parameter_groups["DBParameterGroups"]).should.equal(0)
-
-
-@mock_rds2
-def test_delete_db_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
- db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
- db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupName"].should.equal(
- "test"
- )
- conn.delete_db_parameter_group(DBParameterGroupName="test")
- db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test")
- len(db_parameter_groups["DBParameterGroups"]).should.equal(0)
-
-
-@mock_rds2
-def test_modify_db_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- )
-
- modify_result = conn.modify_db_parameter_group(
- DBParameterGroupName="test",
- Parameters=[
- {
- "ParameterName": "foo",
- "ParameterValue": "foo_val",
- "Description": "test param",
- "ApplyMethod": "immediate",
- }
- ],
- )
-
- modify_result["DBParameterGroupName"].should.equal("test")
-
- db_parameters = conn.describe_db_parameters(DBParameterGroupName="test")
- db_parameters["Parameters"][0]["ParameterName"].should.equal("foo")
- db_parameters["Parameters"][0]["ParameterValue"].should.equal("foo_val")
- db_parameters["Parameters"][0]["Description"].should.equal("test param")
- db_parameters["Parameters"][0]["ApplyMethod"].should.equal("immediate")
-
-
-@mock_rds2
-def test_delete_non_existent_db_parameter_group():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.delete_db_parameter_group.when.called_with(
- DBParameterGroupName="non-existent"
- ).should.throw(ClientError)
-
-
-@mock_rds2
-def test_create_parameter_group_with_tags():
- conn = boto3.client("rds", region_name="us-west-2")
- conn.create_db_parameter_group(
- DBParameterGroupName="test",
- DBParameterGroupFamily="mysql5.6",
- Description="test parameter group",
- Tags=[{"Key": "foo", "Value": "bar"}],
- )
- result = conn.list_tags_for_resource(
- ResourceName="arn:aws:rds:us-west-2:1234567890:pg:test"
- )
- result["TagList"].should.equal([{"Value": "bar", "Key": "foo"}])
-
-
-@mock_rds2
-def test_create_db_with_iam_authentication():
- conn = boto3.client("rds", region_name="us-west-2")
-
- database = conn.create_db_instance(
- DBInstanceIdentifier="rds",
- DBInstanceClass="db.t1.micro",
- Engine="postgres",
- EnableIAMDatabaseAuthentication=True,
- )
-
- db_instance = database["DBInstance"]
- db_instance["IAMDatabaseAuthenticationEnabled"].should.equal(True)
-
-
-@mock_rds2
-def test_create_db_snapshot_with_iam_authentication():
- conn = boto3.client("rds", region_name="us-west-2")
-
- conn.create_db_instance(
- DBInstanceIdentifier="rds",
- DBInstanceClass="db.t1.micro",
- Engine="postgres",
- EnableIAMDatabaseAuthentication=True,
- )
-
- snapshot = conn.create_db_snapshot(
- DBInstanceIdentifier="rds", DBSnapshotIdentifier="snapshot"
- ).get("DBSnapshot")
-
- snapshot.get("IAMDatabaseAuthenticationEnabled").should.equal(True)
-
-
-@mock_rds2
-def test_create_db_instance_with_tags():
- client = boto3.client("rds", region_name="us-west-2")
- tags = [{"Key": "foo", "Value": "bar"}, {"Key": "foo1", "Value": "bar1"}]
- db_instance_identifier = "test-db-instance"
- resp = client.create_db_instance(
- DBInstanceIdentifier=db_instance_identifier,
- Engine="postgres",
- DBName="staging-postgres",
- DBInstanceClass="db.m1.small",
- Tags=tags,
- )
- resp["DBInstance"]["TagList"].should.equal(tags)
-
- resp = client.describe_db_instances(DBInstanceIdentifier=db_instance_identifier)
- resp["DBInstances"][0]["TagList"].should.equal(tags)
+ MasterUserPassword="hunter2_",
+ )
+ resp.should.have.key("DBCluster")
+
+ cluster = resp["DBCluster"]
+
+ cluster.shouldnt.have.key(
+ "DatabaseName"
+ ) # This was not supplied, so should not be returned
+
+ cluster.should.have.key("AllocatedStorage").equal(1)
+ cluster.should.have.key("AvailabilityZones")
+ set(cluster["AvailabilityZones"]).should.equal(
+ {"eu-north-1a", "eu-north-1b", "eu-north-1c"}
+ )
+ cluster.should.have.key("BackupRetentionPeriod").equal(1)
+ cluster.should.have.key("DBClusterIdentifier").equal("cluster-id")
+ cluster.should.have.key("DBClusterParameterGroup").equal("default.aurora8.0")
+ cluster.should.have.key("DBSubnetGroup").equal("default")
+ cluster.should.have.key("Status").equal("creating")
+ cluster.should.have.key("Endpoint").match(
+ "cluster-id.cluster-[a-z0-9]{12}.eu-north-1.rds.amazonaws.com"
+ )
+ endpoint = cluster["Endpoint"]
+ expected_readonly = endpoint.replace(
+ "cluster-id.cluster-", "cluster-id.cluster-ro-"
+ )
+ cluster.should.have.key("ReaderEndpoint").equal(expected_readonly)
+ cluster.should.have.key("MultiAZ").equal(False)
+ cluster.should.have.key("Engine").equal("aurora")
+ cluster.should.have.key("EngineVersion").equal("5.6.mysql_aurora.1.22.5")
+ cluster.should.have.key("Port").equal(3306)
+ cluster.should.have.key("MasterUsername").equal("root")
+ cluster.should.have.key("PreferredBackupWindow").equal("01:37-02:07")
+ cluster.should.have.key("PreferredMaintenanceWindow").equal("wed:02:40-wed:03:10")
+ cluster.should.have.key("ReadReplicaIdentifiers").equal([])
+ cluster.should.have.key("DBClusterMembers").equal([])
+ cluster.should.have.key("VpcSecurityGroups")
+ cluster.should.have.key("HostedZoneId")
+ cluster.should.have.key("StorageEncrypted").equal(False)
+ cluster.should.have.key("DbClusterResourceId").match(r"cluster-[A-Z0-9]{26}")
+ cluster.should.have.key("DBClusterArn").equal(
+ f"arn:aws:rds:eu-north-1:{ACCOUNT_ID}:cluster:cluster-id"
+ )
+ cluster.should.have.key("AssociatedRoles").equal([])
+ cluster.should.have.key("IAMDatabaseAuthenticationEnabled").equal(False)
+ cluster.should.have.key("EngineMode").equal("provisioned")
+ cluster.should.have.key("DeletionProtection").equal(False)
+ cluster.should.have.key("HttpEndpointEnabled").equal(False)
+ cluster.should.have.key("CopyTagsToSnapshot").equal(False)
+ cluster.should.have.key("CrossAccountClone").equal(False)
+ cluster.should.have.key("DeletionProtection").equal(False)
+ cluster.should.have.key("DomainMemberships").equal([])
+ cluster.should.have.key("TagList").equal([])
+ cluster.should.have.key("ClusterCreateTime")
diff --git a/tests/test_rds2/test_server.py b/tests/test_rds2/test_server.py
deleted file mode 100644
index cf57ce750..000000000
--- a/tests/test_rds2/test_server.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import sure # noqa # pylint: disable=unused-import
-
-"""
-Test the different server responses
-"""
-
-
-# @mock_rds2
-# def test_list_databases():
-# backend = server.create_backend_app("rds2")
-# test_client = backend.test_client()
-#
-# res = test_client.get('/?Action=DescribeDBInstances')
-#
-# res.data.decode("utf-8").should.contain("")
diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py
index d9b9998e1..1340d80b4 100644
--- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py
+++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py
@@ -3,7 +3,7 @@ import sure # noqa # pylint: disable=unused-import
from moto import mock_ec2
from moto import mock_elbv2
from moto import mock_kms
-from moto import mock_rds2
+from moto import mock_rds
from moto import mock_resourcegroupstaggingapi
from moto import mock_s3
from moto import mock_lambda
@@ -12,7 +12,7 @@ from botocore.client import ClientError
from tests import EXAMPLE_AMI_ID, EXAMPLE_AMI_ID2
-@mock_rds2
+@mock_rds
@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_resources_ec2():
@@ -379,7 +379,7 @@ def test_multiple_tag_filters():
instance_2_id.shouldnt.be.within(results[0]["ResourceARN"])
-@mock_rds2
+@mock_rds
@mock_resourcegroupstaggingapi
def test_get_resources_rds():
client = boto3.client("rds", region_name="us-west-2")