diff --git a/moto/__init__.py b/moto/__init__.py
index 8041f0856..965eaf4ee 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -12,6 +12,7 @@ from .elb import mock_elb # flake8: noqa
from .emr import mock_emr # flake8: noqa
from .iam import mock_iam # flake8: noqa
from .kinesis import mock_kinesis # flake8: noqa
+from .rds import mock_rds # flake8: noqa
from .redshift import mock_redshift # flake8: noqa
from .s3 import mock_s3 # flake8: noqa
from .s3bucket_path import mock_s3bucket_path # flake8: noqa
diff --git a/moto/backends.py b/moto/backends.py
index cf6759d99..460ac028f 100644
--- a/moto/backends.py
+++ b/moto/backends.py
@@ -7,6 +7,7 @@ from moto.ec2 import ec2_backend
from moto.elb import elb_backend
from moto.emr import emr_backend
from moto.kinesis import kinesis_backend
+from moto.rds import rds_backend
from moto.redshift import redshift_backend
from moto.s3 import s3_backend
from moto.s3bucket_path import s3bucket_path_backend
@@ -25,6 +26,7 @@ BACKENDS = {
'emr': emr_backend,
'kinesis': kinesis_backend,
'redshift': redshift_backend,
+ 'rds': rds_backend,
's3': s3_backend,
's3bucket_path': s3bucket_path_backend,
'ses': ses_backend,
diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index 8650b4f1b..75aade56f 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -1,11 +1,13 @@
from __future__ import unicode_literals
import collections
+import functools
import logging
from moto.autoscaling import models as autoscaling_models
from moto.ec2 import models as ec2_models
from moto.elb import models as elb_models
from moto.iam import models as iam_models
+from moto.rds import models as rds_models
from moto.sqs import models as sqs_models
from .utils import random_suffix
from .exceptions import MissingParameterError, UnformattedGetAttTemplateException
@@ -31,6 +33,9 @@ MODEL_MAP = {
"AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer,
"AWS::IAM::InstanceProfile": iam_models.InstanceProfile,
"AWS::IAM::Role": iam_models.Role,
+ "AWS::RDS::DBInstance": rds_models.Database,
+ "AWS::RDS::DBSecurityGroup": rds_models.SecurityGroup,
+ "AWS::RDS::DBSubnetGroup": rds_models.SubnetGroup,
"AWS::SQS::Queue": sqs_models.Queue,
}
@@ -57,6 +62,15 @@ NULL_MODELS = [
logger = logging.getLogger("moto")
+class LazyDict(dict):
+ def __getitem__(self, key):
+ val = dict.__getitem__(self, key)
+ if callable(val):
+ val = val()
+ self[key] = val
+ return val
+
+
def clean_json(resource_json, resources_map):
"""
Cleanup the a resource dict. For now, this just means replacing any Ref node
@@ -99,15 +113,15 @@ def clean_json(resource_json, resources_map):
if 'Fn::If' in resource_json:
condition_name, true_value, false_value = resource_json['Fn::If']
if resources_map[condition_name]:
- return true_value
+ return clean_json(true_value, resources_map)
else:
- return false_value
+ return clean_json(false_value, resources_map)
if 'Fn::Join' in resource_json:
join_list = []
for val in resource_json['Fn::Join'][1]:
cleaned_val = clean_json(val, resources_map)
- join_list.append(cleaned_val if cleaned_val else '{0}'.format(val))
+ join_list.append('{0}'.format(cleaned_val) if cleaned_val else '{0}'.format(val))
return resource_json['Fn::Join'][0].join(join_list)
cleaned_json = {}
@@ -168,7 +182,7 @@ def parse_resource(logical_id, resource_json, resources_map, region_name):
return resource
-def parse_condition(condition, resources_map):
+def parse_condition(condition, resources_map, condition_map):
if isinstance(condition, bool):
return condition
@@ -178,22 +192,22 @@ def parse_condition(condition, resources_map):
for value in list(condition.values())[0]:
# Check if we are referencing another Condition
if 'Condition' in value:
- condition_values.append(resources_map[value['Condition']])
+ condition_values.append(condition_map[value['Condition']])
else:
condition_values.append(clean_json(value, resources_map))
if condition_operator == "Fn::Equals":
return condition_values[0] == condition_values[1]
elif condition_operator == "Fn::Not":
- return not parse_condition(condition_values[0], resources_map)
+ return not parse_condition(condition_values[0], resources_map, condition_map)
elif condition_operator == "Fn::And":
return all([
- parse_condition(condition_value, resources_map)
+ parse_condition(condition_value, resources_map, condition_map)
for condition_value
in condition_values])
elif condition_operator == "Fn::Or":
return any([
- parse_condition(condition_value, resources_map)
+ parse_condition(condition_value, resources_map, condition_map)
for condition_value
in condition_values])
@@ -227,6 +241,7 @@ class ResourceMap(collections.Mapping):
"AWS::Region": self._region_name,
"AWS::StackId": stack_id,
"AWS::StackName": stack_name,
+ "AWS::NoValue": None,
}
def __getitem__(self, key):
@@ -273,9 +288,13 @@ class ResourceMap(collections.Mapping):
def load_conditions(self):
conditions = self._template.get('Conditions', {})
-
+ lazy_condition_map = LazyDict()
for condition_name, condition in conditions.items():
- self._parsed_resources[condition_name] = parse_condition(condition, self._parsed_resources)
+ lazy_condition_map[condition_name] = functools.partial(parse_condition,
+ condition, self._parsed_resources, lazy_condition_map)
+
+ for condition_name in lazy_condition_map:
+ self._parsed_resources[condition_name] = lazy_condition_map[condition_name]
def create(self):
self.load_mapping()
diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 1a5f0a21e..e9246a6bf 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -996,6 +996,7 @@ class SecurityGroup(object):
self.egress_rules = []
self.enis = {}
self.vpc_id = vpc_id
+ self.owner_id = "123456789012"
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
diff --git a/moto/rds/__init__.py b/moto/rds/__init__.py
new file mode 100644
index 000000000..407f1680c
--- /dev/null
+++ b/moto/rds/__init__.py
@@ -0,0 +1,12 @@
+from __future__ import unicode_literals
+from .models import rds_backends
+from ..core.models import MockAWS
+
+rds_backend = rds_backends['us-east-1']
+
+
+def mock_rds(func=None):
+ if func:
+ return MockAWS(rds_backends)(func)
+ else:
+ return MockAWS(rds_backends)
diff --git a/moto/rds/exceptions.py b/moto/rds/exceptions.py
new file mode 100644
index 000000000..936b979d2
--- /dev/null
+++ b/moto/rds/exceptions.py
@@ -0,0 +1,38 @@
+from __future__ import unicode_literals
+
+import json
+from werkzeug.exceptions import BadRequest
+
+
+class RDSClientError(BadRequest):
+ def __init__(self, code, message):
+ super(RDSClientError, self).__init__()
+ self.description = json.dumps({
+ "Error": {
+ "Code": code,
+ "Message": message,
+ 'Type': 'Sender',
+ },
+ 'RequestId': '6876f774-7273-11e4-85dc-39e55ca848d1',
+ })
+
+
+class DBInstanceNotFoundError(RDSClientError):
+ def __init__(self, database_identifier):
+ super(DBInstanceNotFoundError, self).__init__(
+ 'DBInstanceNotFound',
+ "Database {0} not found.".format(database_identifier))
+
+
+class DBSecurityGroupNotFoundError(RDSClientError):
+ def __init__(self, security_group_name):
+ super(DBSecurityGroupNotFoundError, self).__init__(
+ 'DBSecurityGroupNotFound',
+ "Security Group {0} not found.".format(security_group_name))
+
+
+class DBSubnetGroupNotFoundError(RDSClientError):
+ def __init__(self, subnet_group_name):
+ super(DBSubnetGroupNotFoundError, self).__init__(
+ 'DBSubnetGroupNotFound',
+ "Subnet Group {0} not found.".format(subnet_group_name))
diff --git a/moto/rds/models.py b/moto/rds/models.py
new file mode 100644
index 000000000..f0ac1e789
--- /dev/null
+++ b/moto/rds/models.py
@@ -0,0 +1,420 @@
+from __future__ import unicode_literals
+
+import copy
+
+import boto.rds
+from jinja2 import Template
+
+from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
+from moto.core import BaseBackend
+from moto.core.utils import get_random_hex
+from moto.ec2.models import ec2_backends
+from .exceptions import DBInstanceNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError
+
+
+class Database(object):
+ def __init__(self, **kwargs):
+ self.status = "available"
+
+ self.is_replica = False
+ self.replicas = []
+
+ self.region = kwargs.get('region')
+ self.engine = kwargs.get("engine")
+ self.engine_version = kwargs.get("engine_version")
+ if self.engine_version is None:
+ self.engine_version = "5.6.21"
+ self.iops = kwargs.get("iops")
+ self.storage_type = kwargs.get("storage_type")
+ self.master_username = kwargs.get('master_username')
+ self.master_password = kwargs.get('master_password')
+ self.auto_minor_version_upgrade = kwargs.get('auto_minor_version_upgrade')
+ if self.auto_minor_version_upgrade is None:
+ self.auto_minor_version_upgrade = True
+ self.allocated_storage = kwargs.get('allocated_storage')
+ self.db_instance_identifier = kwargs.get('db_instance_identifier')
+ self.source_db_identifier = kwargs.get("source_db_identifier")
+ self.db_instance_class = kwargs.get('db_instance_class')
+ self.port = kwargs.get('port')
+ self.db_instance_identifier = kwargs.get('db_instance_identifier')
+ self.db_name = kwargs.get("db_name")
+ self.publicly_accessible = kwargs.get("publicly_accessible")
+ if self.publicly_accessible is None:
+ self.publicly_accessible = True
+
+ self.backup_retention_period = kwargs.get("backup_retention_period")
+ if self.backup_retention_period is None:
+ self.backup_retention_period = 1
+
+ self.availability_zone = kwargs.get("availability_zone")
+ self.multi_az = kwargs.get("multi_az")
+ self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
+ if self.db_subnet_group_name:
+ self.db_subnet_group = rds_backends[self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
+ else:
+ self.db_subnet_group = []
+
+ self.security_groups = kwargs.get('security_groups', [])
+
+ # PreferredBackupWindow
+ # PreferredMaintenanceWindow
+ # backup_retention_period = self._get_param("BackupRetentionPeriod")
+ # OptionGroupName
+ # DBParameterGroupName
+ # VpcSecurityGroupIds.member.N
+
+ @property
+ def address(self):
+ return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(self.db_instance_identifier, self.region)
+
+ def add_replica(self, replica):
+ self.replicas.append(replica.db_instance_identifier)
+
+ def remove_replica(self, replica):
+ self.replicas.remove(replica.db_instance_identifier)
+
+ def set_as_replica(self):
+ self.is_replica = True
+ self.replicas = []
+
+ def update(self, db_kwargs):
+ for key, value in db_kwargs.items():
+ if value is not None:
+ setattr(self, key, value)
+
+ def get_cfn_attribute(self, attribute_name):
+ if attribute_name == 'Endpoint.Address':
+ return self.address
+ elif attribute_name == 'Endpoint.Port':
+ return self.port
+ raise UnformattedGetAttTemplateException()
+
+ @classmethod
+ def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+ properties = cloudformation_json['Properties']
+
+ db_instance_identifier = properties.get('DBInstanceIdentifier')
+ if not db_instance_identifier:
+ db_instance_identifier = resource_name.lower() + get_random_hex(12)
+ db_security_groups = properties.get('DBSecurityGroups')
+ if not db_security_groups:
+ db_security_groups = []
+ security_groups = [group.group_name for group in db_security_groups]
+ db_subnet_group = properties.get("DBSubnetGroupName")
+ db_subnet_group_name = db_subnet_group.subnet_name if db_subnet_group else None
+ db_kwargs = {
+ "auto_minor_version_upgrade": properties.get('AutoMinorVersionUpgrade'),
+ "allocated_storage": properties.get('AllocatedStorage'),
+ "availability_zone": properties.get("AvailabilityZone"),
+ "backup_retention_period": properties.get("BackupRetentionPeriod"),
+ "db_instance_class": properties.get('DBInstanceClass'),
+ "db_instance_identifier": db_instance_identifier,
+ "db_name": properties.get("DBName"),
+ "db_subnet_group_name": db_subnet_group_name,
+ "engine": properties.get("Engine"),
+ "engine_version": properties.get("EngineVersion"),
+ "iops": properties.get("Iops"),
+ "master_password": properties.get('MasterUserPassword'),
+ "master_username": properties.get('MasterUsername'),
+ "multi_az": properties.get("MultiAZ"),
+ "port": properties.get('Port', 3306),
+ "publicly_accessible": properties.get("PubliclyAccessible"),
+ "region": region_name,
+ "security_groups": security_groups,
+ "storage_type": properties.get("StorageType"),
+ }
+
+ rds_backend = rds_backends[region_name]
+ source_db_identifier = properties.get("SourceDBInstanceIdentifier")
+ if source_db_identifier:
+ # Replica
+ db_kwargs["source_db_identifier"] = source_db_identifier.db_instance_identifier
+ database = rds_backend.create_database_replica(db_kwargs)
+ else:
+ database = rds_backend.create_database(db_kwargs)
+ return database
+
+ def to_xml(self):
+ template = Template("""
+ {{ database.backup_retention_period }}
+ {{ database.status }}
+ {{ database.multi_az }}
+
+ {{ database.db_instance_identifier }}
+ 03:50-04:20
+ wed:06:38-wed:07:08
+
+ {% for replica_id in database.replicas %}
+ {{ replica_id }}
+ {% endfor %}
+
+
+ {% if database.is_replica %}
+
+ read replication
+ replicating
+ true
+
+
+ {% endif %}
+
+ {% if database.is_replica %}
+ {{ database.source_db_identifier }}
+ {% endif %}
+ {{ database.engine }}
+ general-public-license
+ {{ database.engine_version }}
+
+
+
+
+
+ {% for security_group in database.security_groups %}
+
+ active
+ {{ security_group }}
+
+ {% endfor %}
+
+ {% if database.db_subnet_group %}
+
+ {{ database.db_subnet_group.subnet_name }}
+ {{ database.db_subnet_group.description }}
+ {{ database.db_subnet_group.status }}
+
+ {% for subnet in database.db_subnet_group.subnets %}
+
+ Active
+ {{ subnet.id }}
+
+ {{ subnet.availability_zone }}
+ false
+
+
+ {% endfor %}
+
+ {{ database.db_subnet_group.vpc_id }}
+
+ {% endif %}
+ {{ database.publicly_accessible }}
+ {{ database.auto_minor_version_upgrade }}
+ {{ database.allocated_storage }}
+ {{ database.db_instance_class }}
+ {{ database.master_username }}
+
+ {{ database.address }}
+ {{ database.port }}
+
+ """)
+ return template.render(database=self)
+
+
+class SecurityGroup(object):
+ def __init__(self, group_name, description):
+ self.group_name = group_name
+ self.description = description
+ self.status = "authorized"
+ self.ip_ranges = []
+ self.ec2_security_groups = []
+
+ def to_xml(self):
+ template = Template("""
+
+ {% for security_group in security_group.ec2_security_groups %}
+
+ {{ security_group.id }}
+ {{ security_group.name }}
+ {{ security_group.owner_id }}
+ authorized
+
+ {% endfor %}
+
+
+ {{ security_group.description }}
+
+ {% for ip_range in security_group.ip_ranges %}
+
+ {{ ip_range }}
+ authorized
+
+ {% endfor %}
+
+ {{ security_group.owner_id }}
+ {{ security_group.group_name }}
+ """)
+ return template.render(security_group=self)
+
+ def authorize_cidr(self, cidr_ip):
+ self.ip_ranges.append(cidr_ip)
+
+ def authorize_security_group(self, security_group):
+ self.ec2_security_groups.append(security_group)
+
+ @classmethod
+ def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+ properties = cloudformation_json['Properties']
+ group_name = resource_name.lower() + get_random_hex(12)
+ description = properties['GroupDescription']
+ security_group_ingress = properties['DBSecurityGroupIngress']
+
+ ec2_backend = ec2_backends[region_name]
+ rds_backend = rds_backends[region_name]
+ security_group = rds_backend.create_security_group(
+ group_name,
+ description,
+ )
+ for ingress_type, ingress_value in security_group_ingress.items():
+ if ingress_type == "CIDRIP":
+ security_group.authorize_cidr(ingress_value)
+ elif ingress_type == "EC2SecurityGroupName":
+ subnet = ec2_backend.get_security_group_from_name(ingress_value)
+ security_group.authorize_security_group(subnet)
+ elif ingress_type == "EC2SecurityGroupId":
+ subnet = ec2_backend.get_security_group_from_id(ingress_value)
+ security_group.authorize_security_group(subnet)
+ return security_group
+
+
+class SubnetGroup(object):
+ def __init__(self, subnet_name, description, subnets):
+ self.subnet_name = subnet_name
+ self.description = description
+ self.subnets = subnets
+ self.status = "Complete"
+
+ self.vpc_id = self.subnets[0].vpc_id
+
+ def to_xml(self):
+ template = Template("""
+ {{ subnet_group.vpc_id }}
+ {{ subnet_group.status }}
+ {{ subnet_group.description }}
+ {{ subnet_group.subnet_name }}
+
+ {% for subnet in subnet_group.subnets %}
+
+ Active
+ {{ subnet.id }}
+
+ {{ subnet.availability_zone }}
+ false
+
+
+ {% endfor %}
+
+ """)
+ return template.render(subnet_group=self)
+
+ @classmethod
+ def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+ properties = cloudformation_json['Properties']
+
+ subnet_name = resource_name.lower() + get_random_hex(12)
+ description = properties['DBSubnetGroupDescription']
+ subnet_ids = properties['SubnetIds']
+
+ ec2_backend = ec2_backends[region_name]
+ subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids]
+ rds_backend = rds_backends[region_name]
+ subnet_group = rds_backend.create_subnet_group(
+ subnet_name,
+ description,
+ subnets,
+ )
+ return subnet_group
+
+
+class RDSBackend(BaseBackend):
+
+ def __init__(self):
+ self.databases = {}
+ self.security_groups = {}
+ self.subnet_groups = {}
+
+ def create_database(self, db_kwargs):
+ database_id = db_kwargs['db_instance_identifier']
+ database = Database(**db_kwargs)
+ self.databases[database_id] = database
+ return database
+
+ def create_database_replica(self, db_kwargs):
+ database_id = db_kwargs['db_instance_identifier']
+ source_database_id = db_kwargs['source_db_identifier']
+ primary = self.describe_databases(source_database_id)[0]
+ replica = copy.deepcopy(primary)
+ replica.update(db_kwargs)
+ replica.set_as_replica()
+ self.databases[database_id] = replica
+ primary.add_replica(replica)
+ return replica
+
+ def describe_databases(self, db_instance_identifier=None):
+ if db_instance_identifier:
+ if db_instance_identifier in self.databases:
+ return [self.databases[db_instance_identifier]]
+ else:
+ raise DBInstanceNotFoundError(db_instance_identifier)
+ return self.databases.values()
+
+ def modify_database(self, db_instance_identifier, db_kwargs):
+ database = self.describe_databases(db_instance_identifier)[0]
+ database.update(db_kwargs)
+ return database
+
+ def delete_database(self, db_instance_identifier):
+ if db_instance_identifier in self.databases:
+ database = self.databases.pop(db_instance_identifier)
+ if database.is_replica:
+ primary = self.describe_databases(database.source_db_identifier)[0]
+ primary.remove_replica(database)
+ return database
+ else:
+ raise DBInstanceNotFoundError(db_instance_identifier)
+
+ def create_security_group(self, group_name, description):
+ security_group = SecurityGroup(group_name, description)
+ self.security_groups[group_name] = security_group
+ return security_group
+
+ def describe_security_groups(self, security_group_name):
+ if security_group_name:
+ if security_group_name in self.security_groups:
+ return [self.security_groups[security_group_name]]
+ else:
+ raise DBSecurityGroupNotFoundError(security_group_name)
+ return self.security_groups.values()
+
+ def delete_security_group(self, security_group_name):
+ if security_group_name in self.security_groups:
+ return self.security_groups.pop(security_group_name)
+ else:
+ raise DBSecurityGroupNotFoundError(security_group_name)
+
+ def authorize_security_group(self, security_group_name, cidr_ip):
+ security_group = self.describe_security_groups(security_group_name)[0]
+ security_group.authorize_cidr(cidr_ip)
+ return security_group
+
+ def create_subnet_group(self, subnet_name, description, subnets):
+ subnet_group = SubnetGroup(subnet_name, description, subnets)
+ self.subnet_groups[subnet_name] = subnet_group
+ return subnet_group
+
+ def describe_subnet_groups(self, subnet_group_name):
+ if subnet_group_name:
+ if subnet_group_name in self.subnet_groups:
+ return [self.subnet_groups[subnet_group_name]]
+ else:
+ raise DBSubnetGroupNotFoundError(subnet_group_name)
+ return self.subnet_groups.values()
+
+ def delete_subnet_group(self, subnet_name):
+ if subnet_name in self.subnet_groups:
+ return self.subnet_groups.pop(subnet_name)
+ else:
+ raise DBSubnetGroupNotFoundError(subnet_name)
+
+
+rds_backends = {}
+for region in boto.rds.regions():
+ rds_backends[region.name] = RDSBackend()
diff --git a/moto/rds/responses.py b/moto/rds/responses.py
new file mode 100644
index 000000000..98015e7bb
--- /dev/null
+++ b/moto/rds/responses.py
@@ -0,0 +1,250 @@
+from __future__ import unicode_literals
+
+from moto.core.responses import BaseResponse
+from moto.ec2.models import ec2_backends
+from .models import rds_backends
+
+
+class RDSResponse(BaseResponse):
+
+ @property
+ def backend(self):
+ return rds_backends[self.region]
+
+ def _get_db_kwargs(self):
+ return {
+ "auto_minor_version_upgrade": self._get_param('AutoMinorVersionUpgrade'),
+ "allocated_storage": self._get_int_param('AllocatedStorage'),
+ "availability_zone": self._get_param("AvailabilityZone"),
+ "backup_retention_period": self._get_param("BackupRetentionPeriod"),
+ "db_instance_class": self._get_param('DBInstanceClass'),
+ "db_instance_identifier": self._get_param('DBInstanceIdentifier'),
+ "db_name": self._get_param("DBName"),
+ # DBParameterGroupName
+ "db_subnet_group_name": self._get_param("DBSubnetGroupName"),
+ "engine": self._get_param("Engine"),
+ "engine_version": self._get_param("EngineVersion"),
+ "iops": self._get_int_param("Iops"),
+ "master_password": self._get_param('MasterUserPassword'),
+ "master_username": self._get_param('MasterUsername'),
+ "multi_az": self._get_bool_param("MultiAZ"),
+ # OptionGroupName
+ "port": self._get_param('Port'),
+ # PreferredBackupWindow
+ # PreferredMaintenanceWindow
+ "publicly_accessible": self._get_param("PubliclyAccessible"),
+ "region": self.region,
+ "security_groups": self._get_multi_param('DBSecurityGroups.member'),
+ "storage_type": self._get_param("StorageType"),
+ # VpcSecurityGroupIds.member.N
+ }
+
+ def _get_db_replica_kwargs(self):
+ return {
+ "auto_minor_version_upgrade": self._get_param('AutoMinorVersionUpgrade'),
+ "availability_zone": self._get_param("AvailabilityZone"),
+ "db_instance_class": self._get_param('DBInstanceClass'),
+ "db_instance_identifier": self._get_param('DBInstanceIdentifier'),
+ "db_subnet_group_name": self._get_param("DBSubnetGroupName"),
+ "iops": self._get_int_param("Iops"),
+ # OptionGroupName
+ "port": self._get_param('Port'),
+ "publicly_accessible": self._get_param("PubliclyAccessible"),
+ "source_db_identifier": self._get_param('SourceDBInstanceIdentifier'),
+ "storage_type": self._get_param("StorageType"),
+ }
+
+ def create_dbinstance(self):
+ db_kwargs = self._get_db_kwargs()
+
+ database = self.backend.create_database(db_kwargs)
+ template = self.response_template(CREATE_DATABASE_TEMPLATE)
+ return template.render(database=database)
+
+ def create_dbinstance_read_replica(self):
+ db_kwargs = self._get_db_replica_kwargs()
+
+ database = self.backend.create_database_replica(db_kwargs)
+ template = self.response_template(CREATE_DATABASE_REPLICA_TEMPLATE)
+ return template.render(database=database)
+
+ def describe_dbinstances(self):
+ db_instance_identifier = self._get_param('DBInstanceIdentifier')
+ databases = self.backend.describe_databases(db_instance_identifier)
+ template = self.response_template(DESCRIBE_DATABASES_TEMPLATE)
+ return template.render(databases=databases)
+
+ def modify_dbinstance(self):
+ db_instance_identifier = self._get_param('DBInstanceIdentifier')
+ db_kwargs = self._get_db_kwargs()
+ database = self.backend.modify_database(db_instance_identifier, db_kwargs)
+ template = self.response_template(MODIFY_DATABASE_TEMPLATE)
+ return template.render(database=database)
+
+ def delete_dbinstance(self):
+ db_instance_identifier = self._get_param('DBInstanceIdentifier')
+ database = self.backend.delete_database(db_instance_identifier)
+ template = self.response_template(DELETE_DATABASE_TEMPLATE)
+ return template.render(database=database)
+
+ def create_dbsecurity_group(self):
+ group_name = self._get_param('DBSecurityGroupName')
+ description = self._get_param('DBSecurityGroupDescription')
+ security_group = self.backend.create_security_group(group_name, description)
+ template = self.response_template(CREATE_SECURITY_GROUP_TEMPLATE)
+ return template.render(security_group=security_group)
+
+ def describe_dbsecurity_groups(self):
+ security_group_name = self._get_param('DBSecurityGroupName')
+ security_groups = self.backend.describe_security_groups(security_group_name)
+ template = self.response_template(DESCRIBE_SECURITY_GROUPS_TEMPLATE)
+ return template.render(security_groups=security_groups)
+
+ def delete_dbsecurity_group(self):
+ security_group_name = self._get_param('DBSecurityGroupName')
+ security_group = self.backend.delete_security_group(security_group_name)
+ template = self.response_template(DELETE_SECURITY_GROUP_TEMPLATE)
+ return template.render(security_group=security_group)
+
+ def authorize_dbsecurity_group_ingress(self):
+ security_group_name = self._get_param('DBSecurityGroupName')
+ cidr_ip = self._get_param('CIDRIP')
+ security_group = self.backend.authorize_security_group(security_group_name, cidr_ip)
+ template = self.response_template(AUTHORIZE_SECURITY_GROUP_TEMPLATE)
+ return template.render(security_group=security_group)
+
+ def create_dbsubnet_group(self):
+ subnet_name = self._get_param('DBSubnetGroupName')
+ description = self._get_param('DBSubnetGroupDescription')
+ subnet_ids = self._get_multi_param('SubnetIds.member')
+ subnets = [ec2_backends[self.region].get_subnet(subnet_id) for subnet_id in subnet_ids]
+ subnet_group = self.backend.create_subnet_group(subnet_name, description, subnets)
+ template = self.response_template(CREATE_SUBNET_GROUP_TEMPLATE)
+ return template.render(subnet_group=subnet_group)
+
+ def describe_dbsubnet_groups(self):
+ subnet_name = self._get_param('DBSubnetGroupName')
+ subnet_groups = self.backend.describe_subnet_groups(subnet_name)
+ template = self.response_template(DESCRIBE_SUBNET_GROUPS_TEMPLATE)
+ return template.render(subnet_groups=subnet_groups)
+
+ def delete_dbsubnet_group(self):
+ subnet_name = self._get_param('DBSubnetGroupName')
+ subnet_group = self.backend.delete_subnet_group(subnet_name)
+ template = self.response_template(DELETE_SUBNET_GROUP_TEMPLATE)
+ return template.render(subnet_group=subnet_group)
+
+
+CREATE_DATABASE_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ 523e3218-afc7-11c3-90f5-f90431260ab4
+
+"""
+
+CREATE_DATABASE_REPLICA_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ ba8dedf0-bb9a-11d3-855b-576787000e19
+
+"""
+
+DESCRIBE_DATABASES_TEMPLATE = """
+
+
+ {% for database in databases %}
+ {{ database.to_xml() }}
+ {% endfor %}
+
+
+
+ 01b2685a-b978-11d3-f272-7cd6cce12cc5
+
+"""
+
+MODIFY_DATABASE_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ f643f1ac-bbfe-11d3-f4c6-37db295f7674
+
+"""
+
+DELETE_DATABASE_TEMPLATE = """
+
+ {{ database.to_xml() }}
+
+
+ 7369556f-b70d-11c3-faca-6ba18376ea1b
+
+"""
+
+CREATE_SECURITY_GROUP_TEMPLATE = """
+
+ {{ security_group.to_xml() }}
+
+
+ e68ef6fa-afc1-11c3-845a-476777009d19
+
+"""
+
+DESCRIBE_SECURITY_GROUPS_TEMPLATE = """
+
+
+ {% for security_group in security_groups %}
+ {{ security_group.to_xml() }}
+ {% endfor %}
+
+
+
+ b76e692c-b98c-11d3-a907-5a2c468b9cb0
+
+"""
+
+DELETE_SECURITY_GROUP_TEMPLATE = """
+
+ 7aec7454-ba25-11d3-855b-576787000e19
+
+"""
+
+AUTHORIZE_SECURITY_GROUP_TEMPLATE = """
+
+ {{ security_group.to_xml() }}
+
+
+ 6176b5f8-bfed-11d3-f92b-31fa5e8dbc99
+
+"""
+
+CREATE_SUBNET_GROUP_TEMPLATE = """
+
+ {{ subnet_group.to_xml() }}
+
+
+ 3a401b3f-bb9e-11d3-f4c6-37db295f7674
+
+"""
+
+DESCRIBE_SUBNET_GROUPS_TEMPLATE = """
+
+
+ {% for subnet_group in subnet_groups %}
+ {{ subnet_group.to_xml() }}
+ {% endfor %}
+
+
+
+ b783db3b-b98c-11d3-fbc7-5c0aad74da7c
+
+"""
+
+DELETE_SUBNET_GROUP_TEMPLATE = """
+
+ 6295e5ab-bbf3-11d3-f4c6-37db295f7674
+
+"""
diff --git a/moto/rds/urls.py b/moto/rds/urls.py
new file mode 100644
index 000000000..e2e5b86ce
--- /dev/null
+++ b/moto/rds/urls.py
@@ -0,0 +1,10 @@
+from __future__ import unicode_literals
+from .responses import RDSResponse
+
+url_bases = [
+ "https?://rds.(.+).amazonaws.com",
+]
+
+url_paths = {
+ '{0}/$': RDSResponse().dispatch,
+}
diff --git a/tests/helpers.py b/tests/helpers.py
index 2532548cd..33509c06e 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
import boto
from nose.plugins.skip import SkipTest
+import six
def version_tuple(v):
@@ -23,3 +24,13 @@
         if boto_version >= required:
             return test
         return skip_test
+
+
+class disable_on_py3(object):
+    def __call__(self, test):
+        if not six.PY3:
+            return test
+
+        def skip_test(*args, **kwargs):
+            raise SkipTest
+        return skip_test
diff --git a/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py
new file mode 100644
index 000000000..b743f46f4
--- /dev/null
+++ b/tests/test_cloudformation/fixtures/rds_mysql_with_read_replica.py
@@ -0,0 +1,191 @@
+from __future__ import unicode_literals
+
+template = {
+ "AWSTemplateFormatVersion" : "2010-09-09",
+
+ "Description" : "AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample template showing how to create a highly-available, RDS DBInstance with a read replica. **WARNING** This template creates an Amazon Relational Database Service database instance and Amazon CloudWatch alarms. You will be billed for the AWS resources used if you create a stack from this template.",
+
+ "Parameters": {
+ "DBName": {
+ "Default": "MyDatabase",
+ "Description" : "The database name",
+ "Type": "String",
+ "MinLength": "1",
+ "MaxLength": "64",
+ "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*",
+ "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters."
+ },
+
+ "DBInstanceIdentifier": {
+ "Type": "String"
+ },
+
+ "DBUser": {
+ "NoEcho": "true",
+ "Description" : "The database admin account username",
+ "Type": "String",
+ "MinLength": "1",
+ "MaxLength": "16",
+ "AllowedPattern" : "[a-zA-Z][a-zA-Z0-9]*",
+ "ConstraintDescription" : "must begin with a letter and contain only alphanumeric characters."
+ },
+
+ "DBPassword": {
+ "NoEcho": "true",
+ "Description" : "The database admin account password",
+ "Type": "String",
+ "MinLength": "1",
+ "MaxLength": "41",
+ "AllowedPattern" : "[a-zA-Z0-9]+",
+ "ConstraintDescription" : "must contain only alphanumeric characters."
+ },
+
+ "DBAllocatedStorage": {
+ "Default": "5",
+ "Description" : "The size of the database (Gb)",
+ "Type": "Number",
+ "MinValue": "5",
+ "MaxValue": "1024",
+ "ConstraintDescription" : "must be between 5 and 1024Gb."
+ },
+
+ "DBInstanceClass": {
+ "Description" : "The database instance type",
+ "Type": "String",
+ "Default": "db.m1.small",
+ "AllowedValues" : [ "db.t1.micro", "db.m1.small", "db.m1.medium", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.m3.medium", "db.m3.large", "db.m3.xlarge", "db.m3.2xlarge", "db.r3.large", "db.r3.xlarge", "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.m2.xlarge", "db.m2.2xlarge", "db.m2.4xlarge", "db.cr1.8xlarge"]
+,
+ "ConstraintDescription" : "must select a valid database instance type."
+ },
+
+ "EC2SecurityGroup": {
+ "Description" : "The EC2 security group that contains instances that need access to the database",
+ "Default": "default",
+ "Type": "String",
+ "AllowedPattern" : "[a-zA-Z0-9\\-]+",
+ "ConstraintDescription" : "must be a valid security group name."
+ },
+
+ "MultiAZ" : {
+ "Description" : "Multi-AZ master database",
+ "Type" : "String",
+ "Default" : "false",
+ "AllowedValues" : [ "true", "false" ],
+ "ConstraintDescription" : "must be true or false."
+ }
+ },
+
+ "Conditions" : {
+ "Is-EC2-VPC" : { "Fn::Or" : [ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "eu-central-1" ]},
+ {"Fn::Equals" : [{"Ref" : "AWS::Region"}, "cn-north-1" ]}]},
+ "Is-EC2-Classic" : { "Fn::Not" : [{ "Condition" : "Is-EC2-VPC"}]}
+ },
+
+ "Resources" : {
+
+ "DBEC2SecurityGroup": {
+ "Type": "AWS::EC2::SecurityGroup",
+ "Condition" : "Is-EC2-VPC",
+ "Properties" : {
+ "GroupDescription": "Open database for access",
+ "SecurityGroupIngress" : [{
+ "IpProtocol" : "tcp",
+ "FromPort" : "3306",
+ "ToPort" : "3306",
+ "SourceSecurityGroupName" : { "Ref" : "EC2SecurityGroup" }
+ }]
+ }
+ },
+
+ "DBSecurityGroup": {
+ "Type": "AWS::RDS::DBSecurityGroup",
+ "Condition" : "Is-EC2-Classic",
+ "Properties": {
+ "DBSecurityGroupIngress": {
+ "EC2SecurityGroupName": { "Ref": "EC2SecurityGroup" }
+ },
+ "GroupDescription": "database access"
+ }
+ },
+
+ "my_vpc": {
+ "Type" : "AWS::EC2::VPC",
+ "Properties" : {
+ "CidrBlock" : "10.0.0.0/16",
+ }
+ },
+
+ "EC2Subnet": {
+ "Type" : "AWS::EC2::Subnet",
+ "Condition" : "Is-EC2-VPC",
+ "Properties" : {
+ "AvailabilityZone" : "eu-central-1a",
+ "CidrBlock" : "10.0.1.0/24",
+ "VpcId" : { "Ref" : "my_vpc" }
+ }
+ },
+
+ "DBSubnet": {
+ "Type": "AWS::RDS::DBSubnetGroup",
+ "Condition" : "Is-EC2-VPC",
+ "Properties": {
+ "DBSubnetGroupDescription": "my db subnet group",
+ "SubnetIds" : [ { "Ref": "EC2Subnet" } ],
+ }
+ },
+
+ "MasterDB" : {
+ "Type" : "AWS::RDS::DBInstance",
+ "Properties" : {
+ "DBInstanceIdentifier": { "Ref": "DBInstanceIdentifier" },
+ "DBName" : { "Ref" : "DBName" },
+ "AllocatedStorage" : { "Ref" : "DBAllocatedStorage" },
+ "DBInstanceClass" : { "Ref" : "DBInstanceClass" },
+ "Engine" : "MySQL",
+ "DBSubnetGroupName": {"Fn::If": ["Is-EC2-VPC", { "Ref": "DBSubnet" }, { "Ref": "AWS::NoValue" }]},
+ "MasterUsername" : { "Ref" : "DBUser" },
+ "MasterUserPassword" : { "Ref" : "DBPassword" },
+ "MultiAZ" : { "Ref" : "MultiAZ" },
+ "Tags" : [{ "Key" : "Name", "Value" : "Master Database" }],
+ "VPCSecurityGroups": { "Fn::If" : [ "Is-EC2-VPC", [ { "Fn::GetAtt": [ "DBEC2SecurityGroup", "GroupId" ] } ], { "Ref" : "AWS::NoValue"}]},
+ "DBSecurityGroups": { "Fn::If" : [ "Is-EC2-Classic", [ { "Ref": "DBSecurityGroup" } ], { "Ref" : "AWS::NoValue"}]}
+ },
+ "DeletionPolicy" : "Snapshot"
+ },
+
+ "ReplicaDB" : {
+ "Type" : "AWS::RDS::DBInstance",
+ "Properties" : {
+ "SourceDBInstanceIdentifier" : { "Ref" : "MasterDB" },
+ "DBInstanceClass" : { "Ref" : "DBInstanceClass" },
+ "Tags" : [{ "Key" : "Name", "Value" : "Read Replica Database" }]
+ }
+ }
+ },
+
+ "Outputs" : {
+ "EC2Platform" : {
+ "Description" : "Platform in which this stack is deployed",
+ "Value" : { "Fn::If" : [ "Is-EC2-VPC", "EC2-VPC", "EC2-Classic" ]}
+ },
+
+ "MasterJDBCConnectionString": {
+ "Description" : "JDBC connection string for the master database",
+ "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://",
+ { "Fn::GetAtt": [ "MasterDB", "Endpoint.Address" ] },
+ ":",
+ { "Fn::GetAtt": [ "MasterDB", "Endpoint.Port" ] },
+ "/",
+ { "Ref": "DBName" }]]}
+ },
+ "ReplicaJDBCConnectionString": {
+ "Description" : "JDBC connection string for the replica database",
+ "Value" : { "Fn::Join": [ "", [ "jdbc:mysql://",
+ { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Address" ] },
+ ":",
+ { "Fn::GetAtt": [ "ReplicaDB", "Endpoint.Port" ] },
+ "/",
+ { "Ref": "DBName" }]]}
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py
index b88be5e70..60353f205 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_integration.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py
@@ -18,15 +18,17 @@ from moto import (
mock_ec2,
mock_elb,
mock_iam,
+ mock_rds,
mock_sqs,
)
from .fixtures import (
- single_instance_with_ebs_volume,
- vpc_single_instance_in_subnet,
ec2_classic_eip,
+ fn_join,
+ rds_mysql_with_read_replica,
+ single_instance_with_ebs_volume,
vpc_eip,
- fn_join
+ vpc_single_instance_in_subnet,
)
@@ -350,6 +352,76 @@ def test_vpc_single_instance_in_subnet():
eip_resource.physical_resource_id.should.equal(eip.allocation_id)
+@mock_cloudformation()
+@mock_ec2()
+@mock_rds()
+def test_rds_mysql_with_read_replica():
+ ec2_conn = boto.ec2.connect_to_region("us-west-1")
+ ec2_conn.create_security_group('application', 'Our Application Group')
+
+ template_json = json.dumps(rds_mysql_with_read_replica.template)
+ conn = boto.cloudformation.connect_to_region("us-west-1")
+ conn.create_stack(
+ "test_stack",
+ template_body=template_json,
+ parameters=[
+ ("DBInstanceIdentifier", "master_db"),
+ ("DBName", "my_db"),
+ ("DBUser", "my_user"),
+ ("DBPassword", "my_password"),
+ ("DBAllocatedStorage", "20"),
+ ("DBInstanceClass", "db.m1.medium"),
+ ("EC2SecurityGroup", "application"),
+ ("MultiAZ", "true"),
+ ],
+ )
+
+ rds_conn = boto.rds.connect_to_region("us-west-1")
+
+ primary = rds_conn.get_all_dbinstances("master_db")[0]
+ primary.master_username.should.equal("my_user")
+ primary.allocated_storage.should.equal(20)
+ primary.instance_class.should.equal("db.m1.medium")
+ primary.multi_az.should.equal(True)
+ list(primary.read_replica_dbinstance_identifiers).should.have.length_of(1)
+ replica_id = primary.read_replica_dbinstance_identifiers[0]
+
+ replica = rds_conn.get_all_dbinstances(replica_id)[0]
+ replica.instance_class.should.equal("db.m1.medium")
+
+ security_group_name = primary.security_groups[0].name
+ security_group = rds_conn.get_all_dbsecurity_groups(security_group_name)[0]
+ security_group.ec2_groups[0].name.should.equal("application")
+
+
+@mock_cloudformation()
+@mock_ec2()
+@mock_rds()
+def test_rds_mysql_with_read_replica_in_vpc():
+ template_json = json.dumps(rds_mysql_with_read_replica.template)
+ conn = boto.cloudformation.connect_to_region("eu-central-1")
+ conn.create_stack(
+ "test_stack",
+ template_body=template_json,
+ parameters=[
+ ("DBInstanceIdentifier", "master_db"),
+ ("DBName", "my_db"),
+ ("DBUser", "my_user"),
+ ("DBPassword", "my_password"),
+ ("DBAllocatedStorage", "20"),
+ ("DBInstanceClass", "db.m1.medium"),
+ ("MultiAZ", "true"),
+ ],
+ )
+
+ rds_conn = boto.rds.connect_to_region("eu-central-1")
+ primary = rds_conn.get_all_dbinstances("master_db")[0]
+
+ subnet_group_name = primary.subnet_group.name
+ subnet_group = rds_conn.get_all_db_subnet_groups(subnet_group_name)[0]
+ subnet_group.description.should.equal("my db subnet group")
+
+
@mock_autoscaling()
@mock_iam()
@mock_cloudformation()
diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py
index 13d602ebb..29d8282fd 100644
--- a/tests/test_cloudformation/test_stack_parsing.py
+++ b/tests/test_cloudformation/test_stack_parsing.py
@@ -151,11 +151,13 @@ def test_parse_equals_condition():
parse_condition(
condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
resources_map={"EnvType": "prod"},
+ condition_map={},
).should.equal(True)
parse_condition(
condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
resources_map={"EnvType": "staging"},
+ condition_map={},
).should.equal(False)
@@ -167,6 +169,7 @@ def test_parse_not_condition():
}]
},
resources_map={"EnvType": "prod"},
+ condition_map={},
).should.equal(False)
parse_condition(
@@ -176,6 +179,7 @@ def test_parse_not_condition():
}]
},
resources_map={"EnvType": "staging"},
+ condition_map={},
).should.equal(True)
@@ -188,6 +192,7 @@ def test_parse_and_condition():
]
},
resources_map={"EnvType": "prod"},
+ condition_map={},
).should.equal(False)
parse_condition(
@@ -198,6 +203,7 @@ def test_parse_and_condition():
]
},
resources_map={"EnvType": "prod"},
+ condition_map={},
).should.equal(True)
@@ -210,6 +216,7 @@ def test_parse_or_condition():
]
},
resources_map={"EnvType": "prod"},
+ condition_map={},
).should.equal(True)
parse_condition(
@@ -220,11 +227,13 @@ def test_parse_or_condition():
]
},
resources_map={"EnvType": "prod"},
+ condition_map={},
).should.equal(False)
def test_reference_other_conditions():
parse_condition(
condition={"Fn::Not": [{"Condition": "OtherCondition"}]},
- resources_map={"OtherCondition": True},
+ resources_map={},
+ condition_map={"OtherCondition": True},
).should.equal(False)
diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py
new file mode 100644
index 000000000..80644ef3e
--- /dev/null
+++ b/tests/test_rds/test_rds.py
@@ -0,0 +1,239 @@
+from __future__ import unicode_literals
+
+import boto.rds
+import boto.vpc
+from boto.exception import BotoServerError
+import sure # noqa
+
+from moto import mock_ec2, mock_rds
+from tests.helpers import disable_on_py3
+
+
+@disable_on_py3()
+@mock_rds
+def test_create_database():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
+ security_groups=["my_sg"])
+
+ database.status.should.equal('available')
+ database.id.should.equal("db-master-1")
+ database.allocated_storage.should.equal(10)
+ database.instance_class.should.equal("db.m1.small")
+ database.master_username.should.equal("root")
+ database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
+ database.security_groups[0].name.should.equal('my_sg')
+
+
+@disable_on_py3()
+@mock_rds
+def test_get_databases():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ list(conn.get_all_dbinstances()).should.have.length_of(0)
+
+ conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
+ conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2')
+
+ list(conn.get_all_dbinstances()).should.have.length_of(2)
+
+ databases = conn.get_all_dbinstances("db-master-1")
+ list(databases).should.have.length_of(1)
+
+ databases[0].id.should.equal("db-master-1")
+
+
+@mock_rds
+def test_describe_non_existant_database():
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError)
+
+
+@disable_on_py3()
+@mock_rds
+def test_delete_database():
+ conn = boto.rds.connect_to_region("us-west-2")
+ list(conn.get_all_dbinstances()).should.have.length_of(0)
+
+ conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
+ list(conn.get_all_dbinstances()).should.have.length_of(1)
+
+ conn.delete_dbinstance("db-master-1")
+ list(conn.get_all_dbinstances()).should.have.length_of(0)
+
+
+@mock_rds
+def test_delete_non_existant_database():
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError)
+
+
+@mock_rds
+def test_create_database_security_group():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
+ security_group.name.should.equal('db_sg')
+ security_group.description.should.equal("DB Security Group")
+ list(security_group.ip_ranges).should.equal([])
+
+
+@mock_rds
+def test_get_security_groups():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
+
+ conn.create_dbsecurity_group('db_sg1', 'DB Security Group')
+ conn.create_dbsecurity_group('db_sg2', 'DB Security Group')
+
+ list(conn.get_all_dbsecurity_groups()).should.have.length_of(2)
+
+ databases = conn.get_all_dbsecurity_groups("db_sg1")
+ list(databases).should.have.length_of(1)
+
+ databases[0].name.should.equal("db_sg1")
+
+
+@mock_rds
+def test_get_non_existant_security_group():
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError)
+
+
+@mock_rds
+def test_delete_database_security_group():
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.create_dbsecurity_group('db_sg', 'DB Security Group')
+
+ list(conn.get_all_dbsecurity_groups()).should.have.length_of(1)
+
+ conn.delete_dbsecurity_group("db_sg")
+ list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
+
+
+@mock_rds
+def test_delete_non_existant_security_group():
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError)
+
+
+@disable_on_py3()
+@mock_rds
+def test_security_group_authorize():
+ conn = boto.rds.connect_to_region("us-west-2")
+ security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
+ list(security_group.ip_ranges).should.equal([])
+
+ security_group.authorize(cidr_ip='10.3.2.45/32')
+ security_group = conn.get_all_dbsecurity_groups()[0]
+ list(security_group.ip_ranges).should.have.length_of(1)
+ security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')
+
+
+@disable_on_py3()
+@mock_rds
+def test_add_security_group_to_database():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
+ security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
+ database.modify(security_groups=[security_group])
+
+ database = conn.get_all_dbinstances()[0]
+ list(database.security_groups).should.have.length_of(1)
+
+ database.security_groups[0].name.should.equal("db_sg")
+
+
+@mock_ec2
+@mock_rds
+def test_add_database_subnet_group():
+ vpc_conn = boto.vpc.connect_to_region("us-west-2")
+ vpc = vpc_conn.create_vpc("10.0.0.0/16")
+ subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+ subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24")
+
+ subnet_ids = [subnet1.id, subnet2.id]
+ conn = boto.rds.connect_to_region("us-west-2")
+ subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
+ subnet_group.name.should.equal('db_subnet')
+ subnet_group.description.should.equal("my db subnet")
+ list(subnet_group.subnet_ids).should.equal(subnet_ids)
+
+
+@mock_ec2
+@mock_rds
+def test_describe_database_subnet_group():
+ vpc_conn = boto.vpc.connect_to_region("us-west-2")
+ vpc = vpc_conn.create_vpc("10.0.0.0/16")
+ subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
+ conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id])
+
+ list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
+ list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
+
+ conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError)
+
+
+@mock_ec2
+@mock_rds
+def test_delete_database_subnet_group():
+ vpc_conn = boto.vpc.connect_to_region("us-west-2")
+ vpc = vpc_conn.create_vpc("10.0.0.0/16")
+ subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
+ list(conn.get_all_db_subnet_groups()).should.have.length_of(1)
+
+ conn.delete_db_subnet_group("db_subnet1")
+ list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
+
+ conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError)
+
+
+@disable_on_py3()
+@mock_ec2
+@mock_rds
+def test_create_database_in_subnet_group():
+ vpc_conn = boto.vpc.connect_to_region("us-west-2")
+ vpc = vpc_conn.create_vpc("10.0.0.0/16")
+ subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
+
+ conn = boto.rds.connect_to_region("us-west-2")
+ conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
+
+ database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small',
+ 'root', 'hunter2', db_subnet_group_name="db_subnet1")
+
+ database = conn.get_all_dbinstances("db-master-1")[0]
+ database.subnet_group.name.should.equal("db_subnet1")
+
+
+@disable_on_py3()
+@mock_rds
+def test_create_database_replica():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ primary = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
+
+ replica = conn.create_dbinstance_read_replica("replica", "db-master-1", "db.m1.small")
+ replica.id.should.equal("replica")
+ replica.instance_class.should.equal("db.m1.small")
+ status_info = replica.status_infos[0]
+ status_info.normal.should.equal(True)
+ status_info.status_type.should.equal('read replication')
+ status_info.status.should.equal('replicating')
+
+ primary = conn.get_all_dbinstances("db-master-1")[0]
+ primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
+
+ conn.delete_dbinstance("replica")
+
+ primary = conn.get_all_dbinstances("db-master-1")[0]
+ list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
diff --git a/tests/test_rds/test_server.py b/tests/test_rds/test_server.py
new file mode 100644
index 000000000..224704a0b
--- /dev/null
+++ b/tests/test_rds/test_server.py
@@ -0,0 +1,20 @@
+from __future__ import unicode_literals
+
+import sure # noqa
+
+import moto.server as server
+from moto import mock_rds
+
+'''
+Test the different server responses
+'''
+
+
+@mock_rds
+def test_list_databases():
+ backend = server.create_backend_app("rds")
+ test_client = backend.test_client()
+
+ res = test_client.get('/?Action=DescribeDBInstances')
+
+    res.data.decode("utf-8").should.contain("<DescribeDBInstancesResult>")