Add cloudformation for RDS.
parent 12118374bd
commit 166f4893ba
@@ -1,11 +1,13 @@
 from __future__ import unicode_literals
 import collections
+import functools
 import logging
 
 from moto.autoscaling import models as autoscaling_models
 from moto.ec2 import models as ec2_models
 from moto.elb import models as elb_models
 from moto.iam import models as iam_models
+from moto.rds import models as rds_models
 from moto.sqs import models as sqs_models
 from .utils import random_suffix
 from .exceptions import MissingParameterError, UnformattedGetAttTemplateException
@@ -31,6 +33,9 @@ MODEL_MAP = {
     "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer,
     "AWS::IAM::InstanceProfile": iam_models.InstanceProfile,
     "AWS::IAM::Role": iam_models.Role,
+    "AWS::RDS::DBInstance": rds_models.Database,
+    "AWS::RDS::DBSecurityGroup": rds_models.SecurityGroup,
+    "AWS::RDS::DBSubnetGroup": rds_models.SubnetGroup,
     "AWS::SQS::Queue": sqs_models.Queue,
 }
 
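For context (illustrative, not part of the diff): the three new MODEL_MAP entries are what let the stack parser route RDS resource types to the moto RDS models. A minimal template fragment of the kind that would now be dispatched might look like the following, written as a Python dict in the style of moto's test fixtures; resource names and property values are hypothetical.

```python
# Illustrative template fragment (Python dict, in the style of moto's test
# fixtures). Each "Type" below now resolves through MODEL_MAP to an RDS model.
template_fragment = {
    "Resources": {
        "MyDBSecurityGroup": {
            "Type": "AWS::RDS::DBSecurityGroup",
            "Properties": {
                "GroupDescription": "db access",
                "DBSecurityGroupIngress": {"CIDRIP": "10.0.0.0/16"},
            },
        },
        "MyDB": {
            "Type": "AWS::RDS::DBInstance",
            "Properties": {
                "AllocatedStorage": "20",
                "DBInstanceClass": "db.m1.medium",
                "Engine": "MySQL",
                "MasterUsername": "my_user",
                "MasterUserPassword": "my_password",
                "DBSecurityGroups": [{"Ref": "MyDBSecurityGroup"}],
            },
        },
    }
}
```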
@@ -57,6 +62,15 @@ NULL_MODELS = [
 logger = logging.getLogger("moto")
 
 
+class LazyDict(dict):
+    def __getitem__(self, key):
+        val = dict.__getitem__(self, key)
+        if callable(val):
+            val = val()
+            self[key] = val
+        return val
+
+
 def clean_json(resource_json, resources_map):
     """
     Cleanup the a resource dict. For now, this just means replacing any Ref node
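For context (not from the commit): LazyDict defers any callable value until the key is first read and then caches the result. That is what later allows conditions to be registered as functools.partial objects and evaluated on demand. A small usage sketch, assuming the LazyDict class above is in scope:

```python
import functools

def evaluate(name):
    # Stand-in for parse_condition; runs only when the key is first read.
    print("evaluating " + name)
    return True

lazy = LazyDict()  # the class defined in the hunk above
lazy["IsProd"] = functools.partial(evaluate, "IsProd")

lazy["IsProd"]  # first access calls the partial and caches the result (True)
lazy["IsProd"]  # second access returns the cached value without re-evaluating
```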
@@ -99,15 +113,15 @@ def clean_json(resource_json, resources_map):
         if 'Fn::If' in resource_json:
             condition_name, true_value, false_value = resource_json['Fn::If']
             if resources_map[condition_name]:
-                return true_value
+                return clean_json(true_value, resources_map)
             else:
-                return false_value
+                return clean_json(false_value, resources_map)
 
         if 'Fn::Join' in resource_json:
             join_list = []
             for val in resource_json['Fn::Join'][1]:
                 cleaned_val = clean_json(val, resources_map)
-                join_list.append(cleaned_val if cleaned_val else '{0}'.format(val))
+                join_list.append('{0}'.format(cleaned_val) if cleaned_val else '{0}'.format(val))
             return resource_json['Fn::Join'][0].join(join_list)
 
         cleaned_json = {}
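For context (illustrative, not from the commit): the Fn::If branches are now passed back through clean_json, so intrinsics nested inside the chosen branch get resolved, and Fn::Join formats each cleaned value before joining. A rough sketch of the intended behaviour, using a plain dict to stand in for the stack's resources map and hypothetical keys:

```python
# Plain dict standing in for the stack's resources map; keys are hypothetical.
resources_map = {"UseBigInstance": True, "Port": 3306}

# The chosen Fn::If branch is itself cleaned, so the nested Ref resolves.
clean_json({"Fn::If": ["UseBigInstance", {"Ref": "Port"}, 1234]}, resources_map)
# expected: 3306

# Fn::Join now formats each cleaned value, so non-string results join cleanly.
clean_json({"Fn::Join": [":", ["db.host.internal", {"Ref": "Port"}]]}, resources_map)
# expected: "db.host.internal:3306"
```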
@@ -168,7 +182,7 @@ def parse_resource(logical_id, resource_json, resources_map, region_name):
     return resource
 
 
-def parse_condition(condition, resources_map):
+def parse_condition(condition, resources_map, condition_map):
     if isinstance(condition, bool):
         return condition
 
@@ -178,22 +192,22 @@
     for value in list(condition.values())[0]:
         # Check if we are referencing another Condition
         if 'Condition' in value:
-            condition_values.append(resources_map[value['Condition']])
+            condition_values.append(condition_map[value['Condition']])
         else:
             condition_values.append(clean_json(value, resources_map))
 
     if condition_operator == "Fn::Equals":
         return condition_values[0] == condition_values[1]
     elif condition_operator == "Fn::Not":
-        return not parse_condition(condition_values[0], resources_map)
+        return not parse_condition(condition_values[0], resources_map, condition_map)
     elif condition_operator == "Fn::And":
         return all([
-            parse_condition(condition_value, resources_map)
+            parse_condition(condition_value, resources_map, condition_map)
             for condition_value
             in condition_values])
     elif condition_operator == "Fn::Or":
         return any([
-            parse_condition(condition_value, resources_map)
+            parse_condition(condition_value, resources_map, condition_map)
             for condition_value
             in condition_values])
 
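For context (illustrative): the new condition_map argument is what lets a condition reference another condition by name through a {"Condition": ...} node, instead of looking the name up among resources. The shape mirrors the updated test_reference_other_conditions test further down:

```python
# resources_map resolves Refs; condition_map resolves {"Condition": name} nodes.
parse_condition(
    condition={"Fn::Not": [{"Condition": "IsProd"}]},
    resources_map={},
    condition_map={"IsProd": True},
)
# expected: False
```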
@@ -227,6 +241,7 @@ class ResourceMap(collections.Mapping):
             "AWS::Region": self._region_name,
             "AWS::StackId": stack_id,
             "AWS::StackName": stack_name,
+            "AWS::NoValue": None,
         }
 
     def __getitem__(self, key):
@@ -273,9 +288,13 @@ class ResourceMap(collections.Mapping):
 
     def load_conditions(self):
         conditions = self._template.get('Conditions', {})
-
+        lazy_condition_map = LazyDict()
         for condition_name, condition in conditions.items():
-            self._parsed_resources[condition_name] = parse_condition(condition, self._parsed_resources)
+            lazy_condition_map[condition_name] = functools.partial(parse_condition,
+                condition, self._parsed_resources, lazy_condition_map)
+
+        for condition_name in lazy_condition_map:
+            self._parsed_resources[condition_name] = lazy_condition_map[condition_name]
 
     def create(self):
         self.load_mapping()
@@ -996,6 +996,7 @@ class SecurityGroup(object):
         self.egress_rules = []
         self.enis = {}
         self.vpc_id = vpc_id
+        self.owner_id = "123456789012"
 
     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -5,7 +5,10 @@ import copy
 import boto.rds
 from jinja2 import Template
 
+from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
 from moto.core import BaseBackend
+from moto.core.utils import get_random_hex
+from moto.ec2.models import ec2_backends
 from .exceptions import DBInstanceNotFoundError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError
 
 
@@ -19,11 +22,15 @@ class Database(object):
         self.region = kwargs.get('region')
         self.engine = kwargs.get("engine")
         self.engine_version = kwargs.get("engine_version")
+        if self.engine_version is None:
+            self.engine_version = "5.6.21"
         self.iops = kwargs.get("iops")
         self.storage_type = kwargs.get("storage_type")
         self.master_username = kwargs.get('master_username')
         self.master_password = kwargs.get('master_password')
         self.auto_minor_version_upgrade = kwargs.get('auto_minor_version_upgrade')
+        if self.auto_minor_version_upgrade is None:
+            self.auto_minor_version_upgrade = True
         self.allocated_storage = kwargs.get('allocated_storage')
         self.db_instance_identifier = kwargs.get('db_instance_identifier')
         self.source_db_identifier = kwargs.get("source_db_identifier")
@@ -32,6 +39,8 @@ class Database(object):
         self.db_instance_identifier = kwargs.get('db_instance_identifier')
         self.db_name = kwargs.get("db_name")
         self.publicly_accessible = kwargs.get("publicly_accessible")
+        if self.publicly_accessible is None:
+            self.publicly_accessible = True
 
         self.backup_retention_period = kwargs.get("backup_retention_period")
         if self.backup_retention_period is None:
@@ -73,6 +82,58 @@ class Database(object):
             if value is not None:
                 setattr(self, key, value)
 
+    def get_cfn_attribute(self, attribute_name):
+        if attribute_name == 'Endpoint.Address':
+            return self.address
+        elif attribute_name == 'Endpoint.Port':
+            return self.port
+        raise UnformattedGetAttTemplateException()
+
+    @classmethod
+    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+        properties = cloudformation_json['Properties']
+
+        db_instance_identifier = properties.get('DBInstanceIdentifier')
+        if not db_instance_identifier:
+            db_instance_identifier = resource_name.lower() + get_random_hex(12)
+        db_security_groups = properties.get('DBSecurityGroups')
+        if not db_security_groups:
+            db_security_groups = []
+        security_groups = [group.group_name for group in db_security_groups]
+        db_subnet_group = properties.get("DBSubnetGroupName")
+        db_subnet_group_name = db_subnet_group.subnet_name if db_subnet_group else None
+        db_kwargs = {
+            "auto_minor_version_upgrade": properties.get('AutoMinorVersionUpgrade'),
+            "allocated_storage": properties.get('AllocatedStorage'),
+            "availability_zone": properties.get("AvailabilityZone"),
+            "backup_retention_period": properties.get("BackupRetentionPeriod"),
+            "db_instance_class": properties.get('DBInstanceClass'),
+            "db_instance_identifier": db_instance_identifier,
+            "db_name": properties.get("DBName"),
+            "db_subnet_group_name": db_subnet_group_name,
+            "engine": properties.get("Engine"),
+            "engine_version": properties.get("EngineVersion"),
+            "iops": properties.get("Iops"),
+            "master_password": properties.get('MasterUserPassword'),
+            "master_username": properties.get('MasterUsername'),
+            "multi_az": properties.get("MultiAZ"),
+            "port": properties.get('Port', 3306),
+            "publicly_accessible": properties.get("PubliclyAccessible"),
+            "region": region_name,
+            "security_groups": security_groups,
+            "storage_type": properties.get("StorageType"),
+        }
+
+        rds_backend = rds_backends[region_name]
+        source_db_identifier = properties.get("SourceDBInstanceIdentifier")
+        if source_db_identifier:
+            # Replica
+            db_kwargs["source_db_identifier"] = source_db_identifier.db_instance_identifier
+            database = rds_backend.create_database_replica(db_kwargs)
+        else:
+            database = rds_backend.create_database(db_kwargs)
+        return database
+
     def to_xml(self):
         template = Template("""<DBInstance>
             <BackupRetentionPeriod>{{ database.backup_retention_period }}</BackupRetentionPeriod>
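For context (illustrative, not from the commit): Database.create_from_cloudformation_json maps the CamelCase CloudFormation Properties keys onto the snake_case constructor kwargs above. A hypothetical Properties block of the kind it reads is sketched below; note that by the time moto's stack parser calls this method, Ref values such as DBSecurityGroups entries have already been cleaned into model objects, which is why group.group_name works in the code above.

```python
# Hypothetical Properties block (keys are the ones the new code looks up;
# values are made up). Omitted keys fall back to the defaults shown above,
# e.g. a generated DBInstanceIdentifier and an empty DBSecurityGroups list.
properties = {
    "AllocatedStorage": "20",
    "DBInstanceClass": "db.m1.medium",
    "Engine": "MySQL",
    "EngineVersion": "5.6.21",
    "MasterUsername": "my_user",
    "MasterUserPassword": "my_password",
    "MultiAZ": "true",
    "Port": 3306,
}
# Assumes the us-west-1 RDS backend is available in-process, as in moto's tests.
database = Database.create_from_cloudformation_json(
    "MyDB", {"Properties": properties}, "us-west-1")
```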
@@ -115,6 +176,7 @@ class Database(object):
                 </DBSecurityGroup>
             {% endfor %}
             </DBSecurityGroups>
+            {% if database.db_subnet_group %}
             <DBSubnetGroup>
               <DBSubnetGroupName>{{ database.db_subnet_group.subnet_name }}</DBSubnetGroupName>
               <DBSubnetGroupDescription>{{ database.db_subnet_group.description }}</DBSubnetGroupDescription>
@@ -133,6 +195,7 @@ class Database(object):
               </Subnets>
               <VpcId>{{ database.db_subnet_group.vpc_id }}</VpcId>
             </DBSubnetGroup>
+            {% endif %}
             <PubliclyAccessible>{{ database.publicly_accessible }}</PubliclyAccessible>
             <AutoMinorVersionUpgrade>{{ database.auto_minor_version_upgrade }}</AutoMinorVersionUpgrade>
             <AllocatedStorage>{{ database.allocated_storage }}</AllocatedStorage>
@@ -150,12 +213,23 @@ class SecurityGroup(object):
     def __init__(self, group_name, description):
         self.group_name = group_name
         self.description = description
-        self.ip_ranges = []
         self.status = "authorized"
+        self.ip_ranges = []
+        self.ec2_security_groups = []
 
     def to_xml(self):
         template = Template("""<DBSecurityGroup>
-            <EC2SecurityGroups/>
+            <EC2SecurityGroups>
+            {% for security_group in security_group.ec2_security_groups %}
+                <EC2SecurityGroup>
+                    <EC2SecurityGroupId>{{ security_group.id }}</EC2SecurityGroupId>
+                    <EC2SecurityGroupName>{{ security_group.name }}</EC2SecurityGroupName>
+                    <EC2SecurityGroupOwnerId>{{ security_group.owner_id }}</EC2SecurityGroupOwnerId>
+                    <Status>authorized</Status>
+                </EC2SecurityGroup>
+            {% endfor %}
+            </EC2SecurityGroups>
+
             <DBSecurityGroupDescription>{{ security_group.description }}</DBSecurityGroupDescription>
             <IPRanges>
             {% for ip_range in security_group.ip_ranges %}
@@ -170,9 +244,36 @@ class SecurityGroup(object):
         </DBSecurityGroup>""")
         return template.render(security_group=self)
 
-    def authorize(self, cidr_ip):
+    def authorize_cidr(self, cidr_ip):
         self.ip_ranges.append(cidr_ip)
 
+    def authorize_security_group(self, security_group):
+        self.ec2_security_groups.append(security_group)
+
+    @classmethod
+    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+        properties = cloudformation_json['Properties']
+        group_name = resource_name.lower() + get_random_hex(12)
+        description = properties['GroupDescription']
+        security_group_ingress = properties['DBSecurityGroupIngress']
+
+        ec2_backend = ec2_backends[region_name]
+        rds_backend = rds_backends[region_name]
+        security_group = rds_backend.create_security_group(
+            group_name,
+            description,
+        )
+        for ingress_type, ingress_value in security_group_ingress.items():
+            if ingress_type == "CIDRIP":
+                security_group.authorize_cidr(ingress_value)
+            elif ingress_type == "EC2SecurityGroupName":
+                subnet = ec2_backend.get_security_group_from_name(ingress_value)
+                security_group.authorize_security_group(subnet)
+            elif ingress_type == "EC2SecurityGroupId":
+                subnet = ec2_backend.get_security_group_from_id(ingress_value)
+                security_group.authorize_security_group(subnet)
+        return security_group
+
 
 class SubnetGroup(object):
     def __init__(self, subnet_name, description, subnets):
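For context (illustrative): DBSecurityGroupIngress is read here as a single mapping whose items() decide the authorization path. The real CloudFormation schema allows a list of ingress rules; the code in this hunk handles one mapping, roughly like this (values hypothetical):

```python
# Hypothetical resource JSON for the method above (values made up).
resource_json = {
    "Type": "AWS::RDS::DBSecurityGroup",
    "Properties": {
        "GroupDescription": "db access",
        # One entry per items() pair: "CIDRIP", "EC2SecurityGroupName"
        # or "EC2SecurityGroupId" (the latter two resolve an EC2 group).
        "DBSecurityGroupIngress": {"CIDRIP": "10.0.0.0/16"},
    },
}
group = SecurityGroup.create_from_cloudformation_json(
    "MyDBSecurityGroup", resource_json, "us-west-1")
# group.ip_ranges would now contain "10.0.0.0/16"
```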
@@ -204,6 +305,24 @@ class SubnetGroup(object):
         </DBSubnetGroup>""")
         return template.render(subnet_group=self)
 
+    @classmethod
+    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
+        properties = cloudformation_json['Properties']
+
+        subnet_name = resource_name.lower() + get_random_hex(12)
+        description = properties['DBSubnetGroupDescription']
+        subnet_ids = properties['SubnetIds']
+
+        ec2_backend = ec2_backends[region_name]
+        subnets = [ec2_backend.get_subnet(subnet_id) for subnet_id in subnet_ids]
+        rds_backend = rds_backends[region_name]
+        subnet_group = rds_backend.create_subnet_group(
+            subnet_name,
+            description,
+            subnets,
+        )
+        return subnet_group
+
 
 class RDSBackend(BaseBackend):
 
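For context (illustrative): the SubnetIds listed on a DBSubnetGroup are resolved against the mocked EC2 backend, so each id must belong to a subnet that exists there, for example one created earlier in the same stack. A hypothetical resource shape with placeholder ids:

```python
# Hypothetical resource shape (placeholder subnet ids).
resource_json = {
    "Type": "AWS::RDS::DBSubnetGroup",
    "Properties": {
        "DBSubnetGroupDescription": "my db subnet group",
        # Each id is looked up with ec2_backend.get_subnet(...) and the resulting
        # subnet objects are handed to rds_backend.create_subnet_group(...).
        "SubnetIds": ["subnet-12345678", "subnet-87654321"],
    },
}
```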
@@ -273,7 +392,7 @@
 
     def authorize_security_group(self, security_group_name, cidr_ip):
         security_group = self.describe_security_groups(security_group_name)[0]
-        security_group.authorize(cidr_ip)
+        security_group.authorize_cidr(cidr_ip)
         return security_group
 
     def create_subnet_group(self, subnet_name, description, subnets):
@@ -18,15 +18,17 @@ from moto import (
     mock_ec2,
     mock_elb,
     mock_iam,
+    mock_rds,
     mock_sqs,
 )
 
 from .fixtures import (
-    single_instance_with_ebs_volume,
-    vpc_single_instance_in_subnet,
     ec2_classic_eip,
+    fn_join,
+    rds_mysql_with_read_replica,
+    single_instance_with_ebs_volume,
     vpc_eip,
-    fn_join
+    vpc_single_instance_in_subnet,
 )
 
 
@@ -350,6 +352,76 @@ def test_vpc_single_instance_in_subnet():
     eip_resource.physical_resource_id.should.equal(eip.allocation_id)
 
 
+@mock_cloudformation()
+@mock_ec2()
+@mock_rds()
+def test_rds_mysql_with_read_replica():
+    ec2_conn = boto.ec2.connect_to_region("us-west-1")
+    ec2_conn.create_security_group('application', 'Our Application Group')
+
+    template_json = json.dumps(rds_mysql_with_read_replica.template)
+    conn = boto.cloudformation.connect_to_region("us-west-1")
+    conn.create_stack(
+        "test_stack",
+        template_body=template_json,
+        parameters=[
+            ("DBInstanceIdentifier", "master_db"),
+            ("DBName", "my_db"),
+            ("DBUser", "my_user"),
+            ("DBPassword", "my_password"),
+            ("DBAllocatedStorage", "20"),
+            ("DBInstanceClass", "db.m1.medium"),
+            ("EC2SecurityGroup", "application"),
+            ("MultiAZ", "true"),
+        ],
+    )
+
+    rds_conn = boto.rds.connect_to_region("us-west-1")
+
+    primary = rds_conn.get_all_dbinstances("master_db")[0]
+    primary.master_username.should.equal("my_user")
+    primary.allocated_storage.should.equal(20)
+    primary.instance_class.should.equal("db.m1.medium")
+    primary.multi_az.should.equal(True)
+    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(1)
+    replica_id = primary.read_replica_dbinstance_identifiers[0]
+
+    replica = rds_conn.get_all_dbinstances(replica_id)[0]
+    replica.instance_class.should.equal("db.m1.medium")
+
+    security_group_name = primary.security_groups[0].name
+    security_group = rds_conn.get_all_dbsecurity_groups(security_group_name)[0]
+    security_group.ec2_groups[0].name.should.equal("application")
+
+
+@mock_cloudformation()
+@mock_ec2()
+@mock_rds()
+def test_rds_mysql_with_read_replica_in_vpc():
+    template_json = json.dumps(rds_mysql_with_read_replica.template)
+    conn = boto.cloudformation.connect_to_region("eu-central-1")
+    conn.create_stack(
+        "test_stack",
+        template_body=template_json,
+        parameters=[
+            ("DBInstanceIdentifier", "master_db"),
+            ("DBName", "my_db"),
+            ("DBUser", "my_user"),
+            ("DBPassword", "my_password"),
+            ("DBAllocatedStorage", "20"),
+            ("DBInstanceClass", "db.m1.medium"),
+            ("MultiAZ", "true"),
+        ],
+    )
+
+    rds_conn = boto.rds.connect_to_region("eu-central-1")
+    primary = rds_conn.get_all_dbinstances("master_db")[0]
+
+    subnet_group_name = primary.subnet_group.name
+    subnet_group = rds_conn.get_all_db_subnet_groups(subnet_group_name)[0]
+    subnet_group.description.should.equal("my db subnet group")
+
+
 @mock_autoscaling()
 @mock_iam()
 @mock_cloudformation()
@@ -151,11 +151,13 @@ def test_parse_equals_condition():
     parse_condition(
         condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
         resources_map={"EnvType": "prod"},
+        condition_map={},
     ).should.equal(True)
 
     parse_condition(
         condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
         resources_map={"EnvType": "staging"},
+        condition_map={},
     ).should.equal(False)
 
 
@@ -167,6 +169,7 @@ def test_parse_not_condition():
             }]
         },
         resources_map={"EnvType": "prod"},
+        condition_map={},
     ).should.equal(False)
 
     parse_condition(
@@ -176,6 +179,7 @@ def test_parse_not_condition():
             }]
         },
         resources_map={"EnvType": "staging"},
+        condition_map={},
     ).should.equal(True)
 
 
@@ -188,6 +192,7 @@ def test_parse_and_condition():
             ]
         },
         resources_map={"EnvType": "prod"},
+        condition_map={},
     ).should.equal(False)
 
     parse_condition(
@@ -198,6 +203,7 @@ def test_parse_and_condition():
             ]
         },
         resources_map={"EnvType": "prod"},
+        condition_map={},
     ).should.equal(True)
 
 
@@ -210,6 +216,7 @@ def test_parse_or_condition():
             ]
         },
         resources_map={"EnvType": "prod"},
+        condition_map={},
     ).should.equal(True)
 
     parse_condition(
@@ -220,11 +227,13 @@ def test_parse_or_condition():
             ]
         },
         resources_map={"EnvType": "prod"},
+        condition_map={},
     ).should.equal(False)
 
 
 def test_reference_other_conditions():
     parse_condition(
         condition={"Fn::Not": [{"Condition": "OtherCondition"}]},
-        resources_map={"OtherCondition": True},
+        resources_map={},
+        condition_map={"OtherCondition": True},
     ).should.equal(False)