commit
5c589b24af
@ -5,7 +5,6 @@ python:
|
||||
env:
|
||||
matrix:
|
||||
- BOTO_VERSION=2.34.0
|
||||
- BOTO_VERSION=2.25.0
|
||||
matrix:
|
||||
include:
|
||||
- python: "3.3"
|
||||
|
@ -11,6 +11,7 @@ from .ec2 import mock_ec2 # flake8: noqa
|
||||
from .elb import mock_elb # flake8: noqa
|
||||
from .emr import mock_emr # flake8: noqa
|
||||
from .iam import mock_iam # flake8: noqa
|
||||
from .redshift import mock_redshift # flake8: noqa
|
||||
from .s3 import mock_s3 # flake8: noqa
|
||||
from .s3bucket_path import mock_s3bucket_path # flake8: noqa
|
||||
from .ses import mock_ses # flake8: noqa
|
||||
|
@ -6,6 +6,7 @@ from moto.dynamodb2 import dynamodb_backend2
|
||||
from moto.ec2 import ec2_backend
|
||||
from moto.elb import elb_backend
|
||||
from moto.emr import emr_backend
|
||||
from moto.redshift import redshift_backend
|
||||
from moto.s3 import s3_backend
|
||||
from moto.s3bucket_path import s3bucket_path_backend
|
||||
from moto.ses import ses_backend
|
||||
@ -21,6 +22,7 @@ BACKENDS = {
|
||||
'ec2': ec2_backend,
|
||||
'elb': elb_backend,
|
||||
'emr': emr_backend,
|
||||
'redshift': redshift_backend,
|
||||
's3': s3_backend,
|
||||
's3bucket_path': s3bucket_path_backend,
|
||||
'ses': ses_backend,
|
||||
|
@ -110,6 +110,19 @@ class BaseResponse(object):
|
||||
def _get_param(self, param_name):
|
||||
return self.querystring.get(param_name, [None])[0]
|
||||
|
||||
def _get_int_param(self, param_name):
|
||||
val = self._get_param(param_name)
|
||||
if val is not None:
|
||||
return int(val)
|
||||
|
||||
def _get_bool_param(self, param_name):
|
||||
val = self._get_param(param_name)
|
||||
if val is not None:
|
||||
if val.lower() == 'true':
|
||||
return True
|
||||
elif val.lower() == 'false':
|
||||
return False
|
||||
|
||||
def _get_multi_param(self, param_prefix):
|
||||
if param_prefix.endswith("."):
|
||||
prefix = param_prefix
|
||||
|
@ -1576,6 +1576,12 @@ class Subnet(TaggedEC2Resource):
|
||||
)
|
||||
return subnet
|
||||
|
||||
@property
def availability_zone(self):
    """AZ reported for this subnet.

    boto exposes no way to enumerate a region's AZs, so default to the
    region's first zone ("<region>a").
    """
    return "{0}a".format(self.ec2_backend.region_name)
|
||||
|
||||
@property
def physical_resource_id(self):
    """CloudFormation physical resource id — simply this subnet's EC2 id."""
    return self.id
|
||||
@ -2435,6 +2441,15 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
|
||||
ElasticAddressBackend, KeyPairBackend, DHCPOptionsSetBackend,
|
||||
NetworkAclBackend):
|
||||
|
||||
def __init__(self, region_name):
    """Create an EC2 backend bound to *region_name*."""
    super(EC2Backend, self).__init__()
    self.region_name = region_name
|
||||
|
||||
def reset(self):
    """Drop all mocked state while preserving the backend's region binding."""
    preserved_region = self.region_name
    self.__dict__ = {}
    self.__init__(preserved_region)
|
||||
|
||||
# Use this to generate a proper error template response when in a response handler.
def raise_error(self, code, message):
    """Abort handling by raising an EC2ClientError with *code* and *message*."""
    raise EC2ClientError(code, message)
|
||||
@ -2488,4 +2503,4 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
|
||||
|
||||
ec2_backends = {}
|
||||
for region in boto.ec2.regions():
|
||||
ec2_backends[region.name] = EC2Backend()
|
||||
ec2_backends[region.name] = EC2Backend(region.name)
|
||||
|
12
moto/redshift/__init__.py
Normal file
12
moto/redshift/__init__.py
Normal file
@ -0,0 +1,12 @@
|
||||
from __future__ import unicode_literals
|
||||
from .models import redshift_backends
|
||||
from ..core.models import MockAWS
|
||||
|
||||
redshift_backend = redshift_backends['us-east-1']
|
||||
|
||||
|
||||
def mock_redshift(func=None):
    """Mock AWS Redshift.

    Usable both as a bare decorator (``@mock_redshift``) and as a factory
    for a decorator / context manager (``mock_redshift()``).
    """
    mocked = MockAWS(redshift_backends)
    if func is None:
        return mocked
    return mocked(func)
|
52
moto/redshift/exceptions.py
Normal file
52
moto/redshift/exceptions.py
Normal file
@ -0,0 +1,52 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
from werkzeug.exceptions import BadRequest
|
||||
|
||||
|
||||
class RedshiftClientError(BadRequest):
    """Base HTTP 400 error whose body mimics an AWS Redshift JSON error."""

    def __init__(self, code, message):
        super(RedshiftClientError, self).__init__()
        payload = {
            "Error": {
                "Code": code,
                "Message": message,
                'Type': 'Sender',
            },
            'RequestId': '6876f774-7273-11e4-85dc-39e55ca848d1',
        }
        self.description = json.dumps(payload)
|
||||
|
||||
|
||||
class ClusterNotFoundError(RedshiftClientError):
    """Raised when a cluster identifier matches no known cluster."""

    def __init__(self, cluster_identifier):
        message = "Cluster {0} not found.".format(cluster_identifier)
        super(ClusterNotFoundError, self).__init__('ClusterNotFound', message)
|
||||
|
||||
|
||||
class ClusterSubnetGroupNotFoundError(RedshiftClientError):
    """Raised when a subnet-group name matches no known subnet group."""

    def __init__(self, subnet_identifier):
        message = "Subnet group {0} not found.".format(subnet_identifier)
        super(ClusterSubnetGroupNotFoundError, self).__init__(
            'ClusterSubnetGroupNotFound', message)
|
||||
|
||||
|
||||
class ClusterSecurityGroupNotFoundError(RedshiftClientError):
    """Raised when a security-group name matches no known security group."""

    def __init__(self, group_identifier):
        message = "Security group {0} not found.".format(group_identifier)
        super(ClusterSecurityGroupNotFoundError, self).__init__(
            'ClusterSecurityGroupNotFound', message)
|
||||
|
||||
|
||||
class ClusterParameterGroupNotFoundError(RedshiftClientError):
    """Raised when a parameter-group name matches no known parameter group."""

    def __init__(self, group_identifier):
        message = "Parameter group {0} not found.".format(group_identifier)
        super(ClusterParameterGroupNotFoundError, self).__init__(
            'ClusterParameterGroupNotFound', message)
|
||||
|
||||
|
||||
class InvalidSubnetError(RedshiftClientError):
    """Raised when a given subnet id resolves to no existing EC2 subnet."""

    def __init__(self, subnet_identifier):
        message = "Subnet {0} not found.".format(subnet_identifier)
        super(InvalidSubnetError, self).__init__('InvalidSubnet', message)
|
304
moto/redshift/models.py
Normal file
304
moto/redshift/models.py
Normal file
@ -0,0 +1,304 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import boto.redshift
|
||||
from moto.core import BaseBackend
|
||||
from moto.ec2 import ec2_backends
|
||||
from .exceptions import (
|
||||
ClusterNotFoundError,
|
||||
ClusterParameterGroupNotFoundError,
|
||||
ClusterSecurityGroupNotFoundError,
|
||||
ClusterSubnetGroupNotFoundError,
|
||||
InvalidSubnetError,
|
||||
)
|
||||
|
||||
|
||||
class Cluster(object):
    """In-memory model of a Redshift cluster.

    Mirrors the create-cluster parameters, filling in AWS defaults for
    anything the caller omitted, and renders itself in the shape the
    describe-clusters JSON response expects.
    """

    def __init__(self, redshift_backend, cluster_identifier, node_type, master_username,
                 master_user_password, db_name, cluster_type, cluster_security_groups,
                 vpc_security_group_ids, cluster_subnet_group_name, availability_zone,
                 preferred_maintenance_window, cluster_parameter_group_name,
                 automated_snapshot_retention_period, port, cluster_version,
                 allow_version_upgrade, number_of_nodes, publicly_accessible,
                 encrypted, region):
        self.redshift_backend = redshift_backend
        self.cluster_identifier = cluster_identifier
        self.node_type = node_type
        self.master_username = master_username
        self.master_user_password = master_user_password
        self.vpc_security_group_ids = vpc_security_group_ids
        self.cluster_subnet_group_name = cluster_subnet_group_name
        self.publicly_accessible = publicly_accessible
        self.encrypted = encrypted

        # Defaults for everything the caller left unset.
        self.db_name = db_name or "dev"
        self.cluster_version = cluster_version or "1.0"
        self.port = port or 5439
        self.automated_snapshot_retention_period = automated_snapshot_retention_period or 1
        self.preferred_maintenance_window = preferred_maintenance_window or "Mon:03:00-Mon:03:30"
        # None means "unspecified"; an explicit False must be preserved.
        if allow_version_upgrade is None:
            self.allow_version_upgrade = True
        else:
            self.allow_version_upgrade = allow_version_upgrade

        if cluster_parameter_group_name:
            self.cluster_parameter_group_name = [cluster_parameter_group_name]
        else:
            self.cluster_parameter_group_name = ['default.redshift-1.0']

        self.cluster_security_groups = cluster_security_groups or ["Default"]

        if availability_zone:
            self.availability_zone = availability_zone
        else:
            # This could probably be smarter, but there doesn't appear to be a
            # way to pull AZs for a region in boto; assume "<region>a" exists.
            self.availability_zone = region + "a"

        if cluster_type == 'single-node':
            self.number_of_nodes = 1
        else:
            self.number_of_nodes = number_of_nodes or 1

    @property
    def security_groups(self):
        """Backend SecurityGroup objects attached to this cluster, by name."""
        attached_names = self.cluster_security_groups
        return [
            group for group
            in self.redshift_backend.describe_cluster_security_groups()
            if group.cluster_security_group_name in attached_names
        ]

    @property
    def vpc_security_groups(self):
        """EC2 security groups referenced by id on this cluster."""
        wanted_ids = self.vpc_security_group_ids
        return [
            group for group
            in self.redshift_backend.ec2_backend.describe_security_groups()
            if group.id in wanted_ids
        ]

    @property
    def parameter_groups(self):
        """Backend ParameterGroup objects associated with this cluster."""
        wanted_names = self.cluster_parameter_group_name
        return [
            group for group
            in self.redshift_backend.describe_cluster_parameter_groups()
            if group.cluster_parameter_group_name in wanted_names
        ]

    def to_json(self):
        """Serialize in describe-clusters response shape (password masked)."""
        return {
            "MasterUsername": self.master_username,
            "MasterUserPassword": "****",
            "ClusterVersion": self.cluster_version,
            "VpcSecurityGroups": [{
                "Status": "active",
                "VpcSecurityGroupId": group.id
            } for group in self.vpc_security_groups],
            "ClusterSubnetGroupName": self.cluster_subnet_group_name,
            "AvailabilityZone": self.availability_zone,
            "ClusterStatus": "creating",
            "NumberOfNodes": self.number_of_nodes,
            "AutomatedSnapshotRetentionPeriod": self.automated_snapshot_retention_period,
            "PubliclyAccessible": self.publicly_accessible,
            "Encrypted": self.encrypted,
            "DBName": self.db_name,
            "PreferredMaintenanceWindow": self.preferred_maintenance_window,
            "ClusterParameterGroups": [{
                "ParameterApplyStatus": "in-sync",
                "ParameterGroupName": group.cluster_parameter_group_name,
            } for group in self.parameter_groups],
            "ClusterSecurityGroups": [{
                "Status": "active",
                "ClusterSecurityGroupName": group.cluster_security_group_name,
            } for group in self.security_groups],
            "Port": self.port,
            "NodeType": self.node_type,
            "ClusterIdentifier": self.cluster_identifier,
            "AllowVersionUpgrade": self.allow_version_upgrade,
        }
|
||||
|
||||
|
||||
class SubnetGroup(object):
    """Named group of EC2 subnets a Redshift cluster can launch into."""

    def __init__(self, ec2_backend, cluster_subnet_group_name, description, subnet_ids):
        self.ec2_backend = ec2_backend
        self.cluster_subnet_group_name = cluster_subnet_group_name
        self.description = description
        self.subnet_ids = subnet_ids
        # Validate eagerly: at least one id must resolve to a real subnet.
        if not self.subnets:
            raise InvalidSubnetError(subnet_ids)

    @property
    def subnets(self):
        """Resolve the stored subnet ids against the EC2 backend."""
        return self.ec2_backend.get_all_subnets(filters={'subnet-id': self.subnet_ids})

    @property
    def vpc_id(self):
        # Report the first member subnet's VPC.
        return self.subnets[0].vpc_id

    def to_json(self):
        """Serialize in describe-cluster-subnet-groups response shape."""
        member_subnets = self.subnets
        return {
            "VpcId": self.vpc_id,
            "Description": self.description,
            "ClusterSubnetGroupName": self.cluster_subnet_group_name,
            "SubnetGroupStatus": "Complete",
            "Subnets": [{
                "SubnetStatus": "Active",
                "SubnetIdentifier": subnet.id,
                "SubnetAvailabilityZone": {
                    "Name": subnet.availability_zone
                },
            } for subnet in member_subnets],
        }
|
||||
|
||||
|
||||
class SecurityGroup(object):
    """Minimal Redshift cluster security group: a name plus a description."""

    def __init__(self, cluster_security_group_name, description):
        self.cluster_security_group_name = cluster_security_group_name
        self.description = description

    def to_json(self):
        """Serialize in describe-cluster-security-groups response shape."""
        return {
            "EC2SecurityGroups": [],
            "IPRanges": [],
            "Description": self.description,
            "ClusterSecurityGroupName": self.cluster_security_group_name,
        }
|
||||
|
||||
|
||||
class ParameterGroup(object):
    """Redshift cluster parameter group: name, engine family, description."""

    def __init__(self, cluster_parameter_group_name, group_family, description):
        self.cluster_parameter_group_name = cluster_parameter_group_name
        self.group_family = group_family
        self.description = description

    def to_json(self):
        """Serialize in describe-cluster-parameter-groups response shape."""
        return {
            "ParameterGroupFamily": self.group_family,
            "Description": self.description,
            "ParameterGroupName": self.cluster_parameter_group_name,
        }
|
||||
|
||||
|
||||
class RedshiftBackend(BaseBackend):
    """Mock Redshift backend.

    Holds in-memory registries for clusters, subnet groups, security groups
    and parameter groups. The describe/delete operations all follow the same
    lookup-or-raise pattern, factored into the `_describe`/`_delete` helpers
    so the per-resource methods stay one-liners and cannot drift apart.
    """

    def __init__(self, ec2_backend):
        self.clusters = {}
        self.subnet_groups = {}
        # AWS pre-provisions a default security group and parameter group.
        self.security_groups = {
            "Default": SecurityGroup("Default", "Default Redshift Security Group")
        }
        self.parameter_groups = {
            "default.redshift-1.0": ParameterGroup(
                "default.redshift-1.0",
                "redshift-1.0",
                "Default Redshift parameter group",
            )
        }
        self.ec2_backend = ec2_backend

    def reset(self):
        """Wipe all state, keeping the EC2 backend this one is paired with."""
        ec2_backend = self.ec2_backend
        self.__dict__ = {}
        self.__init__(ec2_backend)

    # -- shared registry helpers -----------------------------------------

    @staticmethod
    def _describe(registry, identifier, not_found_error):
        """Return [registry[identifier]] when an identifier is given, else all
        values. Raises *not_found_error* for an unknown identifier."""
        if identifier:
            if identifier in registry:
                return [registry[identifier]]
            raise not_found_error(identifier)
        return registry.values()

    @staticmethod
    def _delete(registry, identifier, not_found_error):
        """Pop and return registry[identifier], raising when absent."""
        if identifier in registry:
            return registry.pop(identifier)
        raise not_found_error(identifier)

    # -- clusters ---------------------------------------------------------

    def create_cluster(self, **cluster_kwargs):
        cluster_identifier = cluster_kwargs['cluster_identifier']
        cluster = Cluster(self, **cluster_kwargs)
        self.clusters[cluster_identifier] = cluster
        return cluster

    def describe_clusters(self, cluster_identifier=None):
        return self._describe(self.clusters, cluster_identifier, ClusterNotFoundError)

    def modify_cluster(self, **cluster_kwargs):
        """Apply attribute changes; a new identifier re-keys the registry."""
        cluster_identifier = cluster_kwargs.pop('cluster_identifier')
        new_cluster_identifier = cluster_kwargs.pop('new_cluster_identifier', None)

        cluster = self.describe_clusters(cluster_identifier)[0]

        for key, value in cluster_kwargs.items():
            setattr(cluster, key, value)

        if new_cluster_identifier:
            self.delete_cluster(cluster_identifier)
            cluster.cluster_identifier = new_cluster_identifier
            self.clusters[new_cluster_identifier] = cluster

        return cluster

    def delete_cluster(self, cluster_identifier):
        return self._delete(self.clusters, cluster_identifier, ClusterNotFoundError)

    # -- subnet groups ----------------------------------------------------

    def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids):
        subnet_group = SubnetGroup(self.ec2_backend, cluster_subnet_group_name, description, subnet_ids)
        self.subnet_groups[cluster_subnet_group_name] = subnet_group
        return subnet_group

    def describe_cluster_subnet_groups(self, subnet_identifier=None):
        return self._describe(self.subnet_groups, subnet_identifier,
                              ClusterSubnetGroupNotFoundError)

    def delete_cluster_subnet_group(self, subnet_identifier):
        return self._delete(self.subnet_groups, subnet_identifier,
                            ClusterSubnetGroupNotFoundError)

    # -- security groups --------------------------------------------------

    def create_cluster_security_group(self, cluster_security_group_name, description):
        security_group = SecurityGroup(cluster_security_group_name, description)
        self.security_groups[cluster_security_group_name] = security_group
        return security_group

    def describe_cluster_security_groups(self, security_group_name=None):
        return self._describe(self.security_groups, security_group_name,
                              ClusterSecurityGroupNotFoundError)

    def delete_cluster_security_group(self, security_group_identifier):
        return self._delete(self.security_groups, security_group_identifier,
                            ClusterSecurityGroupNotFoundError)

    # -- parameter groups -------------------------------------------------

    def create_cluster_parameter_group(self, cluster_parameter_group_name,
                                       group_family, description):
        parameter_group = ParameterGroup(cluster_parameter_group_name, group_family, description)
        self.parameter_groups[cluster_parameter_group_name] = parameter_group
        return parameter_group

    def describe_cluster_parameter_groups(self, parameter_group_name=None):
        return self._describe(self.parameter_groups, parameter_group_name,
                              ClusterParameterGroupNotFoundError)

    def delete_cluster_parameter_group(self, parameter_group_name):
        return self._delete(self.parameter_groups, parameter_group_name,
                            ClusterParameterGroupNotFoundError)
|
||||
|
||||
|
||||
# One backend per Redshift region, each paired with that region's EC2 backend.
redshift_backends = {
    region.name: RedshiftBackend(ec2_backends[region.name])
    for region in boto.redshift.regions()
}
|
256
moto/redshift/responses.py
Normal file
256
moto/redshift/responses.py
Normal file
@ -0,0 +1,256 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
|
||||
from moto.core.responses import BaseResponse
|
||||
from .models import redshift_backends
|
||||
|
||||
|
||||
class RedshiftResponse(BaseResponse):
    """Translates Redshift query-API requests into backend calls and JSON.

    Fixes over the previous version:
    - `PubliclyAccessible` and `Encrypted` are boolean API flags; they are
      now parsed with `_get_bool_param` (as `AllowVersionUpgrade` already
      was) instead of being passed through as raw strings.
    - The identical response-envelope boilerplate is factored into
      `_build_response`.
    """

    @property
    def redshift_backend(self):
        # The mocked backend serving this request's region.
        return redshift_backends[self.region]

    def _build_response(self, action, result=None):
        """Wrap *result* in the standard '<action>Response' JSON envelope.

        When *result* is given it becomes the '<action>Result' member; the
        fixed ResponseMetadata block is always appended.
        """
        body = {}
        if result is not None:
            body["{0}Result".format(action)] = result
        body["ResponseMetadata"] = {
            "RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
        }
        return json.dumps({"{0}Response".format(action): body})

    def create_cluster(self):
        cluster_kwargs = {
            "cluster_identifier": self._get_param('ClusterIdentifier'),
            "node_type": self._get_param('NodeType'),
            "master_username": self._get_param('MasterUsername'),
            "master_user_password": self._get_param('MasterUserPassword'),
            "db_name": self._get_param('DBName'),
            "cluster_type": self._get_param('ClusterType'),
            "cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'),
            "vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'),
            "cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
            "availability_zone": self._get_param('AvailabilityZone'),
            "preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
            "cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
            "automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
            "port": self._get_int_param('Port'),
            "cluster_version": self._get_param('ClusterVersion'),
            "allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
            "number_of_nodes": self._get_int_param('NumberOfNodes'),
            # Boolean flags — parse, don't pass the raw string through.
            "publicly_accessible": self._get_bool_param("PubliclyAccessible"),
            "encrypted": self._get_bool_param("Encrypted"),
            "region": self.region,
        }
        cluster = self.redshift_backend.create_cluster(**cluster_kwargs)
        return self._build_response("CreateCluster", {"Cluster": cluster.to_json()})

    def describe_clusters(self):
        cluster_identifier = self._get_param("ClusterIdentifier")
        clusters = self.redshift_backend.describe_clusters(cluster_identifier)
        return self._build_response(
            "DescribeClusters",
            {"Clusters": [cluster.to_json() for cluster in clusters]})

    def modify_cluster(self):
        cluster_kwargs = {
            "cluster_identifier": self._get_param('ClusterIdentifier'),
            "new_cluster_identifier": self._get_param('NewClusterIdentifier'),
            "node_type": self._get_param('NodeType'),
            "master_user_password": self._get_param('MasterUserPassword'),
            "cluster_type": self._get_param('ClusterType'),
            "cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'),
            "vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'),
            "cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
            "preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
            "cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
            "automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
            "cluster_version": self._get_param('ClusterVersion'),
            "allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
            "number_of_nodes": self._get_int_param('NumberOfNodes'),
            # Boolean flags — parse, don't pass the raw string through.
            "publicly_accessible": self._get_bool_param("PubliclyAccessible"),
            "encrypted": self._get_bool_param("Encrypted"),
        }
        cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
        return self._build_response("ModifyCluster", {"Cluster": cluster.to_json()})

    def delete_cluster(self):
        cluster_identifier = self._get_param("ClusterIdentifier")
        cluster = self.redshift_backend.delete_cluster(cluster_identifier)
        return self._build_response("DeleteCluster", {"Cluster": cluster.to_json()})

    def create_cluster_subnet_group(self):
        cluster_subnet_group_name = self._get_param('ClusterSubnetGroupName')
        description = self._get_param('Description')
        subnet_ids = self._get_multi_param('SubnetIds.member')

        subnet_group = self.redshift_backend.create_cluster_subnet_group(
            cluster_subnet_group_name=cluster_subnet_group_name,
            description=description,
            subnet_ids=subnet_ids,
        )
        return self._build_response(
            "CreateClusterSubnetGroup",
            {"ClusterSubnetGroup": subnet_group.to_json()})

    def describe_cluster_subnet_groups(self):
        subnet_identifier = self._get_param("ClusterSubnetGroupName")
        subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(subnet_identifier)
        return self._build_response(
            "DescribeClusterSubnetGroups",
            {"ClusterSubnetGroups": [subnet_group.to_json() for subnet_group in subnet_groups]})

    def delete_cluster_subnet_group(self):
        subnet_identifier = self._get_param("ClusterSubnetGroupName")
        self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
        return self._build_response("DeleteClusterSubnetGroup")

    def create_cluster_security_group(self):
        cluster_security_group_name = self._get_param('ClusterSecurityGroupName')
        description = self._get_param('Description')

        security_group = self.redshift_backend.create_cluster_security_group(
            cluster_security_group_name=cluster_security_group_name,
            description=description,
        )
        return self._build_response(
            "CreateClusterSecurityGroup",
            {"ClusterSecurityGroup": security_group.to_json()})

    def describe_cluster_security_groups(self):
        cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
        security_groups = self.redshift_backend.describe_cluster_security_groups(cluster_security_group_name)
        return self._build_response(
            "DescribeClusterSecurityGroups",
            {"ClusterSecurityGroups": [security_group.to_json() for security_group in security_groups]})

    def delete_cluster_security_group(self):
        security_group_identifier = self._get_param("ClusterSecurityGroupName")
        self.redshift_backend.delete_cluster_security_group(security_group_identifier)
        return self._build_response("DeleteClusterSecurityGroup")

    def create_cluster_parameter_group(self):
        cluster_parameter_group_name = self._get_param('ParameterGroupName')
        group_family = self._get_param('ParameterGroupFamily')
        description = self._get_param('Description')

        parameter_group = self.redshift_backend.create_cluster_parameter_group(
            cluster_parameter_group_name,
            group_family,
            description,
        )
        return self._build_response(
            "CreateClusterParameterGroup",
            {"ClusterParameterGroup": parameter_group.to_json()})

    def describe_cluster_parameter_groups(self):
        cluster_parameter_group_name = self._get_param("ParameterGroupName")
        parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(cluster_parameter_group_name)
        return self._build_response(
            "DescribeClusterParameterGroups",
            {"ParameterGroups": [parameter_group.to_json() for parameter_group in parameter_groups]})

    def delete_cluster_parameter_group(self):
        cluster_parameter_group_name = self._get_param("ParameterGroupName")
        self.redshift_backend.delete_cluster_parameter_group(cluster_parameter_group_name)
        return self._build_response("DeleteClusterParameterGroup")
|
10
moto/redshift/urls.py
Normal file
10
moto/redshift/urls.py
Normal file
@ -0,0 +1,10 @@
|
||||
from __future__ import unicode_literals
|
||||
from .responses import RedshiftResponse
|
||||
|
||||
# Every Redshift endpoint is a query-style POST to the region's root path,
# handled by a single dispatcher.
response = RedshiftResponse()

url_bases = [
    "https?://redshift.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': response.dispatch,
}
|
1
moto/redshift/utils.py
Normal file
1
moto/redshift/utils.py
Normal file
@ -0,0 +1 @@
|
||||
from __future__ import unicode_literals
|
441
tests/test_redshift/test_redshift.py
Normal file
441
tests/test_redshift/test_redshift.py
Normal file
@ -0,0 +1,441 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import boto
|
||||
from boto.redshift.exceptions import (
|
||||
ClusterNotFound,
|
||||
ClusterParameterGroupNotFound,
|
||||
ClusterSecurityGroupNotFound,
|
||||
ClusterSubnetGroupNotFound,
|
||||
InvalidSubnet,
|
||||
)
|
||||
import sure # noqa
|
||||
|
||||
from moto import mock_ec2, mock_redshift
|
||||
|
||||
|
||||
@mock_redshift
def test_create_cluster():
    """Every create-cluster parameter is echoed back by describe-clusters."""
    conn = boto.redshift.connect_to_region("us-east-1")
    cluster_identifier = 'my_cluster'

    conn.create_cluster(
        cluster_identifier,
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
        db_name="my_db",
        cluster_type="multi-node",
        availability_zone="us-east-1d",
        preferred_maintenance_window="Mon:03:00-Mon:11:00",
        automated_snapshot_retention_period=10,
        port=1234,
        cluster_version="1.0",
        allow_version_upgrade=True,
        number_of_nodes=3,
    )

    describe_response = conn.describe_clusters(cluster_identifier)
    cluster = describe_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

    expected = {
        'ClusterIdentifier': cluster_identifier,
        'NodeType': "dw.hs1.xlarge",
        'MasterUsername': "username",
        'DBName': "my_db",
        'VpcSecurityGroups': [],
        'ClusterSubnetGroupName': None,
        'AvailabilityZone': "us-east-1d",
        'PreferredMaintenanceWindow': "Mon:03:00-Mon:11:00",
        'AutomatedSnapshotRetentionPeriod': 10,
        'Port': 1234,
        'ClusterVersion': "1.0",
        'AllowVersionUpgrade': True,
        'NumberOfNodes': 3,
    }
    for key, value in expected.items():
        cluster[key].should.equal(value)
    cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("Default")
    cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0")
|
||||
|
||||
|
||||
@mock_redshift
def test_create_single_node_cluster():
    """A 'single-node' cluster always reports exactly one node."""
    conn = boto.redshift.connect_to_region("us-east-1")
    cluster_identifier = 'my_cluster'

    conn.create_cluster(
        cluster_identifier,
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
        db_name="my_db",
        cluster_type="single-node",
    )

    describe_response = conn.describe_clusters(cluster_identifier)
    cluster = describe_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

    expected = {
        'ClusterIdentifier': cluster_identifier,
        'NodeType': "dw.hs1.xlarge",
        'MasterUsername': "username",
        'DBName': "my_db",
        'NumberOfNodes': 1,
    }
    for key, value in expected.items():
        cluster[key].should.equal(value)
|
||||
|
||||
|
||||
@mock_redshift
def test_default_cluster_attibutes():
    """Omitted create-cluster parameters fall back to the service defaults."""
    conn = boto.redshift.connect_to_region("us-east-1")
    cluster_identifier = 'my_cluster'

    conn.create_cluster(
        cluster_identifier,
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
    )

    describe_response = conn.describe_clusters(cluster_identifier)
    cluster = describe_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

    assert "us-east-" in cluster['AvailabilityZone']
    cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0")

    expected_defaults = {
        'DBName': "dev",
        'ClusterSubnetGroupName': None,
        'PreferredMaintenanceWindow': "Mon:03:00-Mon:03:30",
        'AutomatedSnapshotRetentionPeriod': 1,
        'Port': 5439,
        'ClusterVersion': "1.0",
        'AllowVersionUpgrade': True,
        'NumberOfNodes': 1,
    }
    for key, value in expected_defaults.items():
        cluster[key].should.equal(value)
|
||||
|
||||
|
||||
@mock_redshift
@mock_ec2
def test_create_cluster_in_subnet_group():
    """A cluster created with a subnet group reports that group's name."""
    vpc_conn = boto.connect_vpc()
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24")

    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet_group",
        "This is my subnet group",
        subnet_ids=[subnet.id],
    )

    redshift.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
        cluster_subnet_group_name='my_subnet_group',
    )

    response = redshift.describe_clusters("my_cluster")
    result = response['DescribeClustersResponse']['DescribeClustersResult']
    cluster = result['Clusters'][0]
    cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group')

@mock_redshift
def test_create_cluster_with_security_group():
    """Security groups passed at creation appear on the described cluster."""
    redshift = boto.redshift.connect_to_region("us-east-1")
    expected_groups = ["security_group1", "security_group2"]
    for group_name in expected_groups:
        redshift.create_cluster_security_group(
            group_name,
            "This is my security group",
        )

    identifier = 'my_cluster'
    redshift.create_cluster(
        identifier,
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
        cluster_security_groups=expected_groups
    )

    response = redshift.describe_clusters(identifier)
    result = response['DescribeClustersResponse']['DescribeClustersResult']
    cluster = result['Clusters'][0]
    attached = [
        group['ClusterSecurityGroupName']
        for group in cluster['ClusterSecurityGroups']
    ]
    set(attached).should.equal(set(expected_groups))

@mock_redshift
@mock_ec2
def test_create_cluster_with_vpc_security_groups():
    """VPC security group ids passed at creation appear on the cluster."""
    vpc_conn = boto.connect_vpc()
    ec2_conn = boto.connect_ec2()
    redshift = boto.connect_redshift()

    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    vpc_group = ec2_conn.create_security_group(
        "vpc_security_group", "a group", vpc_id=vpc.id)

    redshift.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
        vpc_security_group_ids=[vpc_group.id],
    )

    response = redshift.describe_clusters("my_cluster")
    result = response['DescribeClustersResponse']['DescribeClustersResult']
    cluster = result['Clusters'][0]
    attached_ids = [
        group['VpcSecurityGroupId']
        for group in cluster['VpcSecurityGroups']
    ]
    list(attached_ids).should.equal([vpc_group.id])

@mock_redshift
def test_create_cluster_with_parameter_group():
    """A custom parameter group given at creation replaces the default one."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )

    redshift.create_cluster(
        "my_cluster",
        node_type="dw.hs1.xlarge",
        master_username="username",
        master_user_password="password",
        cluster_parameter_group_name='my_parameter_group',
    )

    response = redshift.describe_clusters("my_cluster")
    result = response['DescribeClustersResponse']['DescribeClustersResult']
    cluster = result['Clusters'][0]
    attached = cluster['ClusterParameterGroups'][0]
    attached['ParameterGroupName'].should.equal("my_parameter_group")

@mock_redshift
def test_describe_non_existant_cluster():
    """Describing an unknown cluster identifier raises ClusterNotFound."""
    redshift = boto.redshift.connect_to_region("us-east-1")
    describe = redshift.describe_clusters.when.called_with("not-a-cluster")
    describe.should.throw(ClusterNotFound)

@mock_redshift
def test_delete_cluster():
    """Deleting a cluster removes it; deleting an unknown id raises.

    NOTE(review): node_type='single-node' looks like a cluster_type value
    rather than a node type (compare other tests using "dw.hs1.xlarge") —
    the mock accepts it, but confirm the intent before changing.
    """
    redshift = boto.connect_redshift()
    identifier = 'my_cluster'

    redshift.create_cluster(
        identifier,
        node_type='single-node',
        master_username="username",
        master_user_password="password",
    )

    def listed_clusters():
        response = redshift.describe_clusters()
        result = response['DescribeClustersResponse']['DescribeClustersResult']
        return list(result['Clusters'])

    listed_clusters().should.have.length_of(1)

    redshift.delete_cluster(identifier)

    listed_clusters().should.have.length_of(0)

    # Deleting an identifier that does not exist must raise.
    delete = redshift.delete_cluster.when.called_with("not-a-cluster")
    delete.should.throw(ClusterNotFound)

@mock_redshift
def test_modify_cluster():
    """modify_cluster updates every mutable attribute, including the id."""
    redshift = boto.connect_redshift()
    identifier = 'my_cluster'

    # Pre-create the security and parameter groups the modification attaches.
    redshift.create_cluster_security_group(
        "security_group",
        "This is my security group",
    )
    redshift.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )

    redshift.create_cluster(
        identifier,
        node_type='single-node',
        master_username="username",
        master_user_password="password",
    )

    redshift.modify_cluster(
        identifier,
        cluster_type="multi-node",
        node_type="dw.hs1.xlarge",
        number_of_nodes=2,
        cluster_security_groups="security_group",
        master_user_password="new_password",
        cluster_parameter_group_name="my_parameter_group",
        automated_snapshot_retention_period=7,
        preferred_maintenance_window="Tue:03:00-Tue:11:00",
        allow_version_upgrade=False,
        new_cluster_identifier="new_identifier",
    )

    # The cluster must now be addressable by its NEW identifier only.
    response = redshift.describe_clusters("new_identifier")
    result = response['DescribeClustersResponse']['DescribeClustersResult']
    cluster = result['Clusters'][0]

    cluster['ClusterIdentifier'].should.equal("new_identifier")
    cluster['NodeType'].should.equal("dw.hs1.xlarge")
    security_group = cluster['ClusterSecurityGroups'][0]
    security_group['ClusterSecurityGroupName'].should.equal("security_group")
    cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00")
    parameter_group = cluster['ClusterParameterGroups'][0]
    parameter_group['ParameterGroupName'].should.equal("my_parameter_group")
    cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7)
    cluster['AllowVersionUpgrade'].should.equal(False)
    cluster['NumberOfNodes'].should.equal(2)

@mock_redshift
@mock_ec2
def test_create_cluster_subnet_group():
    """A subnet group remembers its description and member subnet ids."""
    vpc_conn = boto.connect_vpc()
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24")
    subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")

    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=[subnet1.id, subnet2.id],
    )

    response = redshift.describe_cluster_subnet_groups("my_subnet")
    result = response['DescribeClusterSubnetGroupsResponse'][
        'DescribeClusterSubnetGroupsResult']
    group = result['ClusterSubnetGroups'][0]

    group['ClusterSubnetGroupName'].should.equal("my_subnet")
    group['Description'].should.equal("This is my subnet group")
    member_ids = [subnet['SubnetIdentifier'] for subnet in group['Subnets']]
    set(member_ids).should.equal(set([subnet1.id, subnet2.id]))

@mock_redshift
@mock_ec2
def test_create_invalid_cluster_subnet_group():
    """Referencing a nonexistent subnet id raises InvalidSubnet."""
    redshift = boto.connect_redshift()
    create = redshift.create_cluster_subnet_group.when.called_with(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=["subnet-1234"],
    )
    create.should.throw(InvalidSubnet)

@mock_redshift
def test_describe_non_existant_subnet_group():
    """Describing an unknown subnet group raises ClusterSubnetGroupNotFound."""
    redshift = boto.redshift.connect_to_region("us-east-1")
    describe = redshift.describe_cluster_subnet_groups.when.called_with(
        "not-a-subnet-group")
    describe.should.throw(ClusterSubnetGroupNotFound)

@mock_redshift
@mock_ec2
def test_delete_cluster_subnet_group():
    """Deleting a subnet group removes it; deleting an unknown name raises."""
    vpc_conn = boto.connect_vpc()
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24")

    redshift = boto.connect_redshift()
    redshift.create_cluster_subnet_group(
        "my_subnet",
        "This is my subnet group",
        subnet_ids=[subnet.id],
    )

    def listed_subnet_groups():
        response = redshift.describe_cluster_subnet_groups()
        result = response['DescribeClusterSubnetGroupsResponse'][
            'DescribeClusterSubnetGroupsResult']
        return result['ClusterSubnetGroups']

    listed_subnet_groups().should.have.length_of(1)

    redshift.delete_cluster_subnet_group("my_subnet")

    listed_subnet_groups().should.have.length_of(0)

    # Deleting a name that does not exist must raise.
    delete = redshift.delete_cluster_subnet_group.when.called_with(
        "not-a-subnet-group")
    delete.should.throw(ClusterSubnetGroupNotFound)

@mock_redshift
def test_create_cluster_security_group():
    """A new security group has its name, description, and no IP ranges."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_security_group(
        "my_security_group",
        "This is my security group",
    )

    response = redshift.describe_cluster_security_groups("my_security_group")
    result = response['DescribeClusterSecurityGroupsResponse'][
        'DescribeClusterSecurityGroupsResult']
    group = result['ClusterSecurityGroups'][0]

    group['ClusterSecurityGroupName'].should.equal("my_security_group")
    group['Description'].should.equal("This is my security group")
    list(group['IPRanges']).should.equal([])

@mock_redshift
def test_describe_non_existant_security_group():
    """Describing an unknown security group raises ClusterSecurityGroupNotFound."""
    redshift = boto.redshift.connect_to_region("us-east-1")
    describe = redshift.describe_cluster_security_groups.when.called_with(
        "not-a-security-group")
    describe.should.throw(ClusterSecurityGroupNotFound)

@mock_redshift
def test_delete_cluster_security_group():
    """Deleting a security group removes it; deleting an unknown name raises."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_security_group(
        "my_security_group",
        "This is my security group",
    )

    def listed_security_groups():
        response = redshift.describe_cluster_security_groups()
        result = response['DescribeClusterSecurityGroupsResponse'][
            'DescribeClusterSecurityGroupsResult']
        return result['ClusterSecurityGroups']

    # The backend always supplies a default group, hence two entries here.
    listed_security_groups().should.have.length_of(2)

    redshift.delete_cluster_security_group("my_security_group")

    listed_security_groups().should.have.length_of(1)

    # Deleting a name that does not exist must raise.
    delete = redshift.delete_cluster_security_group.when.called_with(
        "not-a-security-group")
    delete.should.throw(ClusterSecurityGroupNotFound)

@mock_redshift
def test_create_cluster_parameter_group():
    """A new parameter group stores its name, family, and description."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )

    response = redshift.describe_cluster_parameter_groups("my_parameter_group")
    result = response['DescribeClusterParameterGroupsResponse'][
        'DescribeClusterParameterGroupsResult']
    group = result['ParameterGroups'][0]

    group['ParameterGroupName'].should.equal("my_parameter_group")
    group['ParameterGroupFamily'].should.equal("redshift-1.0")
    group['Description'].should.equal("This is my parameter group")

@mock_redshift
def test_describe_non_existant_parameter_group():
    """Describing an unknown parameter group raises ClusterParameterGroupNotFound."""
    redshift = boto.redshift.connect_to_region("us-east-1")
    describe = redshift.describe_cluster_parameter_groups.when.called_with(
        "not-a-parameter-group")
    describe.should.throw(ClusterParameterGroupNotFound)

@mock_redshift
def test_delete_cluster_parameter_group():
    """Deleting a parameter group removes it; deleting an unknown name raises."""
    redshift = boto.connect_redshift()
    redshift.create_cluster_parameter_group(
        "my_parameter_group",
        "redshift-1.0",
        "This is my parameter group",
    )

    def listed_parameter_groups():
        response = redshift.describe_cluster_parameter_groups()
        result = response['DescribeClusterParameterGroupsResponse'][
            'DescribeClusterParameterGroupsResult']
        return result['ParameterGroups']

    # The backend always supplies a default group, hence two entries here.
    listed_parameter_groups().should.have.length_of(2)

    redshift.delete_cluster_parameter_group("my_parameter_group")

    listed_parameter_groups().should.have.length_of(1)

    # Deleting a name that does not exist must raise.
    delete = redshift.delete_cluster_parameter_group.when.called_with(
        "not-a-parameter-group")
    delete.should.throw(ClusterParameterGroupNotFound)
# --- tests/test_redshift/test_server.py (new file, 23 lines) ---
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import sure # noqa
|
||||
|
||||
import moto.server as server
|
||||
from moto import mock_redshift
|
||||
|
||||
'''
|
||||
Test the different server responses
|
||||
'''
|
||||
|
||||
|
||||
@mock_redshift
def test_describe_clusters():
    """The redshift server endpoint returns an empty cluster list initially."""
    app = server.create_backend_app("redshift")
    client = app.test_client()

    res = client.get('/?Action=DescribeClusters')

    payload = json.loads(res.data.decode("utf-8"))
    result = payload['DescribeClustersResponse']['DescribeClustersResult']
    list(result['Clusters']).should.equal([])
# (end of diff excerpt)