Update upstream branch 'master' into instance_modify_security_groups

Tyler Sanders 2014-11-27 11:14:46 -06:00
commit 7476c63119
29 changed files with 1837 additions and 99 deletions

.travis.yml

@ -5,7 +5,6 @@ python:
env:
matrix:
- BOTO_VERSION=2.34.0
- BOTO_VERSION=2.25.0
matrix:
include:
- python: "3.3"

README.md

@ -192,6 +192,16 @@ $ moto_server ec2 -p3000
Then go to [localhost](http://localhost:5000/?Action=DescribeInstances) to see a list of running instances (it will be empty since you haven't added any yet).
If you want to use boto against this server (though using the simpler decorators above is strongly encouraged instead), the easiest way is to create a boto config file (`~/.boto`) with the following values:
```
[Boto]
is_secure = False
https_validate_certificates = False
proxy_port = 5000
proxy = 127.0.0.1
```
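With that in place, plain boto connections are transparently proxied to the local moto server. A minimal sketch (the key and secret are placeholders; moto does not validate credentials):
```python
import boto

# With the ~/.boto proxy settings above, this call is routed to the
# moto server listening on 127.0.0.1:5000.
conn = boto.connect_ec2('the_key', 'the_secret')
print(conn.get_all_instances())  # empty list until instances are started
```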
## Install
```console

moto/__init__.py

@ -11,6 +11,8 @@ from .ec2 import mock_ec2 # flake8: noqa
from .elb import mock_elb # flake8: noqa
from .emr import mock_emr # flake8: noqa
from .iam import mock_iam # flake8: noqa
from .kinesis import mock_kinesis # flake8: noqa
from .redshift import mock_redshift # flake8: noqa
from .s3 import mock_s3 # flake8: noqa
from .s3bucket_path import mock_s3bucket_path # flake8: noqa
from .ses import mock_ses # flake8: noqa

moto/backends.py

@ -6,6 +6,8 @@ from moto.dynamodb2 import dynamodb_backend2
from moto.ec2 import ec2_backend
from moto.elb import elb_backend
from moto.emr import emr_backend
from moto.kinesis import kinesis_backend
from moto.redshift import redshift_backend
from moto.s3 import s3_backend
from moto.s3bucket_path import s3bucket_path_backend
from moto.ses import ses_backend
@ -21,6 +23,8 @@ BACKENDS = {
'ec2': ec2_backend,
'elb': elb_backend,
'emr': emr_backend,
'kinesis': kinesis_backend,
'redshift': redshift_backend,
's3': s3_backend,
's3bucket_path': s3bucket_path_backend,
'ses': ses_backend,

moto/core/responses.py

@ -90,6 +90,12 @@ class BaseResponse(object):
def call_action(self):
headers = self.response_headers
action = self.querystring.get('Action', [""])[0]
if not action: # Some services use a header for the action
# Headers are case-insensitive. Probably a better way to do this.
match = self.headers.get('x-amz-target') or self.headers.get('X-Amz-Target')
if match:
action = match.split(".")[1]
action = camelcase_to_underscores(action)
method_names = method_names_from_class(self.__class__)
if action in method_names:
@ -110,6 +116,19 @@ class BaseResponse(object):
def _get_param(self, param_name):
return self.querystring.get(param_name, [None])[0]
def _get_int_param(self, param_name):
val = self._get_param(param_name)
if val is not None:
return int(val)
def _get_bool_param(self, param_name):
val = self._get_param(param_name)
if val is not None:
if val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
def _get_multi_param(self, param_prefix):
if param_prefix.endswith("."):
prefix = param_prefix
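Together, these additions let JSON-protocol services (which carry the action in a header rather than the querystring) dispatch correctly, and give response classes typed access to querystring parameters. A behavior sketch with illustrative values:
```python
# Action resolution for header-based services (illustrative header value):
target = "Kinesis_20131202.ListStreams"   # X-Amz-Target
action = target.split(".")[1]             # "ListStreams"
# camelcase_to_underscores("ListStreams") -> "list_streams", which is then
# looked up among the response class's method names.

# Typed parameter helpers, for a querystring like
# ?Port=5439&AllowVersionUpgrade=true:
#   self._get_int_param('Port')                  -> 5439
#   self._get_bool_param('AllowVersionUpgrade')  -> True
#   self._get_param('Missing')                   -> None
```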

moto/ec2/models.py

@ -1398,7 +1398,9 @@ class VPC(TaggedEC2Resource):
return self.id
def get_filter_value(self, filter_name):
if filter_name == 'cidr':
if filter_name == 'vpc-id':
return self.id
elif filter_name == 'cidr':
return self.cidr_block
elif filter_name == 'dhcp-options-id':
if not self.dhcp_options:
@ -1583,6 +1585,12 @@ class Subnet(TaggedEC2Resource):
)
return subnet
@property
def availability_zone(self):
# This could probably be smarter, but there doesn't appear to be a
# way to pull AZs for a region in boto
return self.ec2_backend.region_name + "a"
@property
def physical_resource_id(self):
return self.id
@ -1705,6 +1713,8 @@ class RouteTable(TaggedEC2Resource):
return 'true'
else:
return 'false'
elif filter_name == "route-table-id":
return self.id
elif filter_name == "vpc-id":
return self.vpc_id
elif filter_name == "association.route-table-id":
@ -2442,6 +2452,15 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
ElasticAddressBackend, KeyPairBackend, DHCPOptionsSetBackend,
NetworkAclBackend):
def __init__(self, region_name):
super(EC2Backend, self).__init__()
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
# Use this to generate a proper error template response when in a response handler.
def raise_error(self, code, message):
raise EC2ClientError(code, message)
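The new reset pattern wipes all backend state while keeping the backend bound to its region. A sketch of the intended behavior (assumes moto is importable):
```python
from moto.ec2.models import EC2Backend

backend = EC2Backend("us-west-1")
backend.reset()                    # clears __dict__, then re-runs __init__
assert backend.region_name == "us-west-1"   # region survives the reset
```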
@ -2495,4 +2514,4 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
ec2_backends = {}
for region in boto.ec2.regions():
ec2_backends[region.name] = EC2Backend()
ec2_backends[region.name] = EC2Backend(region.name)

moto/kinesis/__init__.py Normal file

@ -0,0 +1,12 @@
from __future__ import unicode_literals
from .models import kinesis_backends
from ..core.models import MockAWS
kinesis_backend = kinesis_backends['us-east-1']
def mock_kinesis(func=None):
if func:
return MockAWS(kinesis_backends)(func)
else:
return MockAWS(kinesis_backends)
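Like the other service mocks, `mock_kinesis` can be used either as a decorator or (via `MockAWS`) as a context manager. A usage sketch:
```python
from moto import mock_kinesis

@mock_kinesis
def test_decorated():
    pass  # boto kinesis calls inside hit the in-memory backend

with mock_kinesis():
    pass  # same behavior, scoped to the block
```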

moto/kinesis/exceptions.py Normal file

@ -0,0 +1,34 @@
from __future__ import unicode_literals
import json
from werkzeug.exceptions import BadRequest
class ResourceNotFoundError(BadRequest):
def __init__(self, message):
super(ResourceNotFoundError, self).__init__()
self.description = json.dumps({
"message": message,
'__type': 'ResourceNotFoundException',
})
class StreamNotFoundError(ResourceNotFoundError):
def __init__(self, stream_name):
super(StreamNotFoundError, self).__init__(
'Stream {0} under account 123456789012 not found.'.format(stream_name))
class ShardNotFoundError(ResourceNotFoundError):
def __init__(self, shard_id):
super(ShardNotFoundError, self).__init__(
'Shard {0} under account 123456789012 not found.'.format(shard_id))
class InvalidArgumentError(BadRequest):
def __init__(self, message):
super(InvalidArgumentError, self).__init__()
self.description = json.dumps({
"message": message,
'__type': 'InvalidArgumentException',
})

moto/kinesis/models.py Normal file

@ -0,0 +1,190 @@
from __future__ import unicode_literals
import boto.kinesis
from moto.core import BaseBackend
from .exceptions import StreamNotFoundError, ShardNotFoundError
from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose_shard_iterator
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
class Record(object):
def __init__(self, partition_key, data, sequence_number):
self.partition_key = partition_key
self.data = data
self.sequence_number = sequence_number
def to_json(self):
return {
"Data": self.data,
"PartitionKey": self.partition_key,
"SequenceNumber": str(self.sequence_number),
}
class Shard(object):
def __init__(self, shard_id):
self.shard_id = shard_id
self.records = OrderedDict()
def get_records(self, last_sequence_id, limit):
last_sequence_id = int(last_sequence_id)
results = []
for sequence_number, record in self.records.items():
if sequence_number > last_sequence_id:
results.append(record)
last_sequence_id = sequence_number
if len(results) == limit:
break
return results, last_sequence_id
def put_record(self, partition_key, data):
# Note: this function is not safe for concurrency
if self.records:
last_sequence_number = self.get_max_sequence_number()
else:
last_sequence_number = 0
sequence_number = last_sequence_number + 1
self.records[sequence_number] = Record(partition_key, data, sequence_number)
return sequence_number
def get_min_sequence_number(self):
if self.records:
return list(self.records.keys())[0]
return 0
def get_max_sequence_number(self):
if self.records:
return list(self.records.keys())[-1]
return 0
def to_json(self):
return {
"HashKeyRange": {
"EndingHashKey": "113427455640312821154458202477256070484",
"StartingHashKey": "0"
},
"SequenceNumberRange": {
"EndingSequenceNumber": self.get_max_sequence_number(),
"StartingSequenceNumber": self.get_min_sequence_number(),
},
"ShardId": self.shard_id
}
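Sequence numbers are plain monotonically increasing integers per shard, so records come back in insertion order. A standalone behavior sketch:
```python
shard = Shard("shardId-000000000000")
shard.put_record("pk-1", "data-1")   # assigned sequence number 1
shard.put_record("pk-2", "data-2")   # assigned sequence number 2

records, last_seq = shard.get_records(last_sequence_id=0, limit=10)
assert [record.data for record in records] == ["data-1", "data-2"]
assert last_seq == 2
```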
class Stream(object):
def __init__(self, stream_name, shard_count, region):
self.stream_name = stream_name
self.shard_count = shard_count
self.region = region
self.account_number = "123456789012"
self.shards = {}
for index in range(shard_count):
shard_id = "shardId-{0}".format(str(index).zfill(12))
self.shards[shard_id] = Shard(shard_id)
@property
def arn(self):
return "arn:aws:kinesis:{region}:{account_number}:{stream_name}".format(
region=self.region,
account_number=self.account_number,
stream_name=self.stream_name
)
def get_shard(self, shard_id):
if shard_id in self.shards:
return self.shards[shard_id]
else:
raise ShardNotFoundError(shard_id)
def get_shard_for_key(self, partition_key):
# TODO implement sharding
shard = list(self.shards.values())[0]
return shard
def put_record(self, partition_key, explicit_hash_key, sequence_number_for_ordering, data):
partition_key = explicit_hash_key if explicit_hash_key else partition_key
shard = self.get_shard_for_key(partition_key)
sequence_number = shard.put_record(partition_key, data)
return sequence_number, shard.shard_id
def to_json(self):
return {
"StreamDescription": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": "ACTIVE",
"HasMoreShards": False,
"Shards": [shard.to_json() for shard in self.shards.values()],
}
}
class KinesisBackend(BaseBackend):
def __init__(self):
self.streams = {}
def create_stream(self, stream_name, shard_count, region):
stream = Stream(stream_name, shard_count, region)
self.streams[stream_name] = stream
return stream
def describe_stream(self, stream_name):
if stream_name in self.streams:
return self.streams[stream_name]
else:
raise StreamNotFoundError(stream_name)
def list_streams(self):
return self.streams.values()
def delete_stream(self, stream_name):
if stream_name in self.streams:
return self.streams.pop(stream_name)
raise StreamNotFoundError(stream_name)
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, starting_sequence_number):
# Validate params
stream = self.describe_stream(stream_name)
shard = stream.get_shard(shard_id)
shard_iterator = compose_new_shard_iterator(
stream_name, shard, shard_iterator_type, starting_sequence_number
)
return shard_iterator
def get_records(self, shard_iterator, limit):
decomposed = decompose_shard_iterator(shard_iterator)
stream_name, shard_id, last_sequence_id = decomposed
stream = self.describe_stream(stream_name)
shard = stream.get_shard(shard_id)
records, last_sequence_id = shard.get_records(last_sequence_id, limit)
next_shard_iterator = compose_shard_iterator(stream_name, shard, last_sequence_id)
return next_shard_iterator, records
def put_record(self, stream_name, partition_key, explicit_hash_key, sequence_number_for_ordering, data):
stream = self.describe_stream(stream_name)
sequence_number, shard_id = stream.put_record(
partition_key, explicit_hash_key, sequence_number_for_ordering, data
)
return sequence_number, shard_id
kinesis_backends = {}
for region in boto.kinesis.regions():
kinesis_backends[region.name] = KinesisBackend()

moto/kinesis/responses.py Normal file

@ -0,0 +1,83 @@
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import kinesis_backends
class KinesisResponse(BaseResponse):
@property
def parameters(self):
return json.loads(self.body.decode("utf-8"))
@property
def kinesis_backend(self):
return kinesis_backends[self.region]
def create_stream(self):
stream_name = self.parameters.get('StreamName')
shard_count = self.parameters.get('ShardCount')
self.kinesis_backend.create_stream(stream_name, shard_count, self.region)
return ""
def describe_stream(self):
stream_name = self.parameters.get('StreamName')
stream = self.kinesis_backend.describe_stream(stream_name)
return json.dumps(stream.to_json())
def list_streams(self):
streams = self.kinesis_backend.list_streams()
return json.dumps({
"HasMoreStreams": False,
"StreamNames": [stream.stream_name for stream in streams],
})
def delete_stream(self):
stream_name = self.parameters.get("StreamName")
self.kinesis_backend.delete_stream(stream_name)
return ""
def get_shard_iterator(self):
stream_name = self.parameters.get("StreamName")
shard_id = self.parameters.get("ShardId")
shard_iterator_type = self.parameters.get("ShardIteratorType")
starting_sequence_number = self.parameters.get("StartingSequenceNumber")
shard_iterator = self.kinesis_backend.get_shard_iterator(
stream_name, shard_id, shard_iterator_type, starting_sequence_number,
)
return json.dumps({
"ShardIterator": shard_iterator
})
def get_records(self):
shard_iterator = self.parameters.get("ShardIterator")
limit = self.parameters.get("Limit")
next_shard_iterator, records = self.kinesis_backend.get_records(shard_iterator, limit)
return json.dumps({
"NextShardIterator": next_shard_iterator,
"Records": [record.to_json() for record in records]
})
def put_record(self):
stream_name = self.parameters.get("StreamName")
partition_key = self.parameters.get("PartitionKey")
explicit_hash_key = self.parameters.get("ExplicitHashKey")
sequence_number_for_ordering = self.parameters.get("SequenceNumberForOrdering")
data = self.parameters.get("Data")
sequence_number, shard_id = self.kinesis_backend.put_record(
stream_name, partition_key, explicit_hash_key, sequence_number_for_ordering, data
)
return json.dumps({
"SequenceNumber": sequence_number,
"ShardId": shard_id,
})

moto/kinesis/urls.py Normal file

@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import KinesisResponse
url_bases = [
"https?://kinesis.(.+).amazonaws.com",
]
url_paths = {
'{0}/$': KinesisResponse().dispatch,
}
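When the URL map is registered, the `{0}` placeholder is filled with each entry in `url_bases`, producing the final dispatch pattern. Illustratively:
```python
'{0}/$'.format("https?://kinesis.(.+).amazonaws.com")
# -> 'https?://kinesis.(.+).amazonaws.com/$'
```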

moto/kinesis/utils.py Normal file

@ -0,0 +1,31 @@
import base64
from .exceptions import InvalidArgumentError
def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number):
if shard_iterator_type == "AT_SEQUENCE_NUMBER":
last_sequence_id = int(starting_sequence_number) - 1
elif shard_iterator_type == "AFTER_SEQUENCE_NUMBER":
last_sequence_id = int(starting_sequence_number)
elif shard_iterator_type == "TRIM_HORIZON":
last_sequence_id = 0
elif shard_iterator_type == "LATEST":
last_sequence_id = shard.get_max_sequence_number()
else:
raise InvalidArgumentError("Invalid ShardIteratorType: {0}".format(shard_iterator_type))
return compose_shard_iterator(stream_name, shard, last_sequence_id)
def compose_shard_iterator(stream_name, shard, last_sequence_id):
return base64.encodestring(
"{0}:{1}:{2}".format(
stream_name,
shard.shard_id,
last_sequence_id,
).encode("utf-8")
).decode("utf-8")
def decompose_shard_iterator(shard_iterator):
return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":")
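A shard iterator is therefore just a base64-encoded `stream:shard_id:last_sequence_id` triple. A round-trip sketch (uses a hypothetical stand-in for `Shard`; `base64.encodestring` requires Python 2 or a Python 3 release before 3.9):
```python
class FakeShard(object):                  # hypothetical stand-in for Shard
    shard_id = "shardId-000000000000"

    def get_max_sequence_number(self):
        return 0

iterator = compose_new_shard_iterator("my_stream", FakeShard(), "TRIM_HORIZON", None)
decompose_shard_iterator(iterator)
# -> ['my_stream', 'shardId-000000000000', '0']
```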

moto/redshift/__init__.py Normal file

@ -0,0 +1,12 @@
from __future__ import unicode_literals
from .models import redshift_backends
from ..core.models import MockAWS
redshift_backend = redshift_backends['us-east-1']
def mock_redshift(func=None):
if func:
return MockAWS(redshift_backends)(func)
else:
return MockAWS(redshift_backends)

moto/redshift/exceptions.py Normal file

@ -0,0 +1,52 @@
from __future__ import unicode_literals
import json
from werkzeug.exceptions import BadRequest
class RedshiftClientError(BadRequest):
def __init__(self, code, message):
super(RedshiftClientError, self).__init__()
self.description = json.dumps({
"Error": {
"Code": code,
"Message": message,
'Type': 'Sender',
},
'RequestId': '6876f774-7273-11e4-85dc-39e55ca848d1',
})
class ClusterNotFoundError(RedshiftClientError):
def __init__(self, cluster_identifier):
super(ClusterNotFoundError, self).__init__(
'ClusterNotFound',
"Cluster {0} not found.".format(cluster_identifier))
class ClusterSubnetGroupNotFoundError(RedshiftClientError):
def __init__(self, subnet_identifier):
super(ClusterSubnetGroupNotFoundError, self).__init__(
'ClusterSubnetGroupNotFound',
"Subnet group {0} not found.".format(subnet_identifier))
class ClusterSecurityGroupNotFoundError(RedshiftClientError):
def __init__(self, group_identifier):
super(ClusterSecurityGroupNotFoundError, self).__init__(
'ClusterSecurityGroupNotFound',
"Security group {0} not found.".format(group_identifier))
class ClusterParameterGroupNotFoundError(RedshiftClientError):
def __init__(self, group_identifier):
super(ClusterParameterGroupNotFoundError, self).__init__(
'ClusterParameterGroupNotFound',
"Parameter group {0} not found.".format(group_identifier))
class InvalidSubnetError(RedshiftClientError):
def __init__(self, subnet_identifier):
super(InvalidSubnetError, self).__init__(
'InvalidSubnet',
"Subnet {0} not found.".format(subnet_identifier))

moto/redshift/models.py Normal file

@ -0,0 +1,304 @@
from __future__ import unicode_literals
import boto.redshift
from moto.core import BaseBackend
from moto.ec2 import ec2_backends
from .exceptions import (
ClusterNotFoundError,
ClusterParameterGroupNotFoundError,
ClusterSecurityGroupNotFoundError,
ClusterSubnetGroupNotFoundError,
InvalidSubnetError,
)
class Cluster(object):
def __init__(self, redshift_backend, cluster_identifier, node_type, master_username,
master_user_password, db_name, cluster_type, cluster_security_groups,
vpc_security_group_ids, cluster_subnet_group_name, availability_zone,
preferred_maintenance_window, cluster_parameter_group_name,
automated_snapshot_retention_period, port, cluster_version,
allow_version_upgrade, number_of_nodes, publicly_accessible,
encrypted, region):
self.redshift_backend = redshift_backend
self.cluster_identifier = cluster_identifier
self.node_type = node_type
self.master_username = master_username
self.master_user_password = master_user_password
self.db_name = db_name if db_name else "dev"
self.vpc_security_group_ids = vpc_security_group_ids
self.cluster_subnet_group_name = cluster_subnet_group_name
self.publicly_accessible = publicly_accessible
self.encrypted = encrypted
self.allow_version_upgrade = allow_version_upgrade if allow_version_upgrade is not None else True
self.cluster_version = cluster_version if cluster_version else "1.0"
self.port = port if port else 5439
self.automated_snapshot_retention_period = automated_snapshot_retention_period if automated_snapshot_retention_period else 1
self.preferred_maintenance_window = preferred_maintenance_window if preferred_maintenance_window else "Mon:03:00-Mon:03:30"
if cluster_parameter_group_name:
self.cluster_parameter_group_name = [cluster_parameter_group_name]
else:
self.cluster_parameter_group_name = ['default.redshift-1.0']
if cluster_security_groups:
self.cluster_security_groups = cluster_security_groups
else:
self.cluster_security_groups = ["Default"]
if availability_zone:
self.availability_zone = availability_zone
else:
# This could probably be smarter, but there doesn't appear to be a
# way to pull AZs for a region in boto
self.availability_zone = region + "a"
if cluster_type == 'single-node':
self.number_of_nodes = 1
elif number_of_nodes:
self.number_of_nodes = number_of_nodes
else:
self.number_of_nodes = 1
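The constructor fills in AWS-style defaults for omitted parameters. A defaulting sketch, constructing the model directly with mostly-None keyword arguments (as a minimal CreateCluster request would produce):
```python
cluster = Cluster(
    redshift_backend=None, cluster_identifier="c1", node_type="dw.hs1.xlarge",
    master_username="user", master_user_password="pass", db_name=None,
    cluster_type="single-node", cluster_security_groups=None,
    vpc_security_group_ids=[], cluster_subnet_group_name=None,
    availability_zone=None, preferred_maintenance_window=None,
    cluster_parameter_group_name=None, automated_snapshot_retention_period=None,
    port=None, cluster_version=None, allow_version_upgrade=None,
    number_of_nodes=None, publicly_accessible=None, encrypted=None,
    region="us-east-1",
)
assert cluster.db_name == "dev" and cluster.port == 5439
assert cluster.availability_zone == "us-east-1a"   # region + "a" fallback
assert cluster.number_of_nodes == 1                # single-node
```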
@property
def security_groups(self):
return [
security_group for security_group
in self.redshift_backend.describe_cluster_security_groups()
if security_group.cluster_security_group_name in self.cluster_security_groups
]
@property
def vpc_security_groups(self):
return [
security_group for security_group
in self.redshift_backend.ec2_backend.describe_security_groups()
if security_group.id in self.vpc_security_group_ids
]
@property
def parameter_groups(self):
return [
parameter_group for parameter_group
in self.redshift_backend.describe_cluster_parameter_groups()
if parameter_group.cluster_parameter_group_name in self.cluster_parameter_group_name
]
def to_json(self):
return {
"MasterUsername": self.master_username,
"MasterUserPassword": "****",
"ClusterVersion": self.cluster_version,
"VpcSecurityGroups": [{
"Status": "active",
"VpcSecurityGroupId": group.id
} for group in self.vpc_security_groups],
"ClusterSubnetGroupName": self.cluster_subnet_group_name,
"AvailabilityZone": self.availability_zone,
"ClusterStatus": "creating",
"NumberOfNodes": self.number_of_nodes,
"AutomatedSnapshotRetentionPeriod": self.automated_snapshot_retention_period,
"PubliclyAccessible": self.publicly_accessible,
"Encrypted": self.encrypted,
"DBName": self.db_name,
"PreferredMaintenanceWindow": self.preferred_maintenance_window,
"ClusterParameterGroups": [{
"ParameterApplyStatus": "in-sync",
"ParameterGroupName": group.cluster_parameter_group_name,
} for group in self.parameter_groups],
"ClusterSecurityGroups": [{
"Status": "active",
"ClusterSecurityGroupName": group.cluster_security_group_name,
} for group in self.security_groups],
"Port": self.port,
"NodeType": self.node_type,
"ClusterIdentifier": self.cluster_identifier,
"AllowVersionUpgrade": self.allow_version_upgrade,
}
class SubnetGroup(object):
def __init__(self, ec2_backend, cluster_subnet_group_name, description, subnet_ids):
self.ec2_backend = ec2_backend
self.cluster_subnet_group_name = cluster_subnet_group_name
self.description = description
self.subnet_ids = subnet_ids
if not self.subnets:
raise InvalidSubnetError(subnet_ids)
@property
def subnets(self):
return self.ec2_backend.get_all_subnets(filters={'subnet-id': self.subnet_ids})
@property
def vpc_id(self):
return self.subnets[0].vpc_id
def to_json(self):
return {
"VpcId": self.vpc_id,
"Description": self.description,
"ClusterSubnetGroupName": self.cluster_subnet_group_name,
"SubnetGroupStatus": "Complete",
"Subnets": [{
"SubnetStatus": "Active",
"SubnetIdentifier": subnet.id,
"SubnetAvailabilityZone": {
"Name": subnet.availability_zone
},
} for subnet in self.subnets],
}
class SecurityGroup(object):
def __init__(self, cluster_security_group_name, description):
self.cluster_security_group_name = cluster_security_group_name
self.description = description
def to_json(self):
return {
"EC2SecurityGroups": [],
"IPRanges": [],
"Description": self.description,
"ClusterSecurityGroupName": self.cluster_security_group_name,
}
class ParameterGroup(object):
def __init__(self, cluster_parameter_group_name, group_family, description):
self.cluster_parameter_group_name = cluster_parameter_group_name
self.group_family = group_family
self.description = description
def to_json(self):
return {
"ParameterGroupFamily": self.group_family,
"Description": self.description,
"ParameterGroupName": self.cluster_parameter_group_name,
}
class RedshiftBackend(BaseBackend):
def __init__(self, ec2_backend):
self.clusters = {}
self.subnet_groups = {}
self.security_groups = {
"Default": SecurityGroup("Default", "Default Redshift Security Group")
}
self.parameter_groups = {
"default.redshift-1.0": ParameterGroup(
"default.redshift-1.0",
"redshift-1.0",
"Default Redshift parameter group",
)
}
self.ec2_backend = ec2_backend
def reset(self):
ec2_backend = self.ec2_backend
self.__dict__ = {}
self.__init__(ec2_backend)
def create_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs['cluster_identifier']
cluster = Cluster(self, **cluster_kwargs)
self.clusters[cluster_identifier] = cluster
return cluster
def describe_clusters(self, cluster_identifier=None):
clusters = self.clusters.values()
if cluster_identifier:
if cluster_identifier in self.clusters:
return [self.clusters[cluster_identifier]]
else:
raise ClusterNotFoundError(cluster_identifier)
return clusters
def modify_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs.pop('cluster_identifier')
new_cluster_identifier = cluster_kwargs.pop('new_cluster_identifier', None)
cluster = self.describe_clusters(cluster_identifier)[0]
for key, value in cluster_kwargs.items():
setattr(cluster, key, value)
if new_cluster_identifier:
self.delete_cluster(cluster_identifier)
cluster.cluster_identifier = new_cluster_identifier
self.clusters[new_cluster_identifier] = cluster
return cluster
def delete_cluster(self, cluster_identifier):
if cluster_identifier in self.clusters:
return self.clusters.pop(cluster_identifier)
raise ClusterNotFoundError(cluster_identifier)
def create_cluster_subnet_group(self, cluster_subnet_group_name, description, subnet_ids):
subnet_group = SubnetGroup(self.ec2_backend, cluster_subnet_group_name, description, subnet_ids)
self.subnet_groups[cluster_subnet_group_name] = subnet_group
return subnet_group
def describe_cluster_subnet_groups(self, subnet_identifier=None):
subnet_groups = self.subnet_groups.values()
if subnet_identifier:
if subnet_identifier in self.subnet_groups:
return [self.subnet_groups[subnet_identifier]]
else:
raise ClusterSubnetGroupNotFoundError(subnet_identifier)
return subnet_groups
def delete_cluster_subnet_group(self, subnet_identifier):
if subnet_identifier in self.subnet_groups:
return self.subnet_groups.pop(subnet_identifier)
raise ClusterSubnetGroupNotFoundError(subnet_identifier)
def create_cluster_security_group(self, cluster_security_group_name, description):
security_group = SecurityGroup(cluster_security_group_name, description)
self.security_groups[cluster_security_group_name] = security_group
return security_group
def describe_cluster_security_groups(self, security_group_name=None):
security_groups = self.security_groups.values()
if security_group_name:
if security_group_name in self.security_groups:
return [self.security_groups[security_group_name]]
else:
raise ClusterSecurityGroupNotFoundError(security_group_name)
return security_groups
def delete_cluster_security_group(self, security_group_identifier):
if security_group_identifier in self.security_groups:
return self.security_groups.pop(security_group_identifier)
raise ClusterSecurityGroupNotFoundError(security_group_identifier)
def create_cluster_parameter_group(self, cluster_parameter_group_name,
group_family, description):
parameter_group = ParameterGroup(cluster_parameter_group_name, group_family, description)
self.parameter_groups[cluster_parameter_group_name] = parameter_group
return parameter_group
def describe_cluster_parameter_groups(self, parameter_group_name=None):
parameter_groups = self.parameter_groups.values()
if parameter_group_name:
if parameter_group_name in self.parameter_groups:
return [self.parameter_groups[parameter_group_name]]
else:
raise ClusterParameterGroupNotFoundError(parameter_group_name)
return parameter_groups
def delete_cluster_parameter_group(self, parameter_group_name):
if parameter_group_name in self.parameter_groups:
return self.parameter_groups.pop(parameter_group_name)
raise ClusterParameterGroupNotFoundError(parameter_group_name)
redshift_backends = {}
for region in boto.redshift.regions():
redshift_backends[region.name] = RedshiftBackend(ec2_backends[region.name])
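Renaming through `modify_cluster` re-keys the backend's cluster map. A behavior sketch using a hypothetical stand-in cluster (the real flow goes through `create_cluster`; `ec2_backend` is not touched on this path):
```python
class FakeCluster(object):                # hypothetical stand-in
    cluster_identifier = "old"

backend = RedshiftBackend(ec2_backend=None)
backend.clusters["old"] = FakeCluster()

cluster = backend.modify_cluster(cluster_identifier="old",
                                 new_cluster_identifier="new")
assert "old" not in backend.clusters
assert backend.clusters["new"] is cluster
assert cluster.cluster_identifier == "new"
```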

moto/redshift/responses.py Normal file

@ -0,0 +1,256 @@
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import redshift_backends
class RedshiftResponse(BaseResponse):
@property
def redshift_backend(self):
return redshift_backends[self.region]
def create_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param('ClusterIdentifier'),
"node_type": self._get_param('NodeType'),
"master_username": self._get_param('MasterUsername'),
"master_user_password": self._get_param('MasterUserPassword'),
"db_name": self._get_param('DBName'),
"cluster_type": self._get_param('ClusterType'),
"cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'),
"vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'),
"cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
"availability_zone": self._get_param('AvailabilityZone'),
"preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
"cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
"automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
"port": self._get_int_param('Port'),
"cluster_version": self._get_param('ClusterVersion'),
"allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
"number_of_nodes": self._get_int_param('NumberOfNodes'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"region": self.region,
}
cluster = self.redshift_backend.create_cluster(**cluster_kwargs)
return json.dumps({
"CreateClusterResponse": {
"CreateClusterResult": {
"Cluster": cluster.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_clusters(self):
cluster_identifier = self._get_param("ClusterIdentifier")
clusters = self.redshift_backend.describe_clusters(cluster_identifier)
return json.dumps({
"DescribeClustersResponse": {
"DescribeClustersResult": {
"Clusters": [cluster.to_json() for cluster in clusters]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def modify_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param('ClusterIdentifier'),
"new_cluster_identifier": self._get_param('NewClusterIdentifier'),
"node_type": self._get_param('NodeType'),
"master_user_password": self._get_param('MasterUserPassword'),
"cluster_type": self._get_param('ClusterType'),
"cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'),
"vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'),
"cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
"preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
"cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
"automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
"cluster_version": self._get_param('ClusterVersion'),
"allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
"number_of_nodes": self._get_int_param('NumberOfNodes'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
}
cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
return json.dumps({
"ModifyClusterResponse": {
"ModifyClusterResult": {
"Cluster": cluster.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster(self):
cluster_identifier = self._get_param("ClusterIdentifier")
cluster = self.redshift_backend.delete_cluster(cluster_identifier)
return json.dumps({
"DeleteClusterResponse": {
"DeleteClusterResult": {
"Cluster": cluster.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_subnet_group(self):
cluster_subnet_group_name = self._get_param('ClusterSubnetGroupName')
description = self._get_param('Description')
subnet_ids = self._get_multi_param('SubnetIds.member')
subnet_group = self.redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=cluster_subnet_group_name,
description=description,
subnet_ids=subnet_ids,
)
return json.dumps({
"CreateClusterSubnetGroupResponse": {
"CreateClusterSubnetGroupResult": {
"ClusterSubnetGroup": subnet_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_subnet_groups(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(subnet_identifier)
return json.dumps({
"DescribeClusterSubnetGroupsResponse": {
"DescribeClusterSubnetGroupsResult": {
"ClusterSubnetGroups": [subnet_group.to_json() for subnet_group in subnet_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_subnet_group(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
return json.dumps({
"DeleteClusterSubnetGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_security_group(self):
cluster_security_group_name = self._get_param('ClusterSecurityGroupName')
description = self._get_param('Description')
security_group = self.redshift_backend.create_cluster_security_group(
cluster_security_group_name=cluster_security_group_name,
description=description,
)
return json.dumps({
"CreateClusterSecurityGroupResponse": {
"CreateClusterSecurityGroupResult": {
"ClusterSecurityGroup": security_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_security_groups(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
security_groups = self.redshift_backend.describe_cluster_security_groups(cluster_security_group_name)
return json.dumps({
"DescribeClusterSecurityGroupsResponse": {
"DescribeClusterSecurityGroupsResult": {
"ClusterSecurityGroups": [security_group.to_json() for security_group in security_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_security_group(self):
security_group_identifier = self._get_param("ClusterSecurityGroupName")
self.redshift_backend.delete_cluster_security_group(security_group_identifier)
return json.dumps({
"DeleteClusterSecurityGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param('ParameterGroupName')
group_family = self._get_param('ParameterGroupFamily')
description = self._get_param('Description')
parameter_group = self.redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name,
group_family,
description,
)
return json.dumps({
"CreateClusterParameterGroupResponse": {
"CreateClusterParameterGroupResult": {
"ClusterParameterGroup": parameter_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_parameter_groups(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(cluster_parameter_group_name)
return json.dumps({
"DescribeClusterParameterGroupsResponse": {
"DescribeClusterParameterGroupsResult": {
"ParameterGroups": [parameter_group.to_json() for parameter_group in parameter_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
self.redshift_backend.delete_cluster_parameter_group(cluster_parameter_group_name)
return json.dumps({
"DeleteClusterParameterGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})

moto/redshift/urls.py Normal file

@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import RedshiftResponse
url_bases = [
"https?://redshift.(.+).amazonaws.com",
]
url_paths = {
'{0}/$': RedshiftResponse().dispatch,
}

moto/redshift/utils.py Normal file

@ -0,0 +1 @@
from __future__ import unicode_literals

moto/s3/responses.py

@ -1,9 +1,12 @@
from __future__ import unicode_literals
import six
from six.moves.urllib.parse import parse_qs, urlparse
import re
from boto.s3.key import Key
from jinja2 import Template
import six
from six.moves.urllib.parse import parse_qs, urlparse
from .exceptions import BucketAlreadyExists, MissingBucket
from .models import s3_backend
@ -34,10 +37,10 @@ class ResponseObject(object):
return 404, headers, ""
if isinstance(response, six.string_types):
return 200, headers, response
return 200, headers, response.encode("utf-8")
else:
status_code, headers, response_content = response
return status_code, headers, response_content
return status_code, headers, response_content.encode("utf-8")
def _bucket_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
@ -232,8 +235,14 @@ class ResponseObject(object):
for header, value in request.headers.items():
if isinstance(header, six.string_types):
result = meta_regex.match(header)
meta_key = None
if result:
# Check for extra metadata
meta_key = result.group(0).lower()
elif header.lower() in Key.base_user_settable_fields:
# Check for special metadata that doesn't start with x-amz-meta
meta_key = header
if meta_key:
metadata = request.headers[header]
key.set_metadata(meta_key, metadata)
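The two branches above pick up user metadata (`x-amz-meta-*`) and boto's "base user settable" headers respectively. An illustration with example header names:
```python
# Which meta_key each branch would choose (header values illustrative):
#   "x-amz-meta-color: red"        -> meta_key = "x-amz-meta-color"   (regex branch)
#   "Content-Disposition: inline"  -> meta_key = "Content-Disposition"
#       (header.lower() appears in boto's Key.base_user_settable_fields)
#   "Authorization: AWS ..."       -> no meta_key; header ignored here
```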

tests/helpers.py

@ -1,6 +1,5 @@
from __future__ import unicode_literals
import boto
import six
from nose.plugins.skip import SkipTest
@ -24,19 +23,3 @@ class requires_boto_gte(object):
if boto_version >= required:
return test
return skip_test
class py3_requires_boto_gte(object):
"""Decorator for requiring boto version greater than or equal to 'version'
when running on Python 3. (Not all of boto is Python 3 compatible.)"""
def __init__(self, version):
self.version = version
def __call__(self, test):
if not six.PY3:
return test
boto_version = version_tuple(boto.__version__)
required = version_tuple(self.version)
if boto_version >= required:
return test
return skip_test

tests/test_dynamodb/test_dynamodb_table_with_range_key.py

@ -1,11 +1,10 @@
from __future__ import unicode_literals
import six
import boto
import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb
from tests.helpers import py3_requires_boto_gte
from boto.dynamodb import condition
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError
@ -29,7 +28,6 @@ def create_table(conn):
return table
@py3_requires_boto_gte("2.33.0")
@freeze_time("2012-01-14")
@mock_dynamodb
def test_create_table():
@ -62,7 +60,6 @@ def test_create_table():
conn.describe_table('messages').should.equal(expected)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_table():
conn = boto.connect_dynamodb()
@ -75,7 +72,6 @@ def test_delete_table():
conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_update_table_throughput():
conn = boto.connect_dynamodb()
@ -90,7 +86,6 @@ def test_update_table_throughput():
table.write_units.should.equal(6)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_item_add_and_describe_and_update():
conn = boto.connect_dynamodb()
@ -138,7 +133,6 @@ def test_item_add_and_describe_and_update():
})
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_item_put_without_table():
conn = boto.connect_dynamodb()
@ -152,7 +146,6 @@ def test_item_put_without_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_get_missing_item():
conn = boto.connect_dynamodb()
@ -165,7 +158,6 @@ def test_get_missing_item():
table.has_item("foobar", "more").should.equal(False)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_get_item_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -179,7 +171,6 @@ def test_get_item_with_undeclared_table():
).should.throw(DynamoDBKeyNotFoundError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_get_item_without_range_key():
conn = boto.connect_dynamodb()
@ -204,7 +195,6 @@ def test_get_item_without_range_key():
table.get_item.when.called_with(hash_key=hash_key).should.throw(DynamoDBValidationError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_item():
conn = boto.connect_dynamodb()
@ -233,7 +223,6 @@ def test_delete_item():
item.delete.when.called_with().should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_item_with_attribute_response():
conn = boto.connect_dynamodb()
@ -271,7 +260,6 @@ def test_delete_item_with_attribute_response():
item.delete.when.called_with().should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_item_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -285,7 +273,6 @@ def test_delete_item_with_undeclared_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_query():
conn = boto.connect_dynamodb()
@ -336,7 +323,6 @@ def test_query():
results.response['Items'].should.have.length_of(1)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_query_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -353,7 +339,6 @@ def test_query_with_undeclared_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_scan():
conn = boto.connect_dynamodb()
@ -417,7 +402,6 @@ def test_scan():
results.response['Items'].should.have.length_of(1)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_scan_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -435,7 +419,6 @@ def test_scan_with_undeclared_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_write_batch():
conn = boto.connect_dynamodb()
@ -480,7 +463,6 @@ def test_write_batch():
table.item_count.should.equal(1)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_batch_read():
conn = boto.connect_dynamodb()

tests/test_dynamodb/test_dynamodb_table_without_range_key.py

@ -1,11 +1,10 @@
from __future__ import unicode_literals
import six
import boto
import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb
from tests.helpers import py3_requires_boto_gte
from boto.dynamodb import condition
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
@ -27,7 +26,6 @@ def create_table(conn):
return table
@py3_requires_boto_gte("2.33.0")
@freeze_time("2012-01-14")
@mock_dynamodb
def test_create_table():
@ -56,7 +54,6 @@ def test_create_table():
conn.describe_table('messages').should.equal(expected)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_table():
conn = boto.connect_dynamodb()
@ -69,7 +66,6 @@ def test_delete_table():
conn.layer1.delete_table.when.called_with('messages').should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_update_table_throughput():
conn = boto.connect_dynamodb()
@ -84,7 +80,6 @@ def test_update_table_throughput():
table.write_units.should.equal(6)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_item_add_and_describe_and_update():
conn = boto.connect_dynamodb()
@ -125,7 +120,6 @@ def test_item_add_and_describe_and_update():
})
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_item_put_without_table():
conn = boto.connect_dynamodb()
@ -138,7 +132,6 @@ def test_item_put_without_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_get_missing_item():
conn = boto.connect_dynamodb()
@ -149,7 +142,6 @@ def test_get_missing_item():
).should.throw(DynamoDBKeyNotFoundError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_get_item_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -162,7 +154,6 @@ def test_get_item_with_undeclared_table():
).should.throw(DynamoDBKeyNotFoundError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_item():
conn = boto.connect_dynamodb()
@ -190,7 +181,6 @@ def test_delete_item():
item.delete.when.called_with().should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_item_with_attribute_response():
conn = boto.connect_dynamodb()
@ -226,7 +216,6 @@ def test_delete_item_with_attribute_response():
item.delete.when.called_with().should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_delete_item_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -239,7 +228,6 @@ def test_delete_item_with_undeclared_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_query():
conn = boto.connect_dynamodb()
@ -260,7 +248,6 @@ def test_query():
results.response['Items'].should.have.length_of(1)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_query_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -271,7 +258,6 @@ def test_query_with_undeclared_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_scan():
conn = boto.connect_dynamodb()
@ -332,7 +318,6 @@ def test_scan():
results.response['Items'].should.have.length_of(1)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_scan_with_undeclared_table():
conn = boto.connect_dynamodb()
@ -350,7 +335,6 @@ def test_scan_with_undeclared_table():
).should.throw(DynamoDBResponseError)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_write_batch():
conn = boto.connect_dynamodb()
@ -393,7 +377,6 @@ def test_write_batch():
table.item_count.should.equal(1)
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb
def test_batch_read():
conn = boto.connect_dynamodb()

tests/test_dynamodb2/test_dynamodb_table_with_range_key.py

@ -1,11 +1,11 @@
from __future__ import unicode_literals
import six
import boto
import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb2
from boto.exception import JSONResponseError
from tests.helpers import requires_boto_gte, py3_requires_boto_gte
from tests.helpers import requires_boto_gte
try:
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.fields import RangeKey
@ -33,7 +33,6 @@ def iterate_results(res):
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table():
@ -61,7 +60,6 @@ def test_create_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_delete_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -74,7 +72,6 @@ def test_delete_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_update_table_throughput():
table = create_table()
@ -100,7 +97,6 @@ def test_update_table_throughput():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_item_add_and_describe_and_update():
table = create_table()
@ -144,7 +140,6 @@ def test_item_add_and_describe_and_update():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_item_put_without_table():
table = Table('undeclared-table')
@ -159,7 +154,6 @@ def test_item_put_without_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_missing_item():
table = create_table()
@ -171,7 +165,6 @@ def test_get_missing_item():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_item_with_undeclared_table():
table = Table('undeclared-table')
@ -179,7 +172,6 @@ def test_get_item_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_item_without_range_key():
table = Table.create('messages', schema=[
@ -197,7 +189,6 @@ def test_get_item_without_range_key():
@requires_boto_gte("2.30.0")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_delete_item():
table = create_table()
@ -220,7 +211,6 @@ def test_delete_item():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_delete_item_with_undeclared_table():
table = Table("undeclared-table")
@ -235,7 +225,6 @@ def test_delete_item_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_query():
table = create_table()
@ -293,7 +282,6 @@ def test_query():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_query_with_undeclared_table():
table = Table('undeclared')
@ -306,7 +294,6 @@ def test_query_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_scan():
table = create_table()
@ -362,7 +349,6 @@ def test_scan():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_scan_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -380,7 +366,6 @@ def test_scan_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_write_batch():
table = create_table()
@ -411,7 +396,6 @@ def test_write_batch():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_batch_read():
table = create_table()
@ -456,7 +440,6 @@ def test_batch_read():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_key_fields():
table = create_table()

tests/test_dynamodb2/test_dynamodb_table_without_range_key.py

@ -1,11 +1,11 @@
from __future__ import unicode_literals
import six
import boto
import sure # noqa
from freezegun import freeze_time
from boto.exception import JSONResponseError
from moto import mock_dynamodb2
from tests.helpers import requires_boto_gte, py3_requires_boto_gte
from tests.helpers import requires_boto_gte
try:
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
@ -25,7 +25,6 @@ def create_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table():
@ -57,7 +56,6 @@ def test_create_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_delete_table():
create_table()
@ -71,7 +69,6 @@ def test_delete_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_update_table_throughput():
table = create_table()
@ -88,7 +85,6 @@ def test_update_table_throughput():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_item_add_and_describe_and_update():
table = create_table()
@ -123,7 +119,6 @@ def test_item_add_and_describe_and_update():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_item_put_without_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -139,7 +134,6 @@ def test_item_put_without_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_missing_item():
table = create_table()
@ -148,7 +142,6 @@ def test_get_missing_item():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_item_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -160,7 +153,6 @@ def test_get_item_with_undeclared_table():
@requires_boto_gte("2.30.0")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_delete_item():
table = create_table()
@ -185,7 +177,6 @@ def test_delete_item():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_delete_item_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -197,7 +188,6 @@ def test_delete_item_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_query():
table = create_table()
@ -218,7 +208,6 @@ def test_query():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_query_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -230,7 +219,6 @@ def test_query_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_scan():
table = create_table()
@ -282,7 +270,6 @@ def test_scan():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_scan_with_undeclared_table():
conn = boto.dynamodb2.layer1.DynamoDBConnection()
@ -301,7 +288,6 @@ def test_scan_with_undeclared_table():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_write_batch():
table = create_table()
@ -333,7 +319,6 @@ def test_write_batch():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_batch_read():
table = create_table()
@ -375,7 +360,6 @@ def test_batch_read():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_key_fields():
table = create_table()
@ -384,7 +368,6 @@ def test_get_key_fields():
@requires_boto_gte("2.9")
@py3_requires_boto_gte("2.33.0")
@mock_dynamodb2
def test_get_special_item():
table = Table.create('messages', schema=[

tests/test_kinesis/test_kinesis.py Normal file

@ -0,0 +1,239 @@
from __future__ import unicode_literals
import boto.kinesis
from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException
import sure # noqa
from moto import mock_kinesis
@mock_kinesis
def test_create_stream():
conn = boto.kinesis.connect_to_region("us-west-2")
conn.create_stream("my_stream", 2)
stream_response = conn.describe_stream("my_stream")
stream = stream_response["StreamDescription"]
stream["StreamName"].should.equal("my_stream")
stream["HasMoreShards"].should.equal(False)
stream["StreamARN"].should.equal("arn:aws:kinesis:us-west-2:123456789012:my_stream")
stream["StreamStatus"].should.equal("ACTIVE")
shards = stream['Shards']
shards.should.have.length_of(2)
@mock_kinesis
def test_describe_non_existant_stream():
conn = boto.kinesis.connect_to_region("us-east-1")
conn.describe_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException)
@mock_kinesis
def test_list_and_delete_stream():
conn = boto.kinesis.connect_to_region("us-west-2")
conn.create_stream("stream1", 1)
conn.create_stream("stream2", 1)
conn.list_streams()['StreamNames'].should.have.length_of(2)
conn.delete_stream("stream2")
conn.list_streams()['StreamNames'].should.have.length_of(1)
# Delete invalid id
conn.delete_stream.when.called_with("not-a-stream").should.throw(ResourceNotFoundException)
@mock_kinesis
def test_basic_shard_iterator():
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
response = conn.get_records(shard_iterator)
shard_iterator = response['NextShardIterator']
response['Records'].should.equal([])
@mock_kinesis
def test_get_invalid_shard_iterator():
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
conn.get_shard_iterator.when.called_with(stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException)
@mock_kinesis
def test_put_records():
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
data = "hello world"
partition_key = "1234"
conn.put_record(stream_name, data, partition_key)
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
response = conn.get_records(shard_iterator)
shard_iterator = response['NextShardIterator']
response['Records'].should.have.length_of(1)
record = response['Records'][0]
record["Data"].should.equal("hello world")
record["PartitionKey"].should.equal("1234")
record["SequenceNumber"].should.equal("1")
@mock_kinesis
def test_get_records_limit():
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
# Create some data
data = "hello world"
for index in range(5):
conn.put_record(stream_name, data, index)
# Get a shard iterator
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
# Retrieve only 3 records
response = conn.get_records(shard_iterator, limit=3)
response['Records'].should.have.length_of(3)
# Then get the rest of the results
next_shard_iterator = response['NextShardIterator']
response = conn.get_records(next_shard_iterator)
response['Records'].should.have.length_of(2)
@mock_kinesis
def test_get_records_at_sequence_number():
# AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by a specific sequence number.
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
# Create some data
for index in range(1, 5):
conn.put_record(stream_name, str(index), index)
# Get a shard iterator
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
# Get the second record
response = conn.get_records(shard_iterator, limit=2)
second_sequence_id = response['Records'][1]['SequenceNumber']
# Then get a new iterator starting at that id
response = conn.get_shard_iterator(stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id)
shard_iterator = response['ShardIterator']
response = conn.get_records(shard_iterator)
# And the first result returned should be the second item
response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id)
response['Records'][0]['Data'].should.equal('2')
@mock_kinesis
def test_get_records_after_sequence_number():
# AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a specific sequence number.
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
# Create some data
for index in range(1, 5):
conn.put_record(stream_name, str(index), index)
# Get a shard iterator
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
# Get the second record
response = conn.get_records(shard_iterator, limit=2)
second_sequence_id = response['Records'][1]['SequenceNumber']
# Then get a new iterator starting after that id
response = conn.get_shard_iterator(stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id)
shard_iterator = response['ShardIterator']
response = conn.get_records(shard_iterator)
# And the first result returned should be the third item
response['Records'][0]['Data'].should.equal('3')
@mock_kinesis
def test_get_records_latest():
# LATEST - Start reading just after the most recent record in the shard, so that you always read the most recent data in the shard.
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
# Create some data
for index in range(1, 5):
conn.put_record(stream_name, str(index), str(index))  # partition keys must be strings
# Get a shard iterator
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
shard_iterator = response['ShardIterator']
# Read a couple of the records that are already in the shard
response = conn.get_records(shard_iterator, limit=2)
# Then get a LATEST iterator, which starts just after the most recent
# record; no starting sequence number is passed for this iterator type
response = conn.get_shard_iterator(stream_name, shard_id, 'LATEST')
shard_iterator = response['ShardIterator']
# Write some more data
conn.put_record(stream_name, "last_record", "last_record")
response = conn.get_records(shard_iterator)
# And the only result returned should be the new item
response['Records'].should.have.length_of(1)
response['Records'][0]['PartitionKey'].should.equal('last_record')
response['Records'][0]['Data'].should.equal('last_record')
@mock_kinesis
def test_invalid_shard_iterator_type():
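# Iterator types outside the documented set (TRIM_HORIZON, LATEST, ...) should be rejected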
conn = boto.kinesis.connect_to_region("us-west-2")
stream_name = "my_stream"
conn.create_stream(stream_name, 1)
response = conn.describe_stream(stream_name)
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
conn.get_shard_iterator.when.called_with(
stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException)

View File

@ -0,0 +1,25 @@
from __future__ import unicode_literals
import json
import sure # noqa
import moto.server as server
from moto import mock_kinesis
'''
Test the different server responses
'''
@mock_kinesis
def test_list_streams():
backend = server.create_backend_app("kinesis")
test_client = backend.test_client()
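# A freshly created backend should report an empty stream list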
res = test_client.get('/?Action=ListStreams')
json_data = json.loads(res.data.decode("utf-8"))
json_data.should.equal({
"HasMoreStreams": False,
"StreamNames": [],
})

View File

@ -0,0 +1,441 @@
from __future__ import unicode_literals
import boto
from boto.redshift.exceptions import (
ClusterNotFound,
ClusterParameterGroupNotFound,
ClusterSecurityGroupNotFound,
ClusterSubnetGroupNotFound,
InvalidSubnet,
)
import sure # noqa
from moto import mock_ec2, mock_redshift
@mock_redshift
def test_create_cluster():
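# Create a fully-specified multi-node cluster and check that every attribute round-trips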
conn = boto.redshift.connect_to_region("us-east-1")
cluster_identifier = 'my_cluster'
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
db_name="my_db",
cluster_type="multi-node",
availability_zone="us-east-1d",
preferred_maintenance_window="Mon:03:00-Mon:11:00",
automated_snapshot_retention_period=10,
port=1234,
cluster_version="1.0",
allow_version_upgrade=True,
number_of_nodes=3,
)
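# describe_clusters wraps the clusters in two levels of response/result dicts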
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
cluster['ClusterIdentifier'].should.equal(cluster_identifier)
cluster['NodeType'].should.equal("dw.hs1.xlarge")
cluster['MasterUsername'].should.equal("username")
cluster['DBName'].should.equal("my_db")
cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("Default")
cluster['VpcSecurityGroups'].should.equal([])
cluster['ClusterSubnetGroupName'].should.equal(None)
cluster['AvailabilityZone'].should.equal("us-east-1d")
cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00")
cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0")
cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10)
cluster['Port'].should.equal(1234)
cluster['ClusterVersion'].should.equal("1.0")
cluster['AllowVersionUpgrade'].should.equal(True)
cluster['NumberOfNodes'].should.equal(3)
@mock_redshift
def test_create_single_node_cluster():
conn = boto.redshift.connect_to_region("us-east-1")
cluster_identifier = 'my_cluster'
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
db_name="my_db",
cluster_type="single-node",
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
cluster['ClusterIdentifier'].should.equal(cluster_identifier)
cluster['NodeType'].should.equal("dw.hs1.xlarge")
cluster['MasterUsername'].should.equal("username")
cluster['DBName'].should.equal("my_db")
cluster['NumberOfNodes'].should.equal(1)
@mock_redshift
def test_default_cluster_attributes():
conn = boto.redshift.connect_to_region("us-east-1")
cluster_identifier = 'my_cluster'
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
)
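# Only the required arguments were passed, so everything below should come back as a default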
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
cluster['DBName'].should.equal("dev")
cluster['ClusterSubnetGroupName'].should.equal(None)
assert "us-east-" in cluster['AvailabilityZone']
cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30")
cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("default.redshift-1.0")
cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1)
cluster['Port'].should.equal(5439)
cluster['ClusterVersion'].should.equal("1.0")
cluster['AllowVersionUpgrade'].should.equal(True)
cluster['NumberOfNodes'].should.equal(1)
@mock_redshift
@mock_ec2
def test_create_cluster_in_subnet_group():
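# The subnet group has to exist (and point at a real subnet) before a cluster can reference it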
vpc_conn = boto.connect_vpc()
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24")
redshift_conn = boto.connect_redshift()
redshift_conn.create_cluster_subnet_group(
"my_subnet_group",
"This is my subnet group",
subnet_ids=[subnet.id],
)
redshift_conn.create_cluster(
"my_cluster",
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
cluster_subnet_group_name='my_subnet_group',
)
cluster_response = redshift_conn.describe_clusters("my_cluster")
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group')
@mock_redshift
def test_create_cluster_with_security_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.create_cluster_security_group(
"security_group1",
"This is my security group",
)
conn.create_cluster_security_group(
"security_group2",
"This is my security group",
)
cluster_identifier = 'my_cluster'
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
cluster_security_groups=["security_group1", "security_group2"]
)
cluster_response = conn.describe_clusters(cluster_identifier)
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
group_names = [group['ClusterSecurityGroupName'] for group in cluster['ClusterSecurityGroups']]
set(group_names).should.equal(set(["security_group1", "security_group2"]))
@mock_redshift
@mock_ec2
def test_create_cluster_with_vpc_security_groups():
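# VPC security groups live in EC2, so this test needs both mocks active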
vpc_conn = boto.connect_vpc()
ec2_conn = boto.connect_ec2()
redshift_conn = boto.connect_redshift()
vpc = vpc_conn.create_vpc("10.0.0.0/16")
security_group = ec2_conn.create_security_group("vpc_security_group", "a group", vpc_id=vpc.id)
redshift_conn.create_cluster(
"my_cluster",
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
vpc_security_group_ids=[security_group.id],
)
cluster_response = redshift_conn.describe_clusters("my_cluster")
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
group_ids = [group['VpcSecurityGroupId'] for group in cluster['VpcSecurityGroups']]
list(group_ids).should.equal([security_group.id])
@mock_redshift
def test_create_cluster_with_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group",
"redshift-1.0",
"This is my parameter group",
)
conn.create_cluster(
"my_cluster",
node_type="dw.hs1.xlarge",
master_username="username",
master_user_password="password",
cluster_parameter_group_name='my_parameter_group',
)
cluster_response = conn.describe_clusters("my_cluster")
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group")
@mock_redshift
def test_describe_non_existent_cluster():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_clusters.when.called_with("not-a-cluster").should.throw(ClusterNotFound)
@mock_redshift
def test_delete_cluster():
conn = boto.connect_redshift()
cluster_identifier = 'my_cluster'
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",  # "single-node" is a cluster type, not a node type
master_username="username",
master_user_password="password",
)
clusters = conn.describe_clusters()['DescribeClustersResponse']['DescribeClustersResult']['Clusters']
list(clusters).should.have.length_of(1)
conn.delete_cluster(cluster_identifier)
clusters = conn.describe_clusters()['DescribeClustersResponse']['DescribeClustersResult']['Clusters']
list(clusters).should.have.length_of(0)
# Delete invalid id
conn.delete_cluster.when.called_with("not-a-cluster").should.throw(ClusterNotFound)
@mock_redshift
def test_modify_cluster():
conn = boto.connect_redshift()
cluster_identifier = 'my_cluster'
conn.create_cluster_security_group(
"security_group",
"This is my security group",
)
conn.create_cluster_parameter_group(
"my_parameter_group",
"redshift-1.0",
"This is my parameter group",
)
conn.create_cluster(
cluster_identifier,
node_type="dw.hs1.xlarge",  # "single-node" is a cluster type, not a node type
master_username="username",
master_user_password="password",
)
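# Exercise every mutable attribute in a single call, including renaming the cluster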
conn.modify_cluster(
cluster_identifier,
cluster_type="multi-node",
node_type="dw.hs1.xlarge",
number_of_nodes=2,
cluster_security_groups="security_group",
master_user_password="new_password",
cluster_parameter_group_name="my_parameter_group",
automated_snapshot_retention_period=7,
preferred_maintenance_window="Tue:03:00-Tue:11:00",
allow_version_upgrade=False,
new_cluster_identifier="new_identifier",
)
cluster_response = conn.describe_clusters("new_identifier")
cluster = cluster_response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
cluster['ClusterIdentifier'].should.equal("new_identifier")
cluster['NodeType'].should.equal("dw.hs1.xlarge")
cluster['ClusterSecurityGroups'][0]['ClusterSecurityGroupName'].should.equal("security_group")
cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00")
cluster['ClusterParameterGroups'][0]['ParameterGroupName'].should.equal("my_parameter_group")
cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7)
cluster['AllowVersionUpgrade'].should.equal(False)
cluster['NumberOfNodes'].should.equal(2)
@mock_redshift
@mock_ec2
def test_create_cluster_subnet_group():
vpc_conn = boto.connect_vpc()
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24")
subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24")
redshift_conn = boto.connect_redshift()
redshift_conn.create_cluster_subnet_group(
"my_subnet",
"This is my subnet group",
subnet_ids=[subnet1.id, subnet2.id],
)
subnets_response = redshift_conn.describe_cluster_subnet_groups("my_subnet")
my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0]
my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet")
my_subnet['Description'].should.equal("This is my subnet group")
subnet_ids = [subnet['SubnetIdentifier'] for subnet in my_subnet['Subnets']]
set(subnet_ids).should.equal(set([subnet1.id, subnet2.id]))
@mock_redshift
@mock_ec2
def test_create_invalid_cluster_subnet_group():
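# A subnet id that was never created should fail subnet-group validation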
redshift_conn = boto.connect_redshift()
redshift_conn.create_cluster_subnet_group.when.called_with(
"my_subnet",
"This is my subnet group",
subnet_ids=["subnet-1234"],
).should.throw(InvalidSubnet)
@mock_redshift
def test_describe_non_existent_subnet_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_subnet_groups.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound)
@mock_redshift
@mock_ec2
def test_delete_cluster_subnet_group():
vpc_conn = boto.connect_vpc()
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24")
redshift_conn = boto.connect_redshift()
redshift_conn.create_cluster_subnet_group(
"my_subnet",
"This is my subnet group",
subnet_ids=[subnet.id],
)
subnets_response = redshift_conn.describe_cluster_subnet_groups()
subnets = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups']
subnets.should.have.length_of(1)
redshift_conn.delete_cluster_subnet_group("my_subnet")
subnets_response = redshift_conn.describe_cluster_subnet_groups()
subnets = subnets_response['DescribeClusterSubnetGroupsResponse']['DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups']
subnets.should.have.length_of(0)
# Delete invalid id
redshift_conn.delete_cluster_subnet_group.when.called_with("not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound)
@mock_redshift
def test_create_cluster_security_group():
conn = boto.connect_redshift()
conn.create_cluster_security_group(
"my_security_group",
"This is my security group",
)
groups_response = conn.describe_cluster_security_groups("my_security_group")
my_group = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0]
my_group['ClusterSecurityGroupName'].should.equal("my_security_group")
my_group['Description'].should.equal("This is my security group")
list(my_group['IPRanges']).should.equal([])
@mock_redshift
def test_describe_non_existent_security_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_security_groups.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound)
@mock_redshift
def test_delete_cluster_security_group():
conn = boto.connect_redshift()
conn.create_cluster_security_group(
"my_security_group",
"This is my security group",
)
groups_response = conn.describe_cluster_security_groups()
groups = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups']
groups.should.have.length_of(2) # The default group already exists
conn.delete_cluster_security_group("my_security_group")
groups_response = conn.describe_cluster_security_groups()
groups = groups_response['DescribeClusterSecurityGroupsResponse']['DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups']
groups.should.have.length_of(1)
# Delete invalid id
conn.delete_cluster_security_group.when.called_with("not-a-security-group").should.throw(ClusterSecurityGroupNotFound)
@mock_redshift
def test_create_cluster_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group",
"redshift-1.0",
"This is my parameter group",
)
groups_response = conn.describe_cluster_parameter_groups("my_parameter_group")
my_group = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups'][0]
my_group['ParameterGroupName'].should.equal("my_parameter_group")
my_group['ParameterGroupFamily'].should.equal("redshift-1.0")
my_group['Description'].should.equal("This is my parameter group")
@mock_redshift
def test_describe_non_existent_parameter_group():
conn = boto.redshift.connect_to_region("us-east-1")
conn.describe_cluster_parameter_groups.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound)
@mock_redshift
def test_delete_cluster_parameter_group():
conn = boto.connect_redshift()
conn.create_cluster_parameter_group(
"my_parameter_group",
"redshift-1.0",
"This is my parameter group",
)
groups_response = conn.describe_cluster_parameter_groups()
groups = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups']
groups.should.have.length_of(2) # The default group already exists
conn.delete_cluster_parameter_group("my_parameter_group")
groups_response = conn.describe_cluster_parameter_groups()
groups = groups_response['DescribeClusterParameterGroupsResponse']['DescribeClusterParameterGroupsResult']['ParameterGroups']
groups.should.have.length_of(1)
# Delete invalid id
conn.delete_cluster_parameter_group.when.called_with("not-a-parameter-group").should.throw(ClusterParameterGroupNotFound)

View File

@ -0,0 +1,23 @@
from __future__ import unicode_literals
import json
import sure # noqa
import moto.server as server
from moto import mock_redshift
'''
Test the different server responses
'''
@mock_redshift
def test_describe_clusters():
backend = server.create_backend_app("redshift")
test_client = backend.test_client()
res = test_client.get('/?Action=DescribeClusters')
json_data = json.loads(res.data.decode("utf-8"))
clusters = json_data['DescribeClustersResponse']['DescribeClustersResult']['Clusters']
list(clusters).should.equal([])

View File

@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
@ -615,3 +617,40 @@ def test_acl_is_ignored_for_now():
key = bucket.get_key(keyname)
assert key.get_contents_as_string() == content
@mock_s3
def test_unicode_key():
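# A key with a non-ASCII name should be stored, listed, and fetched intact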
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = u'こんにちは.jpg'
key.set_contents_from_string('Hello world!')
list(bucket.list())
key = bucket.get_key(key.key)
assert key.get_contents_as_string().decode("utf-8") == 'Hello world!'
@mock_s3
def test_unicode_value():
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = Key(bucket)
key.key = 'some_key'
key.set_contents_from_string(u'こんにちは.jpg')
list(bucket.list())
key = bucket.get_key(key.key)
assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg'
@mock_s3
def test_setting_content_encoding():
conn = boto.connect_s3()
bucket = conn.create_bucket('mybucket')
key = bucket.new_key("keyname")
key.set_metadata("Content-Encoding", "gzip")
compressed_data = "abcdef"
key.set_contents_from_string(compressed_data)
key = bucket.get_key("keyname")
key.content_encoding.should.equal("gzip")