Merge branch 'master' into Bug-Fix-Secondary-Indexes-Ignored
This commit is contained in:
commit
5c7f01ab29
@ -64,7 +64,7 @@ It gets even better! Moto isn't just S3. Here's the status of the other AWS serv
|
||||
| Data Pipeline | @mock_datapipeline| basic endpoints done |
|
||||
|------------------------------------------------------------------------------|
|
||||
| DynamoDB | @mock_dynamodb | core endpoints done |
|
||||
| DynamoDB2 | @mock_dynamodb2 | core endpoints done - no indexes |
|
||||
| DynamoDB2 | @mock_dynamodb2 | core endpoints + partial indexes |
|
||||
|------------------------------------------------------------------------------|
|
||||
| EC2 | @mock_ec2 | core endpoints done |
|
||||
| - AMI | | core endpoints done |
|
||||
@ -73,6 +73,8 @@ It gets even better! Moto isn't just S3. Here's the status of the other AWS serv
|
||||
| - Security Groups | | core endpoints done |
|
||||
| - Tags | | all endpoints done |
|
||||
|------------------------------------------------------------------------------|
|
||||
| ECS | @mock_ecs | basic endpoints done |
|
||||
|------------------------------------------------------------------------------|
|
||||
| ELB | @mock_elb | core endpoints done |
|
||||
|------------------------------------------------------------------------------|
|
||||
| EMR | @mock_emr | core endpoints done |
|
||||
|
@ -12,6 +12,7 @@ from .datapipeline import mock_datapipeline # flake8: noqa
|
||||
from .dynamodb import mock_dynamodb # flake8: noqa
|
||||
from .dynamodb2 import mock_dynamodb2 # flake8: noqa
|
||||
from .ec2 import mock_ec2 # flake8: noqa
|
||||
from .ecs import mock_ecs # flake8: noqa
|
||||
from .elb import mock_elb # flake8: noqa
|
||||
from .emr import mock_emr # flake8: noqa
|
||||
from .glacier import mock_glacier # flake8: noqa
|
||||
|
@ -275,7 +275,10 @@ class AutoScalingBackend(BaseBackend):
|
||||
max_size = make_int(max_size)
|
||||
min_size = make_int(min_size)
|
||||
default_cooldown = make_int(default_cooldown)
|
||||
health_check_period = make_int(health_check_period)
|
||||
if health_check_period is None:
|
||||
health_check_period = 300
|
||||
else:
|
||||
health_check_period = make_int(health_check_period)
|
||||
|
||||
group = FakeAutoScalingGroup(
|
||||
name=name,
|
||||
@ -385,4 +388,3 @@ class AutoScalingBackend(BaseBackend):
|
||||
# One AutoScalingBackend per EC2 region, wired to that region's EC2 and ELB
# backends.
autoscaling_backends = dict(
    (region, AutoScalingBackend(ec2_backend, elb_backends[region]))
    for region, ec2_backend in ec2_backends.items()
)
|
||||
|
||||
|
@ -124,11 +124,22 @@ class Item(object):
|
||||
def update_with_attribute_updates(self, attribute_updates):
    """Apply an AttributeUpdates mapping (classic UpdateItem API) to this item.

    Each entry maps an attribute name to ``{'Action': ..., 'Value': {type: value}}``.
    Supported actions are DELETE (remove the attribute) and PUT (set/replace it).
    """
    for attribute_name, update_action in attribute_updates.items():
        action = update_action['Action']
        # DELETE with no Value removes the whole attribute.
        if action == 'DELETE' and 'Value' not in update_action:
            if attribute_name in self.attrs:
                del self.attrs[attribute_name]
            continue
        new_value = list(update_action['Value'].values())[0]
        if action == 'PUT':
            # TODO deal with other types
            if isinstance(new_value, (list, set)):
                self.attrs[attribute_name] = DynamoType({"SS": new_value})
            elif isinstance(new_value, dict):
                self.attrs[attribute_name] = DynamoType({"M": new_value})
            # Compare as sets: on Python 3, dict.keys() == ['N'] is always
            # False (a view never equals a list), which silently skipped the
            # N and NULL branches.
            elif set(update_action['Value'].keys()) == {'N'}:
                self.attrs[attribute_name] = DynamoType({"N": new_value})
            elif set(update_action['Value'].keys()) == {'NULL'}:
                # An explicit NULL clears the attribute.
                if attribute_name in self.attrs:
                    del self.attrs[attribute_name]
            else:
                self.attrs[attribute_name] = DynamoType({"S": new_value})
|
||||
|
||||
@ -278,20 +289,63 @@ class Table(object):
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
def query(self, hash_key, range_comparison, range_objs, index_name=None):
    """Query items by hash key — the table's own key or, when *index_name*
    is given, the hash key of that (local or global) secondary index —
    optionally narrowed by a range-key comparison.

    Returns ``(results, last_page)``; pagination is not implemented yet, so
    ``last_page`` is always True. Raises ValueError for an unknown index or
    for a range comparison against an index that has no range key.
    """
    results = []
    last_page = True  # Once pagination is implemented, change this

    if index_name:
        all_indexes = (self.global_indexes or []) + (self.indexes or [])
        indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
        if index_name not in indexes_by_name:
            raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
                index_name, self.name, ', '.join(indexes_by_name.keys())
            ))

        index = indexes_by_name[index_name]
        try:
            index_hash_key = [key for key in index['KeySchema'] if key['KeyType'] == 'HASH'][0]
        except IndexError:
            raise ValueError('Missing Hash Key. KeySchema: %s' % index['KeySchema'])

        # Match on the index's hash attribute; items that lack the attribute
        # are simply not in the index.
        possible_results = []
        for item in self.all_items():
            if not isinstance(item, Item):
                continue
            item_hash_key = item.attrs.get(index_hash_key['AttributeName'])
            if item_hash_key and item_hash_key == hash_key:
                possible_results.append(item)

        try:
            index_range_key = [key for key in index['KeySchema'] if key['KeyType'] == 'RANGE'][0]
        except IndexError:
            index_range_key = None
    else:
        possible_results = [item for item in list(self.all_items())
                            if isinstance(item, Item) and item.hash_key == hash_key]

    if range_comparison:
        if index_name and not index_range_key:
            raise ValueError('Range Key comparison but no range key found for index: %s' % index_name)
        elif index_name:
            for result in possible_results:
                # An item may lack the index range attribute entirely; skip it
                # instead of raising AttributeError on None.compare(...).
                item_range_value = result.attrs.get(index_range_key['AttributeName'])
                if item_range_value and item_range_value.compare(range_comparison, range_objs):
                    results.append(result)
        else:
            for result in possible_results:
                if result.range_key.compare(range_comparison, range_objs):
                    results.append(result)
    else:
        # No range filter: every hash-key match is returned.
        results = possible_results

    if index_name:
        if index_range_key:
            # Sort by the index range attribute; missing attributes sort as None.
            results.sort(key=lambda item: item.attrs[index_range_key['AttributeName']].value
                         if item.attrs.get(index_range_key['AttributeName']) else None)
        # An index without a range key has no defined sort order.
    else:
        results.sort(key=lambda item: item.range_key)
    return results, last_page
|
||||
|
||||
def all_items(self):
|
||||
@ -361,6 +415,38 @@ class DynamoDBBackend(BaseBackend):
|
||||
table.throughput = throughput
|
||||
return table
|
||||
|
||||
def update_table_global_indexes(self, name, global_index_updates):
    """Apply a list of GlobalSecondaryIndexUpdates (Create/Update/Delete
    entries) to the named table and return the table.

    Raises ValueError when deleting or updating a missing index, or when
    creating a duplicate one. Raises KeyError for an unknown table name.
    """
    table = self.tables[name]
    gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)
    for gsi_update in global_index_updates:
        gsi_to_create = gsi_update.get('Create')
        gsi_to_update = gsi_update.get('Update')
        gsi_to_delete = gsi_update.get('Delete')

        if gsi_to_delete:
            index_name = gsi_to_delete['IndexName']
            if index_name not in gsis_by_name:
                raise ValueError('Global Secondary Index does not exist, but tried to delete: %s' %
                                 gsi_to_delete['IndexName'])

            del gsis_by_name[index_name]

        if gsi_to_update:
            index_name = gsi_to_update['IndexName']
            if index_name not in gsis_by_name:
                raise ValueError('Global Secondary Index does not exist, but tried to update: %s' %
                                 gsi_to_update['IndexName'])
            gsis_by_name[index_name].update(gsi_to_update)

        if gsi_to_create:
            if gsi_to_create['IndexName'] in gsis_by_name:
                raise ValueError('Global Secondary Index already exists: %s' % gsi_to_create['IndexName'])

            gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create

    # Materialize as a list: on Python 3, dict.values() is a lazy view that
    # would keep reflecting later mutations of gsis_by_name.
    table.global_indexes = list(gsis_by_name.values())
    return table
|
||||
|
||||
def put_item(self, table_name, item_attrs, expected=None, overwrite=False):
|
||||
table = self.tables.get(table_name)
|
||||
if not table:
|
||||
@ -400,7 +486,7 @@ class DynamoDBBackend(BaseBackend):
|
||||
hash_key, range_key = self.get_keys_value(table, keys)
|
||||
return table.get_item(hash_key, range_key)
|
||||
|
||||
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts, index_name=None):
    """Look up the table and delegate the query to it.

    Returns ``(None, None)`` when the table does not exist; otherwise the
    ``(results, last_page)`` pair produced by ``Table.query``.
    """
    table = self.tables.get(table_name)
    if not table:
        return None, None

    # Wrap the raw request dicts in DynamoType before querying.
    hash_key = DynamoType(hash_key_dict)
    range_values = [DynamoType(value_dict) for value_dict in range_value_dicts]

    return table.query(hash_key, range_comparison, range_values, index_name)
|
||||
|
||||
def scan(self, table_name, filters):
|
||||
table = self.tables.get(table_name)
|
||||
@ -425,12 +511,20 @@ class DynamoDBBackend(BaseBackend):
|
||||
def update_item(self, table_name, key, update_expression, attribute_updates):
|
||||
table = self.get_table(table_name)
|
||||
|
||||
if table.hash_key_attr in key:
|
||||
# Sometimes the key is wrapped in a dict with the key name
|
||||
key = key[table.hash_key_attr]
|
||||
if all([table.hash_key_attr in key, table.range_key_attr in key]):
|
||||
# Covers cases where table has hash and range keys, ``key`` param will be a dict
|
||||
hash_value = DynamoType(key[table.hash_key_attr])
|
||||
range_value = DynamoType(key[table.range_key_attr])
|
||||
elif table.hash_key_attr in key:
|
||||
# Covers tables that have a range key where ``key`` param is a dict
|
||||
hash_value = DynamoType(key[table.hash_key_attr])
|
||||
range_value = None
|
||||
else:
|
||||
# Covers other cases
|
||||
hash_value = DynamoType(key)
|
||||
range_value = None
|
||||
|
||||
hash_value = DynamoType(key)
|
||||
item = table.get_item(hash_value)
|
||||
item = table.get_item(hash_value, range_value)
|
||||
if update_expression:
|
||||
item.update(update_expression)
|
||||
else:
|
||||
|
@ -125,8 +125,11 @@ class DynamoHandler(BaseResponse):
|
||||
|
||||
def update_table(self):
    """Handle UpdateTable: apply GSI updates and/or new throughput.

    Returns the (possibly unchanged) table description even when the
    request carries neither update section.
    """
    name = self.body['TableName']
    # Bind ``table`` up front so it is defined even for a request containing
    # neither GlobalSecondaryIndexUpdates nor ProvisionedThroughput
    # (previously a NameError at the return statement).
    table = dynamodb_backend2.get_table(name)
    if 'GlobalSecondaryIndexUpdates' in self.body:
        table = dynamodb_backend2.update_table_global_indexes(name, self.body['GlobalSecondaryIndexUpdates'])
    if 'ProvisionedThroughput' in self.body:
        throughput = self.body["ProvisionedThroughput"]
        table = dynamodb_backend2.update_table_throughput(name, throughput)
    return dynamo_json_dump(table.describe)
|
||||
|
||||
def describe_table(self):
|
||||
@ -241,11 +244,31 @@ class DynamoHandler(BaseResponse):
|
||||
if key_condition_expression:
|
||||
value_alias_map = self.body['ExpressionAttributeValues']
|
||||
|
||||
table = dynamodb_backend2.get_table(name)
|
||||
index_name = self.body.get('IndexName')
|
||||
if index_name:
|
||||
all_indexes = (table.global_indexes or []) + (table.indexes or [])
|
||||
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
|
||||
if index_name not in indexes_by_name:
|
||||
raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % (
|
||||
index_name, name, ', '.join(indexes_by_name.keys())
|
||||
))
|
||||
|
||||
index = indexes_by_name[index_name]['KeySchema']
|
||||
else:
|
||||
index = table.schema
|
||||
|
||||
key_map = [column for _, column in sorted((k, v) for k, v in self.body['ExpressionAttributeNames'].items())]
|
||||
|
||||
if " AND " in key_condition_expression:
|
||||
expressions = key_condition_expression.split(" AND ", 1)
|
||||
hash_key_expression = expressions[0]
|
||||
|
||||
index_hash_key = [key for key in index if key['KeyType'] == 'HASH'][0]
|
||||
hash_key_index_in_key_map = key_map.index(index_hash_key['AttributeName'])
|
||||
|
||||
hash_key_expression = expressions.pop(hash_key_index_in_key_map).strip('()')
|
||||
# TODO implement more than one range expression and OR operators
|
||||
range_key_expression = expressions[1].replace(")", "")
|
||||
range_key_expression = expressions[0].strip('()')
|
||||
range_key_expression_components = range_key_expression.split()
|
||||
range_comparison = range_key_expression_components[1]
|
||||
if 'AND' in range_key_expression:
|
||||
@ -293,24 +316,26 @@ class DynamoHandler(BaseResponse):
|
||||
range_comparison = None
|
||||
range_values = []
|
||||
|
||||
items, last_page = dynamodb_backend2.query(name, hash_key, range_comparison, range_values)
|
||||
index_name = self.body.get('IndexName')
|
||||
items, last_page = dynamodb_backend2.query(name, hash_key, range_comparison, range_values, index_name=index_name)
|
||||
if items is None:
|
||||
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
|
||||
return self.error(er)
|
||||
|
||||
limit = self.body.get("Limit")
|
||||
if limit:
|
||||
items = items[:limit]
|
||||
|
||||
reversed = self.body.get("ScanIndexForward")
|
||||
if reversed is False:
|
||||
items.reverse()
|
||||
|
||||
limit = self.body.get("Limit")
|
||||
if limit:
|
||||
items = items[:limit]
|
||||
|
||||
result = {
|
||||
"Count": len(items),
|
||||
"Items": [item.attrs for item in items],
|
||||
"ConsumedCapacityUnits": 1,
|
||||
}
|
||||
if self.body.get('Select', '').upper() != 'COUNT':
|
||||
result["Items"] = [item.attrs for item in items]
|
||||
|
||||
# Implement this when we do pagination
|
||||
# if not last_page:
|
||||
|
@ -92,6 +92,14 @@ class InvalidVpnConnectionIdError(EC2ClientError):
|
||||
.format(network_acl_id))
|
||||
|
||||
|
||||
class InvalidCustomerGatewayIdError(EC2ClientError):
    """Raised when an operation references a customer gateway id that does
    not exist in this backend."""

    def __init__(self, customer_gateway_id):
        message = "The customer gateway ID '{0}' does not exist".format(customer_gateway_id)
        super(InvalidCustomerGatewayIdError, self).__init__(
            "InvalidCustomerGatewayID.NotFound", message)
|
||||
|
||||
|
||||
class InvalidNetworkInterfaceIdError(EC2ClientError):
|
||||
def __init__(self, eni_id):
|
||||
super(InvalidNetworkInterfaceIdError, self).__init__(
|
||||
|
@ -55,7 +55,8 @@ from .exceptions import (
|
||||
InvalidCIDRSubnetError,
|
||||
InvalidNetworkAclIdError,
|
||||
InvalidVpnGatewayIdError,
|
||||
InvalidVpnConnectionIdError
|
||||
InvalidVpnConnectionIdError,
|
||||
InvalidCustomerGatewayIdError,
|
||||
)
|
||||
from .utils import (
|
||||
EC2_RESOURCE_TO_PREFIX,
|
||||
@ -95,6 +96,7 @@ from .utils import (
|
||||
random_network_acl_subnet_association_id,
|
||||
random_vpn_gateway_id,
|
||||
random_vpn_connection_id,
|
||||
random_customer_gateway_id,
|
||||
is_tag_filter,
|
||||
)
|
||||
|
||||
@ -340,6 +342,9 @@ class Instance(BotoInstance, TaggedEC2Resource):
|
||||
if self.subnet_id:
|
||||
subnet = ec2_backend.get_subnet(self.subnet_id)
|
||||
self.vpc_id = subnet.vpc_id
|
||||
self._placement.zone = subnet.availability_zone
|
||||
else:
|
||||
self._placement.zone = ec2_backend.region_name + 'a'
|
||||
|
||||
self.block_device_mapping = BlockDeviceMapping()
|
||||
|
||||
@ -1430,6 +1435,36 @@ class Volume(TaggedEC2Resource):
|
||||
else:
|
||||
return 'available'
|
||||
|
||||
def get_filter_value(self, filter_name):
    """Resolve a DescribeVolumes filter name to this volume's value.

    Unknown filters are delegated to the superclass (tag filters) and
    otherwise reported via raise_not_implemented_error.
    """
    if filter_name.startswith('attachment'):
        # Attachment filters can never match an unattached volume.
        if not self.attachment:
            return None
        if filter_name == 'attachment.attach-time':
            return self.attachment.attach_time
        if filter_name == 'attachment.device':
            return self.attachment.device
        if filter_name == 'attachment.instance-id':
            return self.attachment.instance.id
    elif filter_name == 'create-time':
        return self.create_time
    elif filter_name == 'size':
        return self.size
    elif filter_name == 'snapshot-id':
        return self.snapshot_id
    elif filter_name == 'status':
        return self.status

    filter_value = super(Volume, self).get_filter_value(filter_name)

    if filter_value is None:
        self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeVolumes".format(filter_name))

    return filter_value
|
||||
|
||||
|
||||
class Snapshot(TaggedEC2Resource):
|
||||
def __init__(self, ec2_backend, snapshot_id, volume, description):
|
||||
@ -1440,6 +1475,30 @@ class Snapshot(TaggedEC2Resource):
|
||||
self.create_volume_permission_groups = set()
|
||||
self.ec2_backend = ec2_backend
|
||||
|
||||
def get_filter_value(self, filter_name):
    """Resolve a DescribeSnapshots filter name to this snapshot's value.

    Unknown filters are delegated to the superclass (tag filters) and
    otherwise reported via raise_not_implemented_error.
    """
    if filter_name == 'description':
        return self.description
    elif filter_name == 'snapshot-id':
        return self.id
    elif filter_name == 'start-time':
        return self.start_time
    elif filter_name == 'volume-id':
        return self.volume.id
    elif filter_name == 'volume-size':
        return self.volume.size

    filter_value = super(Snapshot, self).get_filter_value(filter_name)

    if filter_value is None:
        self.ec2_backend.raise_not_implemented_error("The filter '{0}' for DescribeSnapshots".format(filter_name))

    return filter_value
|
||||
|
||||
|
||||
class EBSBackend(object):
|
||||
def __init__(self):
|
||||
@ -1459,7 +1518,10 @@ class EBSBackend(object):
|
||||
self.volumes[volume_id] = volume
|
||||
return volume
|
||||
|
||||
def describe_volumes(self, filters=None):
    """Return every volume, narrowed by the optional EC2 API filters."""
    volumes = self.volumes.values()
    if filters:
        return generic_filter(filters, volumes)
    return volumes
|
||||
|
||||
def get_volume(self, volume_id):
|
||||
@ -1505,7 +1567,10 @@ class EBSBackend(object):
|
||||
self.snapshots[snapshot_id] = snapshot
|
||||
return snapshot
|
||||
|
||||
def describe_snapshots(self, filters=None):
    """Return every snapshot, narrowed by the optional EC2 API filters."""
    snapshots = self.snapshots.values()
    if filters:
        return generic_filter(filters, snapshots)
    return snapshots
|
||||
|
||||
def get_snapshot(self, snapshot_id):
|
||||
@ -2798,6 +2863,45 @@ class VpnGatewayBackend(object):
|
||||
return detached
|
||||
|
||||
|
||||
class CustomerGateway(TaggedEC2Resource):
    """An EC2 customer gateway: the on-premises endpoint of a VPN connection."""

    def __init__(self, ec2_backend, id, type, ip_address, bgp_asn):
        # Backend reference and identity.
        self.ec2_backend = ec2_backend
        self.id = id
        # Configuration as supplied by CreateCustomerGateway.
        self.type = type
        self.ip_address = ip_address
        self.bgp_asn = bgp_asn
        # VPN connection attachments, keyed by attachment id.
        self.attachments = {}
        super(CustomerGateway, self).__init__()
|
||||
|
||||
|
||||
class CustomerGatewayBackend(object):
    """Mixin holding all customer-gateway state and operations for a region."""

    def __init__(self):
        # Customer gateways keyed by their cgw-* id.
        self.customer_gateways = {}
        super(CustomerGatewayBackend, self).__init__()

    def create_customer_gateway(self, type='ipsec.1', ip_address=None, bgp_asn=None):
        """Create, register and return a new customer gateway."""
        gateway_id = random_customer_gateway_id()
        gateway = CustomerGateway(self, gateway_id, type, ip_address, bgp_asn)
        self.customer_gateways[gateway_id] = gateway
        return gateway

    def get_all_customer_gateways(self, filters=None):
        """Return all customer gateways, narrowed by the optional filters."""
        return generic_filter(filters, self.customer_gateways.values())

    def get_customer_gateway(self, customer_gateway_id):
        """Return the gateway or raise InvalidCustomerGatewayIdError."""
        customer_gateway = self.customer_gateways.get(customer_gateway_id, None)
        if not customer_gateway:
            raise InvalidCustomerGatewayIdError(customer_gateway_id)
        return customer_gateway

    def delete_customer_gateway(self, customer_gateway_id):
        """Remove and return the gateway; raise if it does not exist."""
        deleted = self.customer_gateways.pop(customer_gateway_id, None)
        if not deleted:
            raise InvalidCustomerGatewayIdError(customer_gateway_id)
        return deleted
|
||||
|
||||
|
||||
class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
|
||||
RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend,
|
||||
VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend,
|
||||
@ -2806,7 +2910,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
|
||||
RouteTableBackend, RouteBackend, InternetGatewayBackend,
|
||||
VPCGatewayAttachmentBackend, SpotRequestBackend,
|
||||
ElasticAddressBackend, KeyPairBackend, DHCPOptionsSetBackend,
|
||||
NetworkAclBackend, VpnGatewayBackend):
|
||||
NetworkAclBackend, VpnGatewayBackend, CustomerGatewayBackend):
|
||||
|
||||
def __init__(self, region_name):
|
||||
super(EC2Backend, self).__init__()
|
||||
@ -2831,7 +2935,7 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
|
||||
for resource_id in resource_ids:
|
||||
resource_prefix = get_prefix(resource_id)
|
||||
if resource_prefix == EC2_RESOURCE_TO_PREFIX['customer-gateway']:
|
||||
self.raise_not_implemented_error('DescribeCustomerGateways')
|
||||
self.get_customer_gateway(customer_gateway_id=resource_id)
|
||||
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['dhcp-options']:
|
||||
self.describe_dhcp_options(options_ids=[resource_id])
|
||||
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['image']:
|
||||
|
@ -1,13 +1,82 @@
|
||||
from __future__ import unicode_literals
|
||||
from moto.core.responses import BaseResponse
|
||||
from moto.ec2.utils import filters_from_querystring
|
||||
|
||||
|
||||
class CustomerGateways(BaseResponse):
    """Query-API handlers for the customer-gateway EC2/VPC actions."""

    def create_customer_gateway(self):
        """Handle CreateCustomerGateway and render the created gateway."""
        # Querystring values arrive as single-element lists.
        type = self.querystring.get('Type', None)[0]
        ip_address = self.querystring.get('IpAddress', None)[0]
        bgp_asn = self.querystring.get('BgpAsn', None)[0]
        customer_gateway = self.ec2_backend.create_customer_gateway(type, ip_address=ip_address, bgp_asn=bgp_asn)
        template = self.response_template(CREATE_CUSTOMER_GATEWAY_RESPONSE)
        return template.render(customer_gateway=customer_gateway)

    def delete_customer_gateway(self):
        """Handle DeleteCustomerGateway; the backend raises for unknown ids."""
        customer_gateway_id = self.querystring.get('CustomerGatewayId')[0]
        delete_status = self.ec2_backend.delete_customer_gateway(customer_gateway_id)
        template = self.response_template(DELETE_CUSTOMER_GATEWAY_RESPONSE)
        return template.render(customer_gateway=delete_status)

    def describe_customer_gateways(self):
        """Handle DescribeCustomerGateways with optional API filters."""
        filters = filters_from_querystring(self.querystring)
        customer_gateways = self.ec2_backend.get_all_customer_gateways(filters)
        template = self.response_template(DESCRIBE_CUSTOMER_GATEWAYS_RESPONSE)
        return template.render(customer_gateways=customer_gateways)
|
||||
|
||||
|
||||
# Jinja2 response templates for the customer-gateway actions.
CREATE_CUSTOMER_GATEWAY_RESPONSE = """
<CreateCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <customerGateway>
      <customerGatewayId>{{ customer_gateway.id }}</customerGatewayId>
      <state>pending</state>
      <type>{{ customer_gateway.type }}</type>
      <ipAddress>{{ customer_gateway.ip_address }}</ipAddress>
      <bgpAsn>{{ customer_gateway.bgp_asn }}</bgpAsn>
      <tagSet>
        {% for tag in customer_gateway.get_tags() %}
          <item>
            <resourceId>{{ tag.resource_id }}</resourceId>
            <resourceType>{{ tag.resource_type }}</resourceType>
            <key>{{ tag.key }}</key>
            <value>{{ tag.value }}</value>
          </item>
        {% endfor %}
      </tagSet>
   </customerGateway>
</CreateCustomerGatewayResponse>"""

DELETE_CUSTOMER_GATEWAY_RESPONSE = """
<DeleteCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <return>{{ delete_status }}</return>
</DeleteCustomerGatewayResponse>"""

# NOTE: the DescribeCustomerGateways namespace URL previously contained a
# garbled "2014-10- 01", and the <state>/<type> values were swapped (state
# rendered an undefined attribute while type was hard-coded to 'available').
DESCRIBE_CUSTOMER_GATEWAYS_RESPONSE = """
<DescribeCustomerGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2014-10-01/">
   <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
   <customerGatewaySet>
   {% for customer_gateway in customer_gateways %}
      <item>
         <customerGatewayId>{{ customer_gateway.id }}</customerGatewayId>
         <state>available</state>
         <type>{{ customer_gateway.type }}</type>
         <ipAddress>{{ customer_gateway.ip_address }}</ipAddress>
         <bgpAsn>{{ customer_gateway.bgp_asn }}</bgpAsn>
         <tagSet>
           {% for tag in customer_gateway.get_tags() %}
             <item>
               <resourceId>{{ tag.resource_id }}</resourceId>
               <resourceType>{{ tag.resource_type }}</resourceType>
               <key>{{ tag.key }}</key>
               <value>{{ tag.value }}</value>
             </item>
           {% endfor %}
         </tagSet>
      </item>
   {% endfor %}
   </customerGatewaySet>
</DescribeCustomerGatewaysResponse>"""
|
||||
|
@ -1,5 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
from moto.core.responses import BaseResponse
|
||||
from moto.ec2.utils import filters_from_querystring
|
||||
|
||||
|
||||
class ElasticBlockStore(BaseResponse):
|
||||
@ -43,22 +44,22 @@ class ElasticBlockStore(BaseResponse):
|
||||
return DELETE_VOLUME_RESPONSE
|
||||
|
||||
def describe_snapshots(self):
    """Handle DescribeSnapshots: apply API filters, then narrow by any
    explicit SnapshotId.N parameters."""
    filters = filters_from_querystring(self.querystring)
    # Multiple snapshot ids arrive as SnapshotId.1, SnapshotId.2, ...
    # Collect them into a list: the previous comma-joined string made the
    # membership test a substring match, so e.g. 'snap-1' wrongly matched
    # a request for 'snap-11'.
    snapshot_ids = []
    for key, values in self.querystring.items():
        if 'SnapshotId' in key:
            snapshot_ids.extend(values)
    snapshots = self.ec2_backend.describe_snapshots(filters=filters)
    if snapshot_ids:
        snapshots = [s for s in snapshots if s.id in snapshot_ids]
    template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE)
    return template.render(snapshots=snapshots)
|
||||
|
||||
def describe_volumes(self):
    """Handle DescribeVolumes: apply API filters, then narrow by any
    explicit VolumeId.N parameters."""
    filters = filters_from_querystring(self.querystring)
    # Multiple volume ids arrive as VolumeId.1, VolumeId.2, ...
    # Collect them into a list: the previous comma-joined string made the
    # membership test a substring match, so e.g. 'vol-1' wrongly matched
    # a request for 'vol-12'.
    volume_ids = []
    for key, values in self.querystring.items():
        if 'VolumeId' in key:
            volume_ids.extend(values)
    volumes = self.ec2_backend.describe_volumes(filters=filters)
    if volume_ids:
        volumes = [v for v in volumes if v.id in volume_ids]
    template = self.response_template(DESCRIBE_VOLUMES_RESPONSE)
    return template.render(volumes=volumes)
|
||||
|
||||
|
@ -89,6 +89,10 @@ def random_vpn_connection_id():
|
||||
return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpn-connection'])
|
||||
|
||||
|
||||
def random_customer_gateway_id():
    """Generate a random customer-gateway id (cgw-... prefix)."""
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['customer-gateway'])
|
||||
|
||||
|
||||
def random_volume_id():
    """Generate a random EBS volume id (vol-... prefix)."""
    return random_id(prefix=EC2_RESOURCE_TO_PREFIX['volume'])
|
||||
|
||||
@ -314,7 +318,7 @@ def get_object_value(obj, attr):
|
||||
|
||||
|
||||
def is_tag_filter(filter_name):
    """Return True for filter names that address tags:
    'tag:<key>', 'tag-key' or 'tag-value'."""
    return filter_name.startswith(('tag:', 'tag-value', 'tag-key'))
|
||||
|
||||
|
11
moto/ecs/__init__.py
Normal file
11
moto/ecs/__init__.py
Normal file
@ -0,0 +1,11 @@
|
||||
from __future__ import unicode_literals
|
||||
from .models import ecs_backends
|
||||
from ..core.models import MockAWS
|
||||
|
||||
ecs_backend = ecs_backends['us-east-1']
|
||||
|
||||
def mock_ecs(func=None):
    """Patch ECS calls to the in-memory backends.

    Works both as a bare decorator (@mock_ecs) and as a context-manager
    factory (mock_ecs()).
    """
    mock = MockAWS(ecs_backends)
    return mock(func) if func else mock
|
204
moto/ecs/models.py
Normal file
204
moto/ecs/models.py
Normal file
@ -0,0 +1,204 @@
|
||||
from __future__ import unicode_literals
|
||||
import uuid
|
||||
|
||||
from moto.core import BaseBackend
|
||||
from moto.ec2 import ec2_backends
|
||||
|
||||
|
||||
class BaseObject(object):
    """Shared helpers for ECS model objects that serialize themselves into
    the camelCase JSON shape used by the ECS API."""

    def camelCase(self, key):
        """Convert a snake_case key to camelCase (first word left as-is)."""
        words = []
        for i, word in enumerate(key.split('_')):
            if i > 0:
                words.append(word.title())
            else:
                words.append(word)
        return ''.join(words)

    def gen_response_object(self):
        """Return a copy of ``__dict__`` with snake_case keys camelCased."""
        response_object = self.__dict__.copy()
        # Iterate over a snapshot of the items: deleting keys while iterating
        # .items() directly raises RuntimeError on Python 3.
        for key, value in list(response_object.items()):
            if '_' in key:
                response_object[self.camelCase(key)] = value
                del response_object[key]
        return response_object

    @property
    def response_object(self):
        return self.gen_response_object()
|
||||
|
||||
|
||||
class Cluster(BaseObject):
    """An ECS cluster; every counter starts at zero for a fresh cluster."""

    def __init__(self, cluster_name):
        self.name = cluster_name
        self.arn = 'arn:aws:ecs:us-east-1:012345678910:cluster/{0}'.format(cluster_name)
        self.status = 'ACTIVE'
        # A new cluster has no instances, services or tasks yet.
        self.active_services_count = 0
        self.pending_tasks_count = 0
        self.registered_container_instances_count = 0
        self.running_tasks_count = 0

    @property
    def response_object(self):
        """ECS responses use clusterArn/clusterName instead of arn/name."""
        response_object = self.gen_response_object()
        response_object['clusterArn'] = response_object.pop('arn')
        response_object['clusterName'] = response_object.pop('name')
        return response_object
|
||||
|
||||
|
||||
class TaskDefinition(BaseObject):
    """A registered ECS task definition (a family plus a numeric revision)."""

    def __init__(self, family, revision, container_definitions, volumes=None):
        self.family = family
        self.arn = 'arn:aws:ecs:us-east-1:012345678910:task-definition/{0}:{1}'.format(family, revision)
        self.container_definitions = container_definitions
        # NOTE(review): when volumes is None the attribute is simply absent,
        # so it is also omitted from the serialized response.
        if volumes is not None:
            self.volumes = volumes

    @property
    def response_object(self):
        response_object = self.gen_response_object()
        # The API exposes the ARN as taskDefinitionArn.
        response_object['taskDefinitionArn'] = response_object.pop('arn')
        return response_object
|
||||
|
||||
|
||||
class Service(BaseObject):
    """An ECS service running a task definition on a cluster."""

    def __init__(self, cluster, service_name, task_definition, desired_count):
        self.cluster_arn = cluster.arn
        self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format(service_name)
        self.name = service_name
        self.status = 'ACTIVE'
        self.task_definition = task_definition.arn
        self.desired_count = desired_count
        # A brand-new service has nothing running or pending.
        self.running_count = 0
        self.pending_count = 0
        self.events = []
        self.load_balancers = []

    @property
    def response_object(self):
        response_object = self.gen_response_object()
        # The API exposes serviceName/serviceArn instead of name/arn.
        response_object['serviceName'] = response_object.pop('name')
        response_object['serviceArn'] = response_object.pop('arn')
        return response_object
|
||||
|
||||
|
||||
class EC2ContainerServiceBackend(BaseBackend):
|
||||
def __init__(self):
|
||||
self.clusters = {}
|
||||
self.task_definitions = {}
|
||||
self.services = {}
|
||||
|
||||
def fetch_task_definition(self, task_definition_str):
    """Resolve 'family' or 'family:revision' to a stored task definition.

    A bare family name resolves to its latest revision. Raises Exception
    for an unknown family or an out-of-range revision.
    """
    parts = task_definition_str.split(':')
    if len(parts) == 2:
        family = parts[0]
        revision = int(parts[1])
    else:
        family = parts[0]
        revision = -1  # sentinel: latest revision
    if family in self.task_definitions:
        revisions = self.task_definitions[family]
        if revision == -1:
            return revisions[-1]
        if 0 < revision <= len(revisions):
            # Revisions are 1-based in the API, 0-based in storage.
            return revisions[revision - 1]
    raise Exception("{0} is not a task_definition".format(task_definition_str))
|
||||
|
||||
def create_cluster(self, cluster_name):
|
||||
cluster = Cluster(cluster_name)
|
||||
self.clusters[cluster_name] = cluster
|
||||
return cluster
|
||||
|
||||
def list_clusters(self):
|
||||
"""
|
||||
maxSize and pagination not implemented
|
||||
"""
|
||||
return [cluster.arn for cluster in self.clusters.values()]
|
||||
|
||||
def delete_cluster(self, cluster_str):
|
||||
cluster_name = cluster_str.split('/')[-1]
|
||||
if cluster_name in self.clusters:
|
||||
return self.clusters.pop(cluster_name)
|
||||
else:
|
||||
raise Exception("{0} is not a cluster".format(cluster_name))
|
||||
|
||||
def register_task_definition(self, family, container_definitions, volumes):
|
||||
if family in self.task_definitions:
|
||||
revision = len(self.task_definitions[family]) + 1
|
||||
else:
|
||||
self.task_definitions[family] = []
|
||||
revision = 1
|
||||
task_definition = TaskDefinition(family, revision, container_definitions, volumes)
|
||||
self.task_definitions[family].append(task_definition)
|
||||
|
||||
return task_definition
|
||||
|
||||
def list_task_definitions(self):
|
||||
"""
|
||||
Filtering not implemented
|
||||
"""
|
||||
task_arns = []
|
||||
for task_definition_list in self.task_definitions.values():
|
||||
task_arns.extend([task_definition.arn for task_definition in task_definition_list])
|
||||
return task_arns
|
||||
|
||||
def deregister_task_definition(self, task_definition_str):
|
||||
task_definition_name = task_definition_str.split('/')[-1]
|
||||
family, revision = task_definition_name.split(':')
|
||||
revision = int(revision)
|
||||
if family in self.task_definitions and 0 < revision <= len(self.task_definitions[family]):
|
||||
return self.task_definitions[family].pop(revision - 1)
|
||||
else:
|
||||
raise Exception("{0} is not a task_definition".format(task_definition_name))
|
||||
|
||||
def create_service(self, cluster_str, service_name, task_definition_str, desired_count):
|
||||
cluster_name = cluster_str.split('/')[-1]
|
||||
if cluster_name in self.clusters:
|
||||
cluster = self.clusters[cluster_name]
|
||||
else:
|
||||
raise Exception("{0} is not a cluster".format(cluster_name))
|
||||
task_definition = self.fetch_task_definition(task_definition_str)
|
||||
desired_count = desired_count if desired_count is not None else 0
|
||||
service = Service(cluster, service_name, task_definition, desired_count)
|
||||
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
|
||||
self.services[cluster_service_pair] = service
|
||||
return service
|
||||
|
||||
def list_services(self, cluster_str):
|
||||
cluster_name = cluster_str.split('/')[-1]
|
||||
service_arns = []
|
||||
for key, value in self.services.items():
|
||||
if cluster_name + ':' in key:
|
||||
service_arns.append(self.services[key].arn)
|
||||
return sorted(service_arns)
|
||||
|
||||
def update_service(self, cluster_str, service_name, task_definition_str, desired_count):
|
||||
cluster_name = cluster_str.split('/')[-1]
|
||||
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
|
||||
if cluster_service_pair in self.services:
|
||||
if task_definition_str is not None:
|
||||
task_definition = self.fetch_task_definition(task_definition_str)
|
||||
self.services[cluster_service_pair].task_definition = task_definition
|
||||
if desired_count is not None:
|
||||
self.services[cluster_service_pair].desired_count = desired_count
|
||||
return self.services[cluster_service_pair]
|
||||
else:
|
||||
raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name))
|
||||
|
||||
def delete_service(self, cluster_name, service_name):
|
||||
cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name)
|
||||
if cluster_service_pair in self.services:
|
||||
service = self.services[cluster_service_pair]
|
||||
if service.desired_count > 0:
|
||||
raise Exception("Service must have desiredCount=0")
|
||||
else:
|
||||
return self.services.pop(cluster_service_pair)
|
||||
else:
|
||||
raise Exception("cluster {0} or service {1} does not exist".format(cluster_name, service_name))
|
||||
|
||||
|
||||
# One independent ECS backend per known EC2 region.
ecs_backends = dict((region, EC2ContainerServiceBackend()) for region in ec2_backends)
|
102
moto/ecs/responses.py
Normal file
102
moto/ecs/responses.py
Normal file
@ -0,0 +1,102 @@
|
||||
from __future__ import unicode_literals
|
||||
import json
|
||||
import uuid
|
||||
|
||||
from moto.core.responses import BaseResponse
|
||||
from .models import ecs_backends
|
||||
|
||||
|
||||
class EC2ContainerServiceResponse(BaseResponse):
    """HTTP-level handlers translating ECS JSON requests into backend calls.

    Every action handler reads its parameters from the JSON request body and
    returns a JSON string mirroring the real ECS wire format.
    """

    @property
    def ecs_backend(self):
        """The ECS backend scoped to this request's region."""
        return ecs_backends[self.region]

    @property
    def request_params(self):
        """The request body parsed as JSON; an empty dict if unparsable."""
        try:
            return json.loads(self.body.decode())
        except ValueError:
            return {}

    def _get_param(self, param):
        # Missing parameters resolve to None so handlers can pass them
        # straight through to the backend.
        return self.request_params.get(param, None)

    def create_cluster(self):
        cluster = self.ecs_backend.create_cluster(self._get_param('clusterName'))
        return json.dumps({'cluster': cluster.response_object})

    def list_clusters(self):
        arns = self.ecs_backend.list_clusters()
        return json.dumps({'clusterArns': arns, 'nextToken': str(uuid.uuid1())})

    def delete_cluster(self):
        cluster = self.ecs_backend.delete_cluster(self._get_param('cluster'))
        return json.dumps({'cluster': cluster.response_object})

    def register_task_definition(self):
        task_definition = self.ecs_backend.register_task_definition(
            self._get_param('family'),
            self._get_param('containerDefinitions'),
            self._get_param('volumes'),
        )
        return json.dumps({'taskDefinition': task_definition.response_object})

    def list_task_definitions(self):
        arns = self.ecs_backend.list_task_definitions()
        return json.dumps({'taskDefinitionArns': arns, 'nextToken': str(uuid.uuid1())})

    def deregister_task_definition(self):
        task_definition = self.ecs_backend.deregister_task_definition(
            self._get_param('taskDefinition'))
        return json.dumps({'taskDefinition': task_definition.response_object})

    def create_service(self):
        service = self.ecs_backend.create_service(
            self._get_param('cluster'),
            self._get_param('serviceName'),
            self._get_param('taskDefinition'),
            self._get_int_param('desiredCount'),
        )
        return json.dumps({'service': service.response_object})

    def list_services(self):
        arns = self.ecs_backend.list_services(self._get_param('cluster'))
        return json.dumps({'serviceArns': arns, 'nextToken': str(uuid.uuid1())})

    def update_service(self):
        service = self.ecs_backend.update_service(
            self._get_param('cluster'),
            self._get_param('service'),
            self._get_param('taskDefinition'),
            self._get_int_param('desiredCount'),
        )
        return json.dumps({'service': service.response_object})

    def delete_service(self):
        service = self.ecs_backend.delete_service(
            self._get_param('cluster'), self._get_param('service'))
        return json.dumps({'service': service.response_object})
|
10
moto/ecs/urls.py
Normal file
10
moto/ecs/urls.py
Normal file
@ -0,0 +1,10 @@
|
||||
from __future__ import unicode_literals
|
||||
from .responses import EC2ContainerServiceResponse
|
||||
|
||||
# Moto intercepts any request whose host matches an ECS regional endpoint.
url_bases = [
    "https?://ecs.(.+).amazonaws.com",
]

# Every ECS action is POSTed to the service root; dispatch() routes the
# request by its action header/body.
url_paths = {
    '{0}/$': EC2ContainerServiceResponse.dispatch,
}
|
@ -1,5 +1,6 @@
|
||||
from __future__ import unicode_literals
|
||||
import boto
|
||||
import boto3
|
||||
import boto.ec2.autoscale
|
||||
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
|
||||
from boto.ec2.autoscale.group import AutoScalingGroup
|
||||
@ -103,7 +104,7 @@ def test_create_autoscaling_groups_defaults():
|
||||
group.desired_capacity.should.equal(2)
|
||||
group.vpc_zone_identifier.should.equal('')
|
||||
group.default_cooldown.should.equal(300)
|
||||
group.health_check_period.should.equal(None)
|
||||
group.health_check_period.should.equal(300)
|
||||
group.health_check_type.should.equal("EC2")
|
||||
list(group.load_balancers).should.equal([])
|
||||
group.placement_group.should.equal(None)
|
||||
@ -378,3 +379,43 @@ def test_autoscaling_group_with_elb():
|
||||
elb = elb_conn.get_all_load_balancers()[0]
|
||||
elb.instances.should.have.length_of(0)
|
||||
|
||||
|
||||
'''
|
||||
Boto3
|
||||
'''
|
||||
|
||||
|
||||
@mock_autoscaling
def test_create_autoscaling_group():
    """Creating an auto-scaling group via boto3 should return HTTP 200."""
    client = boto3.client('autoscaling', region_name='us-east-1')
    client.create_launch_configuration(
        LaunchConfigurationName='test_launch_configuration'
    )
    response = client.create_auto_scaling_group(
        AutoScalingGroupName='test_asg',
        LaunchConfigurationName='test_launch_configuration',
        MinSize=0,
        MaxSize=20,
        DesiredCapacity=5,
    )
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
|
||||
|
||||
|
||||
@mock_autoscaling
def test_describe_autoscaling_groups():
    """describe_auto_scaling_groups should return a created ASG by name."""
    client = boto3.client('autoscaling', region_name='us-east-1')
    client.create_launch_configuration(
        LaunchConfigurationName='test_launch_configuration'
    )
    client.create_auto_scaling_group(
        AutoScalingGroupName='test_asg',
        LaunchConfigurationName='test_launch_configuration',
        MinSize=0,
        MaxSize=20,
        DesiredCapacity=5,
    )
    response = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=["test_asg"]
    )
    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
    response['AutoScalingGroups'][0]['AutoScalingGroupName'].should.equal('test_asg')
|
@ -1,5 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from decimal import Decimal
|
||||
|
||||
import boto
|
||||
import boto3
|
||||
from boto3.dynamodb.conditions import Key
|
||||
@ -141,6 +143,36 @@ def test_item_add_and_describe_and_update():
|
||||
})
|
||||
|
||||
|
||||
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_partial_save():
    """partial_save() should persist modified attributes of a fetched item."""
    table = create_table()

    table.put_item(data={
        'forum_name': 'LOLCat Forum',
        'subject': 'The LOLz',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
    })

    item = table.get_item(forum_name="LOLCat Forum", subject='The LOLz')
    item['SentBy'] = 'User B'
    item.partial_save()

    reloaded = table.get_item(
        forum_name='LOLCat Forum',
        subject='The LOLz'
    )
    dict(reloaded).should.equal({
        'forum_name': 'LOLCat Forum',
        'subject': 'The LOLz',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User B',
    })
|
||||
|
||||
|
||||
@requires_boto_gte("2.9")
|
||||
@mock_dynamodb2
|
||||
def test_item_put_without_table():
|
||||
@ -538,6 +570,30 @@ def test_query_with_global_indexes():
|
||||
list(results).should.have.length_of(0)
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_reverse_query():
    """query_2(reverse=True) should return range keys in descending order."""
    conn = boto.dynamodb2.layer1.DynamoDBConnection()

    table = Table.create('messages', schema=[
        HashKey('subject'),
        RangeKey('created_at', data_type='N')
    ])

    for created_at in range(10):
        table.put_item({'subject': "Hi", 'created_at': created_at})

    results = table.query_2(
        subject__eq="Hi", created_at__lt=6, limit=4, reverse=True)

    [r['created_at'] for r in results].should.equal(
        [Decimal(5), Decimal(4), Decimal(3), Decimal(2)])
|
||||
|
||||
|
||||
@mock_dynamodb2
|
||||
def test_lookup():
|
||||
from decimal import Decimal
|
||||
@ -693,7 +749,7 @@ def test_boto3_conditions():
|
||||
results['Count'].should.equal(1)
|
||||
|
||||
results = table.query(
|
||||
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").begins_with('7')
|
||||
KeyConditionExpression=Key("subject").begins_with('7') & Key('forum_name').eq('the-key')
|
||||
)
|
||||
results['Count'].should.equal(1)
|
||||
|
||||
@ -701,3 +757,471 @@ def test_boto3_conditions():
|
||||
KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").between('567', '890')
|
||||
)
|
||||
results['Count'].should.equal(1)
|
||||
|
||||
|
||||
def _create_table_with_range_key():
    """Create and return a 'users' table with a hash+range key and one GSI."""
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')

    gsi = {
        'IndexName': 'TestGSI',
        'KeySchema': [
            {'AttributeName': 'username', 'KeyType': 'HASH'},
            {'AttributeName': 'created', 'KeyType': 'RANGE'},
        ],
        'Projection': {'ProjectionType': 'ALL'},
        'ProvisionedThroughput': {
            'ReadCapacityUnits': 5,
            'WriteCapacityUnits': 5
        },
    }
    dynamodb.create_table(
        TableName='users',
        KeySchema=[
            {'AttributeName': 'forum_name', 'KeyType': 'HASH'},
            {'AttributeName': 'subject', 'KeyType': 'RANGE'},
        ],
        GlobalSecondaryIndexes=[gsi],
        AttributeDefinitions=[
            {'AttributeName': 'forum_name', 'AttributeType': 'S'},
            {'AttributeName': 'subject', 'AttributeType': 'S'},
        ],
        ProvisionedThroughput={
            'ReadCapacityUnits': 5,
            'WriteCapacityUnits': 5
        },
    )
    return dynamodb.Table('users')
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_update_item_range_key_set():
    """update_item with PUT actions should overwrite and add attributes."""
    table = _create_table_with_range_key()
    table.put_item(Item={
        'forum_name': 'the-key',
        'subject': '123',
        'username': 'johndoe',
        'created': Decimal('3'),
    })

    item_key = {'forum_name': 'the-key', 'subject': '123'}
    updates = {
        'username': {'Action': u'PUT', 'Value': 'johndoe2'},
        'created': {'Action': u'PUT', 'Value': Decimal('4')},
        'mapfield': {'Action': u'PUT', 'Value': {'key': 'value'}},
    }
    table.update_item(Key=item_key, AttributeUpdates=updates)

    fetched = table.get_item(Key=item_key)['Item']
    # Normalise Decimal values to strings for a stable comparison.
    returned_item = dict(
        (k, str(v) if isinstance(v, Decimal) else v) for k, v in fetched.items())
    returned_item.should.equal({
        'username': "johndoe2",
        'forum_name': 'the-key',
        'subject': '123',
        'created': '4',
        'mapfield': {'key': 'value'},
    })
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_boto3_query_gsi_range_comparison():
    """GSI queries should honour range-key conditions and sort ordering."""
    table = _create_table_with_range_key()

    seed_items = [
        ('123', 'johndoe', 3),
        ('456', 'johndoe', 1),
        ('789', 'johndoe', 2),
        ('159', 'janedoe', 2),
        ('601', 'janedoe', 5),
    ]
    for subject, username, created in seed_items:
        table.put_item(Item={
            'forum_name': 'the-key',
            'subject': subject,
            'username': username,
            'created': created,
        })

    # All johndoe items, ascending by the GSI range key.
    results = table.query(
        KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt('0'),
        ScanIndexForward=True,
        IndexName='TestGSI',
    )
    expected = ["456", "789", "123"]
    for item, subject in zip(results['Items'], expected):
        item["subject"].should.equal(subject)

    # Same items again, but in reverse order.
    results = table.query(
        KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt('0'),
        ScanIndexForward=False,
        IndexName='TestGSI',
    )
    for item, subject in zip(reversed(results['Items']), expected):
        item["subject"].should.equal(subject)

    # Filter on the range key, with hash and range conditions swapped.
    results = table.query(
        KeyConditionExpression=Key("created").gt('1') & Key('username').eq('johndoe'),
        ConsistentRead=True,
        IndexName='TestGSI',
    )
    results['Count'].should.equal(2)

    # A condition matching nothing.
    results = table.query(
        KeyConditionExpression=Key('username').eq('janedoe') & Key("created").gt('9'),
        IndexName='TestGSI',
    )
    results['Count'].should.equal(0)

    results = table.query(
        KeyConditionExpression=Key('username').eq('janedoe') & Key("created").eq('5'),
        IndexName='TestGSI',
    )
    results['Count'].should.equal(1)

    # Results come back sorted by the GSI range key.
    results = table.query(
        KeyConditionExpression=Key('username').eq('johndoe') & Key("created").gt('0'),
        IndexName='TestGSI',
    )
    for item, created in zip(results['Items'],
                             [Decimal('1'), Decimal('2'), Decimal('3')]):
        item["created"].should.equal(created)
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_update_table_throughput():
    """Table.update should change the table's provisioned throughput."""
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')

    dynamodb.create_table(
        TableName='users',
        KeySchema=[
            {'AttributeName': 'forum_name', 'KeyType': 'HASH'},
            {'AttributeName': 'subject', 'KeyType': 'RANGE'},
        ],
        AttributeDefinitions=[
            {'AttributeName': 'forum_name', 'AttributeType': 'S'},
            {'AttributeName': 'subject', 'AttributeType': 'S'},
        ],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 6},
    )
    table = dynamodb.Table('users')

    table.provisioned_throughput['ReadCapacityUnits'].should.equal(5)
    table.provisioned_throughput['WriteCapacityUnits'].should.equal(6)

    table.update(ProvisionedThroughput={
        'ReadCapacityUnits': 10,
        'WriteCapacityUnits': 11,
    })

    # Re-fetch the table so we read the updated description.
    table = dynamodb.Table('users')
    table.provisioned_throughput['ReadCapacityUnits'].should.equal(10)
    table.provisioned_throughput['WriteCapacityUnits'].should.equal(11)
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_update_table_gsi_throughput():
    """A GSI 'Update' action should change only the index's throughput."""
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')

    dynamodb.create_table(
        TableName='users',
        KeySchema=[
            {'AttributeName': 'forum_name', 'KeyType': 'HASH'},
            {'AttributeName': 'subject', 'KeyType': 'RANGE'},
        ],
        GlobalSecondaryIndexes=[{
            'IndexName': 'TestGSI',
            'KeySchema': [
                {'AttributeName': 'username', 'KeyType': 'HASH'},
                {'AttributeName': 'created', 'KeyType': 'RANGE'},
            ],
            'Projection': {'ProjectionType': 'ALL'},
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 3,
                'WriteCapacityUnits': 4
            },
        }],
        AttributeDefinitions=[
            {'AttributeName': 'forum_name', 'AttributeType': 'S'},
            {'AttributeName': 'subject', 'AttributeType': 'S'},
        ],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 6},
    )
    table = dynamodb.Table('users')

    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
    gsi_throughput['ReadCapacityUnits'].should.equal(3)
    gsi_throughput['WriteCapacityUnits'].should.equal(4)

    table.provisioned_throughput['ReadCapacityUnits'].should.equal(5)
    table.provisioned_throughput['WriteCapacityUnits'].should.equal(6)

    table.update(GlobalSecondaryIndexUpdates=[{
        'Update': {
            'IndexName': 'TestGSI',
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 11,
            }
        },
    }])

    table = dynamodb.Table('users')

    # The table-level throughput must be untouched by the index update.
    table.provisioned_throughput['ReadCapacityUnits'].should.equal(5)
    table.provisioned_throughput['WriteCapacityUnits'].should.equal(6)

    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
    gsi_throughput['ReadCapacityUnits'].should.equal(10)
    gsi_throughput['WriteCapacityUnits'].should.equal(11)
|
||||
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_update_table_gsi_create():
    """GSI 'Create', then 'Update', then 'Delete' actions via Table.update."""
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')

    # Create the DynamoDB table without any index.
    dynamodb.create_table(
        TableName='users',
        KeySchema=[
            {'AttributeName': 'forum_name', 'KeyType': 'HASH'},
            {'AttributeName': 'subject', 'KeyType': 'RANGE'},
        ],
        AttributeDefinitions=[
            {'AttributeName': 'forum_name', 'AttributeType': 'S'},
            {'AttributeName': 'subject', 'AttributeType': 'S'},
        ],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 6},
    )
    table = dynamodb.Table('users')

    table.global_secondary_indexes.should.have.length_of(0)

    table.update(GlobalSecondaryIndexUpdates=[{
        'Create': {
            'IndexName': 'TestGSI',
            'KeySchema': [
                {'AttributeName': 'username', 'KeyType': 'HASH'},
                {'AttributeName': 'created', 'KeyType': 'RANGE'},
            ],
            'Projection': {'ProjectionType': 'ALL'},
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 3,
                'WriteCapacityUnits': 4
            },
        },
    }])

    table = dynamodb.Table('users')
    table.global_secondary_indexes.should.have.length_of(1)

    # FIX: dropped the redundant `assert` in front of sure assertions --
    # `should.equal` raises on mismatch, so the bare expression is the house
    # style used by every other test in this file.
    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
    gsi_throughput['ReadCapacityUnits'].should.equal(3)
    gsi_throughput['WriteCapacityUnits'].should.equal(4)

    # Check update works
    table.update(GlobalSecondaryIndexUpdates=[{
        'Update': {
            'IndexName': 'TestGSI',
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 11,
            }
        },
    }])
    table = dynamodb.Table('users')

    gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput']
    gsi_throughput['ReadCapacityUnits'].should.equal(10)
    gsi_throughput['WriteCapacityUnits'].should.equal(11)

    # Finally, deleting the index should leave the table without any GSI.
    table.update(GlobalSecondaryIndexUpdates=[{
        'Delete': {
            'IndexName': 'TestGSI',
        },
    }])

    table = dynamodb.Table('users')
    table.global_secondary_indexes.should.have.length_of(0)
|
||||
|
||||
|
||||
@mock_dynamodb2
def test_update_table_gsi_delete():
    """A GSI 'Delete' action should remove the index from the table.

    FIX: renamed from ``test_update_table_gsi_throughput`` -- this module
    already defines a test with that name, so this later definition shadowed
    it and the throughput-update test was silently never run.
    """
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')

    # Create the DynamoDB table with one GSI.
    dynamodb.create_table(
        TableName='users',
        KeySchema=[
            {'AttributeName': 'forum_name', 'KeyType': 'HASH'},
            {'AttributeName': 'subject', 'KeyType': 'RANGE'},
        ],
        GlobalSecondaryIndexes=[{
            'IndexName': 'TestGSI',
            'KeySchema': [
                {'AttributeName': 'username', 'KeyType': 'HASH'},
                {'AttributeName': 'created', 'KeyType': 'RANGE'},
            ],
            'Projection': {'ProjectionType': 'ALL'},
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 3,
                'WriteCapacityUnits': 4
            },
        }],
        AttributeDefinitions=[
            {'AttributeName': 'forum_name', 'AttributeType': 'S'},
            {'AttributeName': 'subject', 'AttributeType': 'S'},
        ],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 6},
    )
    table = dynamodb.Table('users')
    table.global_secondary_indexes.should.have.length_of(1)

    table.update(GlobalSecondaryIndexUpdates=[{
        'Delete': {
            'IndexName': 'TestGSI',
        },
    }])

    table = dynamodb.Table('users')
    table.global_secondary_indexes.should.have.length_of(0)
|
||||
|
@ -1,10 +1,46 @@
|
||||
from __future__ import unicode_literals
|
||||
import boto
|
||||
import sure # noqa
|
||||
from nose.tools import assert_raises
|
||||
from nose.tools import assert_false
|
||||
from boto.exception import EC2ResponseError
|
||||
|
||||
from moto import mock_ec2
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_customer_gateways():
|
||||
pass
|
||||
def test_create_customer_gateways():
|
||||
conn = boto.connect_vpc('the_key', 'the_secret')
|
||||
|
||||
customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534)
|
||||
customer_gateway.should_not.be.none
|
||||
customer_gateway.id.should.match(r'cgw-\w+')
|
||||
customer_gateway.type.should.equal('ipsec.1')
|
||||
customer_gateway.bgp_asn.should.equal(65534)
|
||||
customer_gateway.ip_address.should.equal('205.251.242.54')
|
||||
|
||||
@mock_ec2
def test_describe_customer_gateways():
    """get_all_customer_gateways should list a freshly created gateway."""
    conn = boto.connect_vpc('the_key', 'the_secret')
    customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534)

    cgws = conn.get_all_customer_gateways()
    cgws.should.have.length_of(1)
    cgws[0].id.should.match(customer_gateway.id)
|
||||
|
||||
@mock_ec2
def test_delete_customer_gateways():
    """Deleting a customer gateway should remove it from the listing."""
    conn = boto.connect_vpc('the_key', 'the_secret')

    customer_gateway = conn.create_customer_gateway('ipsec.1', '205.251.242.54', 65534)
    customer_gateway.should_not.be.none
    cgws = conn.get_all_customer_gateways()
    cgws[0].id.should.match(customer_gateway.id)

    # FIX: dropped the unused `deleted = ...` binding; the deletion's effect
    # is verified through the follow-up listing.
    conn.delete_customer_gateway(customer_gateway.id)
    cgws = conn.get_all_customer_gateways()
    cgws.should.have.length_of(0)
|
||||
|
||||
@mock_ec2
|
||||
def test_delete_customer_gateways_bad_id():
|
||||
conn = boto.connect_vpc('the_key', 'the_secret')
|
||||
with assert_raises(EC2ResponseError) as cm:
|
||||
conn.delete_customer_gateway('cgw-0123abcd')
|
||||
|
@ -48,6 +48,63 @@ def test_filter_volume_by_id():
|
||||
vol2.should.have.length_of(2)
|
||||
|
||||
|
||||
@mock_ec2
def test_volume_filters():
    """get_all_volumes should support the documented EC2 volume filters."""
    conn = boto.connect_ec2('the_key', 'the_secret')

    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    instance.update()

    volume1 = conn.create_volume(80, "us-east-1a")
    volume2 = conn.create_volume(36, "us-east-1b")
    volume3 = conn.create_volume(20, "us-east-1c")

    snapshot = volume3.create_snapshot(description='testsnap')
    volume4 = conn.create_volume(25, "us-east-1a", snapshot=snapshot)

    conn.create_tags([volume1.id], {'testkey1': 'testvalue1'})
    conn.create_tags([volume2.id], {'testkey2': 'testvalue2'})

    for vol in (volume1, volume2, volume3, volume4):
        vol.update()

    block_mapping = instance.block_device_mapping['/dev/sda1']

    def ids(volumes):
        # Helper: the set of volume ids in a get_all_volumes() result.
        return set(vol.id for vol in volumes)

    ids(conn.get_all_volumes(
        filters={'attachment.attach-time': block_mapping.attach_time}
    )).should.equal(set([block_mapping.volume_id]))

    ids(conn.get_all_volumes(
        filters={'attachment.device': '/dev/sda1'}
    )).should.equal(set([block_mapping.volume_id]))

    ids(conn.get_all_volumes(
        filters={'attachment.instance-id': instance.id}
    )).should.equal(set([block_mapping.volume_id]))

    volumes_by_create_time = conn.get_all_volumes(filters={'create-time': volume4.create_time})
    set([vol.create_time for vol in volumes_by_create_time]).should.equal(set([volume4.create_time]))

    ids(conn.get_all_volumes(filters={'size': volume2.size})).should.equal(set([volume2.id]))
    ids(conn.get_all_volumes(filters={'snapshot-id': snapshot.id})).should.equal(set([volume4.id]))
    ids(conn.get_all_volumes(filters={'status': 'in-use'})).should.equal(set([block_mapping.volume_id]))
    ids(conn.get_all_volumes(filters={'tag-key': 'testkey1'})).should.equal(set([volume1.id]))
    ids(conn.get_all_volumes(filters={'tag-value': 'testvalue1'})).should.equal(set([volume1.id]))
    ids(conn.get_all_volumes(filters={'tag:testkey1': 'testvalue1'})).should.equal(set([volume1.id]))
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_volume_attach_and_detach():
|
||||
conn = boto.connect_ec2('the_key', 'the_secret')
|
||||
@ -139,6 +196,44 @@ def test_filter_snapshot_by_id():
|
||||
s.region.name.should.equal(conn.region.name)
|
||||
|
||||
|
||||
@mock_ec2
def test_snapshot_filters():
    # Verify that get_all_snapshots honours every supported filter key.
    conn = boto.connect_ec2('the_key', 'the_secret')
    volume1 = conn.create_volume(20, "us-east-1a")
    volume2 = conn.create_volume(25, "us-east-1a")

    snapshot1 = volume1.create_snapshot(description='testsnapshot1')
    snapshot2 = volume1.create_snapshot(description='testsnapshot2')
    snapshot3 = volume2.create_snapshot(description='testsnapshot3')

    # Tag two of the snapshots so the tag filters have something to match.
    conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'})
    conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'})

    def matching_ids(filters):
        # Run a filtered query and return the matching snapshot ids as a set,
        # so comparisons are order-insensitive.
        return set(snap.id for snap in conn.get_all_snapshots(filters=filters))

    matching_ids({'description': 'testsnapshot1'}).should.equal(set([snapshot1.id]))
    matching_ids({'snapshot-id': snapshot1.id}).should.equal(set([snapshot1.id]))

    # The start-time filter is checked on the snapshot attribute itself
    # rather than on the id.
    by_start_time = conn.get_all_snapshots(filters={'start-time': snapshot1.start_time})
    set(snap.start_time for snap in by_start_time).should.equal(set([snapshot1.start_time]))

    matching_ids({'volume-id': volume1.id}).should.equal(set([snapshot1.id, snapshot2.id]))
    matching_ids({'volume-size': volume1.size}).should.equal(set([snapshot1.id, snapshot2.id]))
    matching_ids({'tag-key': 'testkey1'}).should.equal(set([snapshot1.id]))
    matching_ids({'tag-value': 'testvalue1'}).should.equal(set([snapshot1.id]))
    matching_ids({'tag:testkey1': 'testvalue1'}).should.equal(set([snapshot1.id]))
|
||||
|
||||
|
||||
@mock_ec2
|
||||
def test_snapshot_attribute():
|
||||
conn = boto.connect_ec2('the_key', 'the_secret')
|
||||
|
@ -51,21 +51,23 @@ def test_instance_launch_and_terminate():
|
||||
reservations[0].id.should.equal(reservation.id)
|
||||
instances = reservations[0].instances
|
||||
instances.should.have.length_of(1)
|
||||
instances[0].id.should.equal(instance.id)
|
||||
instances[0].state.should.equal('running')
|
||||
instances[0].launch_time.should.equal("2014-01-01T05:00:00.000Z")
|
||||
instances[0].vpc_id.should.equal(None)
|
||||
instance = instances[0]
|
||||
instance.id.should.equal(instance.id)
|
||||
instance.state.should.equal('running')
|
||||
instance.launch_time.should.equal("2014-01-01T05:00:00.000Z")
|
||||
instance.vpc_id.should.equal(None)
|
||||
instance.placement.should.equal('us-east-1a')
|
||||
|
||||
root_device_name = instances[0].root_device_name
|
||||
instances[0].block_device_mapping[root_device_name].status.should.equal('in-use')
|
||||
volume_id = instances[0].block_device_mapping[root_device_name].volume_id
|
||||
root_device_name = instance.root_device_name
|
||||
instance.block_device_mapping[root_device_name].status.should.equal('in-use')
|
||||
volume_id = instance.block_device_mapping[root_device_name].volume_id
|
||||
volume_id.should.match(r'vol-\w+')
|
||||
|
||||
volume = conn.get_all_volumes(volume_ids=[volume_id])[0]
|
||||
volume.attach_data.instance_id.should.equal(instance.id)
|
||||
volume.status.should.equal('in-use')
|
||||
|
||||
conn.terminate_instances([instances[0].id])
|
||||
conn.terminate_instances([instance.id])
|
||||
|
||||
reservations = conn.get_all_instances()
|
||||
instance = reservations[0].instances[0]
|
||||
|
336
tests/test_ecs/test_ecs_boto3.py
Normal file
336
tests/test_ecs/test_ecs_boto3.py
Normal file
@ -0,0 +1,336 @@
|
||||
from __future__ import unicode_literals
|
||||
import boto3
|
||||
import sure # noqa
|
||||
|
||||
from moto import mock_ecs
|
||||
|
||||
|
||||
@mock_ecs
def test_create_cluster():
    # A freshly created cluster is ACTIVE with every counter at zero.
    client = boto3.client('ecs', region_name='us-east-1')
    cluster = client.create_cluster(
        clusterName='test_ecs_cluster'
    )['cluster']
    cluster['clusterName'].should.equal('test_ecs_cluster')
    cluster['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    cluster['status'].should.equal('ACTIVE')
    # No instances, tasks or services exist yet.
    for counter in ('registeredContainerInstancesCount', 'runningTasksCount',
                    'pendingTasksCount', 'activeServicesCount'):
        cluster[counter].should.equal(0)
|
||||
|
||||
|
||||
@mock_ecs
def test_list_clusters():
    # list_clusters returns the ARN of every cluster created so far.
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_cluster0')
    client.create_cluster(clusterName='test_cluster1')

    cluster_arns = client.list_clusters()['clusterArns']
    cluster_arns.should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0')
    cluster_arns.should.contain('arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1')
|
||||
|
||||
|
||||
@mock_ecs
def test_delete_cluster():
    # Deleting a cluster returns its description and removes it from listings.
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')

    cluster = client.delete_cluster(cluster='test_ecs_cluster')['cluster']
    cluster['clusterName'].should.equal('test_ecs_cluster')
    cluster['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    # NOTE(review): the response still reports ACTIVE after deletion --
    # confirm this is the intended mock behaviour.
    cluster['status'].should.equal('ACTIVE')
    cluster['registeredContainerInstancesCount'].should.equal(0)
    cluster['runningTasksCount'].should.equal(0)
    cluster['pendingTasksCount'].should.equal(0)
    cluster['activeServicesCount'].should.equal(0)

    # The deleted cluster no longer shows up in list_clusters.
    client.list_clusters()['clusterArns'].should.have.length_of(0)
|
||||
|
||||
|
||||
@mock_ecs
def test_register_task_definition():
    # Registering a task definition yields revision 1 of the family, with the
    # container definition echoed back unchanged.
    client = boto3.client('ecs', region_name='us-east-1')
    response = client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[
            {
                'name': 'hello_world',
                'image': 'docker/hello-world:latest',
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': 'SOME_ACCESS_KEY'
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }
        ]
    )
    task_definition = response['taskDefinition']
    type(task_definition).should.be(dict)
    task_definition['taskDefinitionArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')

    container = task_definition['containerDefinitions'][0]
    container['name'].should.equal('hello_world')
    container['image'].should.equal('docker/hello-world:latest')
    container['cpu'].should.equal(1024)
    container['memory'].should.equal(400)
    container['essential'].should.equal(True)
    container['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID')
    container['environment'][0]['value'].should.equal('SOME_ACCESS_KEY')
    container['logConfiguration']['logDriver'].should.equal('json-file')
|
||||
|
||||
|
||||
@mock_ecs
def test_list_task_definitions():
    # Each registration under the same family bumps the revision number, and
    # list_task_definitions returns one ARN per revision.
    client = boto3.client('ecs', region_name='us-east-1')

    def register(container):
        # Register a single-container task definition in the shared family.
        client.register_task_definition(
            family='test_ecs_task',
            containerDefinitions=[container]
        )

    register({
        'name': 'hello_world',
        'image': 'docker/hello-world:latest',
        'cpu': 1024,
        'memory': 400,
        'essential': True,
        'environment': [{
            'name': 'AWS_ACCESS_KEY_ID',
            'value': 'SOME_ACCESS_KEY'
        }],
        'logConfiguration': {'logDriver': 'json-file'}
    })
    register({
        'name': 'hello_world2',
        'image': 'docker/hello-world2:latest',
        'cpu': 1024,
        'memory': 400,
        'essential': True,
        'environment': [{
            'name': 'AWS_ACCESS_KEY_ID',
            'value': 'SOME_ACCESS_KEY2'
        }],
        'logConfiguration': {'logDriver': 'json-file'}
    })

    arns = client.list_task_definitions()['taskDefinitionArns']
    arns.should.have.length_of(2)
    arns[0].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
    arns[1].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2')
|
||||
|
||||
|
||||
@mock_ecs
def test_deregister_task_definition():
    # Deregistering a revision returns the full description of what was removed.
    client = boto3.client('ecs', region_name='us-east-1')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[
            {
                'name': 'hello_world',
                'image': 'docker/hello-world:latest',
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': 'SOME_ACCESS_KEY'
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }
        ]
    )

    response = client.deregister_task_definition(
        taskDefinition='test_ecs_task:1'
    )
    task_definition = response['taskDefinition']
    type(task_definition).should.be(dict)
    task_definition['taskDefinitionArn'].should.equal(
        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')

    container = task_definition['containerDefinitions'][0]
    container['name'].should.equal('hello_world')
    container['image'].should.equal('docker/hello-world:latest')
    container['cpu'].should.equal(1024)
    container['memory'].should.equal(400)
    container['essential'].should.equal(True)
    container['environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID')
    container['environment'][0]['value'].should.equal('SOME_ACCESS_KEY')
    container['logConfiguration']['logDriver'].should.equal('json-file')
|
||||
|
||||
|
||||
@mock_ecs
def test_create_service():
    # Creating a service binds it to the cluster and to revision 1 of the
    # task definition family, with no tasks running yet.
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[
            {
                'name': 'hello_world',
                'image': 'docker/hello-world:latest',
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': 'SOME_ACCESS_KEY'
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }
        ]
    )

    service = client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )['service']
    service['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    service['desiredCount'].should.equal(2)
    service['events'].should.have.length_of(0)
    service['loadBalancers'].should.have.length_of(0)
    service['pendingCount'].should.equal(0)
    service['runningCount'].should.equal(0)
    service['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service')
    service['serviceName'].should.equal('test_ecs_service')
    service['status'].should.equal('ACTIVE')
    service['taskDefinition'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
|
||||
|
||||
|
||||
@mock_ecs
def test_list_services():
    # list_services scoped to a cluster returns one ARN per created service.
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[
            {
                'name': 'hello_world',
                'image': 'docker/hello-world:latest',
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': 'SOME_ACCESS_KEY'
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }
        ]
    )
    client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service1',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )
    client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service2',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )

    service_arns = client.list_services(
        cluster='test_ecs_cluster'
    )['serviceArns']
    service_arns.should.have.length_of(2)
    service_arns[0].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1')
    service_arns[1].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2')
|
||||
|
||||
|
||||
@mock_ecs
def test_update_service():
    # update_service changes the desired count of an existing service.
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[
            {
                'name': 'hello_world',
                'image': 'docker/hello-world:latest',
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': 'SOME_ACCESS_KEY'
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }
        ]
    )

    created = client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )
    created['service']['desiredCount'].should.equal(2)

    updated = client.update_service(
        cluster='test_ecs_cluster',
        service='test_ecs_service',
        desiredCount=0
    )
    updated['service']['desiredCount'].should.equal(0)
|
||||
|
||||
|
||||
@mock_ecs
def test_delete_service():
    # delete_service returns the final description of the removed service.
    client = boto3.client('ecs', region_name='us-east-1')
    client.create_cluster(clusterName='test_ecs_cluster')
    client.register_task_definition(
        family='test_ecs_task',
        containerDefinitions=[
            {
                'name': 'hello_world',
                'image': 'docker/hello-world:latest',
                'cpu': 1024,
                'memory': 400,
                'essential': True,
                'environment': [{
                    'name': 'AWS_ACCESS_KEY_ID',
                    'value': 'SOME_ACCESS_KEY'
                }],
                'logConfiguration': {'logDriver': 'json-file'}
            }
        ]
    )
    client.create_service(
        cluster='test_ecs_cluster',
        serviceName='test_ecs_service',
        taskDefinition='test_ecs_task',
        desiredCount=2
    )
    # Scale down to zero before deleting -- presumably required by the API;
    # TODO confirm the mock enforces this.
    client.update_service(
        cluster='test_ecs_cluster',
        service='test_ecs_service',
        desiredCount=0
    )

    service = client.delete_service(
        cluster='test_ecs_cluster',
        service='test_ecs_service'
    )['service']
    service['clusterArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster')
    service['desiredCount'].should.equal(0)
    service['events'].should.have.length_of(0)
    service['loadBalancers'].should.have.length_of(0)
    service['pendingCount'].should.equal(0)
    service['runningCount'].should.equal(0)
    service['serviceArn'].should.equal('arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service')
    service['serviceName'].should.equal('test_ecs_service')
    service['status'].should.equal('ACTIVE')
    service['taskDefinition'].should.equal('arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
|
Loading…
Reference in New Issue
Block a user