Merge branch 'master' into sns_regions
Commit 9904193d66
@@ -12,6 +12,7 @@ matrix:
       env: BOTO_VERSION=2.36.0
 install:
   - travis_retry pip install boto==$BOTO_VERSION
+  - travis_retry pip install boto3
   - travis_retry pip install .
   - travis_retry pip install -r requirements-dev.txt
   - travis_retry pip install coveralls
@@ -3,7 +3,7 @@ import logging
 logging.getLogger('boto').setLevel(logging.CRITICAL)
 
 __title__ = 'moto'
-__version__ = '0.4.7'
+__version__ = '0.4.8'
 
 from .autoscaling import mock_autoscaling  # flake8: noqa
 from .cloudformation import mock_cloudformation  # flake8: noqa
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals
 from moto.autoscaling import autoscaling_backend
+from moto.cloudwatch import cloudwatch_backend
 from moto.cloudformation import cloudformation_backend
 from moto.dynamodb import dynamodb_backend
 from moto.dynamodb2 import dynamodb_backend2
 from moto.ec2 import ec2_backend
@@ -20,6 +21,7 @@ from moto.route53 import route53_backend
 
 BACKENDS = {
     'autoscaling': autoscaling_backend,
     'cloudformation': cloudformation_backend,
+    'cloudwatch': cloudwatch_backend,
     'dynamodb': dynamodb_backend,
     'dynamodb2': dynamodb_backend2,
@@ -161,7 +161,7 @@ LIST_STACKS_RESPONSE = """<ListStacksResponse>
  <StackSummaries>
    {% for stack in stacks %}
    <member>
-       <StackId>{{ stack.id }}</StackId>
+       <StackId>{{ stack.stack_id }}</StackId>
        <StackStatus>{{ stack.status }}</StackStatus>
        <StackName>{{ stack.name }}</StackName>
        <CreationTime>2011-05-23T15:47:44Z</CreationTime>
@@ -1,12 +1,32 @@
 from __future__ import unicode_literals
 # TODO add tests for all of these
+
+EQ_FUNCTION = lambda item_value, test_value: item_value == test_value
+NE_FUNCTION = lambda item_value, test_value: item_value != test_value
+LE_FUNCTION = lambda item_value, test_value: item_value <= test_value
+LT_FUNCTION = lambda item_value, test_value: item_value < test_value
+GE_FUNCTION = lambda item_value, test_value: item_value >= test_value
+GT_FUNCTION = lambda item_value, test_value: item_value > test_value
+
 COMPARISON_FUNCS = {
-    'EQ': lambda item_value, test_value: item_value == test_value,
-    'NE': lambda item_value, test_value: item_value != test_value,
-    'LE': lambda item_value, test_value: item_value <= test_value,
-    'LT': lambda item_value, test_value: item_value < test_value,
-    'GE': lambda item_value, test_value: item_value >= test_value,
-    'GT': lambda item_value, test_value: item_value > test_value,
+    'EQ': EQ_FUNCTION,
+    '=': EQ_FUNCTION,
+
+    'NE': NE_FUNCTION,
+    '!=': NE_FUNCTION,
+
+    'LE': LE_FUNCTION,
+    '<=': LE_FUNCTION,
+
+    'LT': LT_FUNCTION,
+    '<': LT_FUNCTION,
+
+    'GE': GE_FUNCTION,
+    '>=': GE_FUNCTION,
+
+    'GT': GT_FUNCTION,
+    '>': GT_FUNCTION,
+
     'NULL': lambda item_value: item_value is None,
     'NOT_NULL': lambda item_value: item_value is not None,
     'CONTAINS': lambda item_value, test_value: test_value in item_value,
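For context, a minimal runnable sketch of how a comparison table like the one above can be applied when evaluating a DynamoDB-style condition. The get_comparison_func helper and the sample values are illustrative assumptions, not code from this commit.

    # Illustrative only: dispatching on a DynamoDB comparison operator name or symbol.
    EQ_FUNCTION = lambda item_value, test_value: item_value == test_value
    GT_FUNCTION = lambda item_value, test_value: item_value > test_value

    COMPARISON_FUNCS = {
        'EQ': EQ_FUNCTION, '=': EQ_FUNCTION,
        'GT': GT_FUNCTION, '>': GT_FUNCTION,
    }

    def get_comparison_func(operator):
        # Works for both the long names ('EQ') and the symbols ('=').
        return COMPARISON_FUNCS[operator]

    print(get_comparison_func('GT')('456', '1'))   # True ('456' > '1' lexicographically)
    print(get_comparison_func('=')('abc', 'abc'))  # True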
@@ -228,28 +228,64 @@ class DynamoHandler(BaseResponse):
 
     def query(self):
         name = self.body['TableName']
-        key_conditions = self.body['KeyConditions']
-        hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name, key_conditions.keys())
-        # hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name)
-        if hash_key_name is None:
-            er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException"
-            return self.error(er)
-        hash_key = key_conditions[hash_key_name]['AttributeValueList'][0]
-        if len(key_conditions) == 1:
-            range_comparison = None
-            range_values = []
-        else:
-            if range_key_name is None:
-                er = "com.amazon.coral.validate#ValidationException"
-                return self.error(er)
-            else:
-                range_condition = key_conditions[range_key_name]
-                if range_condition:
-                    range_comparison = range_condition['ComparisonOperator']
-                    range_values = range_condition['AttributeValueList']
+        # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}}
+        key_condition_expression = self.body.get('KeyConditionExpression')
+        if key_condition_expression:
+            value_alias_map = self.body['ExpressionAttributeValues']
+
+            if " AND " in key_condition_expression:
+                expressions = key_condition_expression.split(" AND ", 1)
+                hash_key_expression = expressions[0]
+                # TODO implement more than one range expression and OR operators
+                range_key_expression = expressions[1].replace(")", "")
+                range_key_expression_components = range_key_expression.split()
+                range_comparison = range_key_expression_components[1]
+                if 'AND' in range_key_expression:
+                    range_comparison = 'BETWEEN'
+                    range_values = [
+                        value_alias_map[range_key_expression_components[2]],
+                        value_alias_map[range_key_expression_components[4]],
+                    ]
+                elif 'begins_with' in range_key_expression:
+                    range_comparison = 'BEGINS_WITH'
+                    range_values = [
+                        value_alias_map[range_key_expression_components[1]],
+                    ]
+                else:
+                    range_values = [value_alias_map[range_key_expression_components[2]]]
+            else:
+                hash_key_expression = key_condition_expression
+                range_comparison = None
+                range_values = []
+
+            hash_key_value_alias = hash_key_expression.split("=")[1].strip()
+            hash_key = value_alias_map[hash_key_value_alias]
+        else:
+            # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}}
+            key_conditions = self.body.get('KeyConditions')
+            if key_conditions:
+                hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name, key_conditions.keys())
+                if hash_key_name is None:
+                    er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException"
+                    return self.error(er)
+                hash_key = key_conditions[hash_key_name]['AttributeValueList'][0]
+                if len(key_conditions) == 1:
+                    range_comparison = None
+                    range_values = []
+                else:
+                    if range_key_name is None:
+                        er = "com.amazon.coral.validate#ValidationException"
+                        return self.error(er)
+                    else:
+                        range_condition = key_conditions[range_key_name]
+                        if range_condition:
+                            range_comparison = range_condition['ComparisonOperator']
+                            range_values = range_condition['AttributeValueList']
+                        else:
+                            range_comparison = None
+                            range_values = []
 
         items, last_page = dynamodb_backend2.query(name, hash_key, range_comparison, range_values)
         if items is None:
             er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
@@ -260,7 +296,7 @@ class DynamoHandler(BaseResponse):
             items = items[:limit]
 
         reversed = self.body.get("ScanIndexForward")
-        if reversed is not False:
+        if reversed is False:
             items.reverse()
 
         result = {
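For context, the new query() path above parses expressions such as "#n0 = :v0 AND #n1 > :v1" by splitting on " AND ". A self-contained sketch of that parsing strategy, covering only the single-operator and begins_with cases; the helper name, sample expression, and alias map are invented for illustration.

    def parse_key_condition(expression, value_alias_map):
        # Split the hash-key clause from the (optional) range-key clause.
        if " AND " in expression:
            hash_part, range_part = expression.split(" AND ", 1)
            components = range_part.replace(")", "").split()
            if 'begins_with' in range_part:
                comparison = 'BEGINS_WITH'
                values = [value_alias_map[components[1]]]
            else:
                comparison = components[1]
                values = [value_alias_map[components[2]]]
        else:
            hash_part, comparison, values = expression, None, []
        hash_key = value_alias_map[hash_part.split("=")[1].strip()]
        return hash_key, comparison, values

    print(parse_key_condition(
        "#n0 = :v0 AND #n1 > :v1",
        {":v0": {"S": "the-key"}, ":v1": {"S": "1"}},
    ))
    # ({'S': 'the-key'}, '>', [{'S': '1'}])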
@@ -1,17 +1,17 @@
 from __future__ import unicode_literals
 
-from collections import defaultdict
-import boto
 import copy
-from datetime import datetime
 import itertools
 import re
-import six
 
+import boto
+from collections import defaultdict
+from datetime import datetime
 from boto.ec2.instance import Instance as BotoInstance, Reservation
+from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
 from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
 from boto.ec2.launchspecification import LaunchSpecification
+import six
 
 from moto.core import BaseBackend
 from moto.core.models import Model
@@ -97,6 +97,10 @@ from .utils import (
 )
 
 
+def utc_date_and_time():
+    return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
+
+
 def validate_resource_ids(resource_ids):
     for resource_id in resource_ids:
         if not is_valid_resource_id(resource_id):
@@ -309,14 +313,17 @@ class Instance(BotoInstance, TaggedEC2Resource):
         in_ec2_classic = not bool(self.subnet_id)
         self.key_name = kwargs.get("key_name")
         self.source_dest_check = "true"
-        self.launch_time = datetime.utcnow().isoformat()
+        self.launch_time = utc_date_and_time()
         associate_public_ip = kwargs.get("associate_public_ip", False)
         if in_ec2_classic:
             # If we are in EC2-Classic, autoassign a public IP
             associate_public_ip = True
 
         self.block_device_mapping = BlockDeviceMapping()
-        self.block_device_mapping['/dev/sda1'] = BlockDeviceType(volume_id=random_volume_id())
+        # Default have an instance with root volume should you not wish to override with attach volume cmd.
+        # However this is a ghost volume and wont show up in get_all_volumes or snapshot-able.
+        self.block_device_mapping['/dev/sda1'] = BlockDeviceType(volume_id=random_volume_id(), status='attached',
+                                                                 attach_time=utc_date_and_time())
 
         amis = self.ec2_backend.describe_images(filters={'image-id': image_id})
         ami = amis[0] if amis else None
@@ -343,6 +350,10 @@ class Instance(BotoInstance, TaggedEC2Resource):
             private_ip=kwargs.get("private_ip"),
             associate_public_ip=associate_public_ip)
 
+    @property
+    def get_block_device_mapping(self):
+        return self.block_device_mapping.items()
+
     @property
     def private_ip(self):
         return self.nics[0].private_ip_address
@@ -1349,6 +1360,7 @@ class SecurityGroupIngress(object):
 class VolumeAttachment(object):
     def __init__(self, volume, instance, device):
         self.volume = volume
+        self.attach_time = utc_date_and_time()
         self.instance = instance
         self.device = device
 
@@ -1373,6 +1385,7 @@ class Volume(TaggedEC2Resource):
         self.id = volume_id
         self.size = size
         self.zone = zone
+        self.create_time = utc_date_and_time()
         self.attachment = None
         self.ec2_backend = ec2_backend
 
@@ -1404,6 +1417,7 @@ class Snapshot(TaggedEC2Resource):
         self.id = snapshot_id
         self.volume = volume
         self.description = description
+        self.start_time = utc_date_and_time()
         self.create_volume_permission_groups = set()
         self.ec2_backend = ec2_backend
 
@@ -1444,6 +1458,10 @@ class EBSBackend(object):
             return False
 
         volume.attachment = VolumeAttachment(volume, instance, device_path)
+        # Modify instance to capture mount of block device.
+        bdt = BlockDeviceType(volume_id=volume_id, status=volume.status, size=volume.size,
+                              attach_time=utc_date_and_time())
+        instance.block_device_mapping[device_path] = bdt
        return volume.attachment
 
     def detach_volume(self, volume_id, instance_id, device_path):
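The utc_date_and_time() helper introduced above exists because AWS-style timestamps end in 'Z' and carry no sub-second part, which datetime.isoformat() does not produce; the test change later in this commit ("2014-01-01T05:00:00" becomes "2014-01-01T05:00:00Z") depends on it. A quick comparison, using an arbitrary fixed datetime:

    from datetime import datetime

    now = datetime(2014, 1, 1, 5, 0, 0)
    print(now.isoformat())                     # 2014-01-01T05:00:00
    print(now.strftime('%Y-%m-%dT%H:%M:%SZ'))  # 2014-01-01T05:00:00Z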
@@ -42,12 +42,22 @@ class ElasticBlockStore(BaseResponse):
         return DELETE_VOLUME_RESPONSE
 
     def describe_snapshots(self):
+        # querystring for multiple snapshotids results in SnapshotId.1, SnapshotId.2 etc
+        snapshot_ids = ','.join([','.join(s[1]) for s in self.querystring.items() if 'SnapshotId' in s[0]])
         snapshots = self.ec2_backend.describe_snapshots()
+        # Describe snapshots to handle filter on snapshot_ids
+        snapshots = [s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots
+        # snapshots = self.ec2_backend.describe_snapshots()
         template = self.response_template(DESCRIBE_SNAPSHOTS_RESPONSE)
         return template.render(snapshots=snapshots)
 
     def describe_volumes(self):
+        # querystring for multiple volumeids results in VolumeId.1, VolumeId.2 etc
+        volume_ids = ','.join([','.join(v[1]) for v in self.querystring.items() if 'VolumeId' in v[0]])
         volumes = self.ec2_backend.describe_volumes()
+        # Describe volumes to handle filter on volume_ids
+        volumes = [v for v in volumes if v.id in volume_ids] if volume_ids else volumes
+        # volumes = self.ec2_backend.describe_volumes()
         template = self.response_template(DESCRIBE_VOLUMES_RESPONSE)
         return template.render(volumes=volumes)
 
@@ -103,7 +113,7 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
   <snapshotId/>
   <availabilityZone>{{ volume.zone.name }}</availabilityZone>
   <status>creating</status>
-  <createTime>2013-10-04T17:38:53.000Z</createTime>
+  <createTime>{{ volume.create_time}}</createTime>
   <volumeType>standard</volumeType>
 </CreateVolumeResponse>"""
 
@@ -117,7 +127,7 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
       <snapshotId/>
       <availabilityZone>{{ volume.zone.name }}</availabilityZone>
       <status>{{ volume.status }}</status>
-      <createTime>2013-10-04T17:38:53.000Z</createTime>
+      <createTime>{{ volume.create_time}}</createTime>
       <attachmentSet>
         {% if volume.attachment %}
           <item>
@@ -125,7 +135,7 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
             <instanceId>{{ volume.attachment.instance.id }}</instanceId>
             <device>{{ volume.attachment.device }}</device>
             <status>attached</status>
-            <attachTime>2013-10-04T17:38:53.000Z</attachTime>
+            <attachTime>{{volume.attachment.attach_time}}</attachTime>
             <deleteOnTermination>false</deleteOnTermination>
           </item>
         {% endif %}
@@ -157,7 +167,7 @@ ATTACHED_VOLUME_RESPONSE = """<AttachVolumeResponse xmlns="http://ec2.amazonaws.
   <instanceId>{{ attachment.instance.id }}</instanceId>
   <device>{{ attachment.device }}</device>
   <status>attaching</status>
-  <attachTime>2013-10-04T17:38:53.000Z</attachTime>
+  <attachTime>{{attachment.attach_time}}</attachTime>
 </AttachVolumeResponse>"""
 
 DETATCH_VOLUME_RESPONSE = """<DetachVolumeResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
@@ -174,7 +184,7 @@ CREATE_SNAPSHOT_RESPONSE = """<CreateSnapshotResponse xmlns="http://ec2.amazonaw
   <snapshotId>{{ snapshot.id }}</snapshotId>
   <volumeId>{{ snapshot.volume.id }}</volumeId>
   <status>pending</status>
-  <startTime>2013-10-04T17:38:53.000Z</startTime>
+  <startTime>{{ snapshot.start_time}}</startTime>
   <progress>60%</progress>
   <ownerId>111122223333</ownerId>
   <volumeSize>{{ snapshot.volume.size }}</volumeSize>
@@ -189,7 +199,7 @@ DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.am
       <snapshotId>{{ snapshot.id }}</snapshotId>
       <volumeId>{{ snapshot.volume.id }}</volumeId>
       <status>pending</status>
-      <startTime>2013-10-04T17:38:53.000Z</startTime>
+      <startTime>{{ snapshot.start_time}}</startTime>
       <progress>30%</progress>
       <ownerId>111122223333</ownerId>
       <volumeSize>{{ snapshot.volume.size }}</volumeSize>
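The describe_snapshots/describe_volumes changes above flatten the repeated querystring parameters (SnapshotId.1, SnapshotId.2, ...) into one comma-joined string and then filter by substring membership. A small standalone illustration with made-up ids; note that parsed querystring values are lists:

    querystring = {
        'Action': ['DescribeSnapshots'],
        'SnapshotId.1': ['snap-1111'],
        'SnapshotId.2': ['snap-2222'],
    }
    snapshot_ids = ','.join([','.join(s[1]) for s in querystring.items() if 'SnapshotId' in s[0]])
    print(snapshot_ids)  # e.g. 'snap-1111,snap-2222' (dict ordering may differ)

    class Snapshot(object):
        def __init__(self, id):
            self.id = id

    snapshots = [Snapshot('snap-1111'), Snapshot('snap-3333')]
    snapshots = [s for s in snapshots if s.id in snapshot_ids] if snapshot_ids else snapshots
    print([s.id for s in snapshots])  # ['snap-1111']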
@@ -206,7 +206,7 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
           <instanceType>{{ instance.instance_type }}</instanceType>
           <launchTime>{{ instance.launch_time }}</launchTime>
           <placement>
-            <availabilityZone>us-east-1b</availabilityZone>
+            <availabilityZone>{{ instance.placement}}</availabilityZone>
             <groupName/>
             <tenancy>default</tenancy>
           </placement>
@@ -331,7 +331,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns='http://ec2.amazona
           <instanceType>{{ instance.instance_type }}</instanceType>
           <launchTime>{{ instance.launch_time }}</launchTime>
           <placement>
-            <availabilityZone>us-west-2a</availabilityZone>
+            <availabilityZone>{{ instance.placement }}</availabilityZone>
             <groupName/>
             <tenancy>default</tenancy>
           </placement>
@@ -369,15 +369,18 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns='http://ec2.amazona
           <rootDeviceType>ebs</rootDeviceType>
           <rootDeviceName>/dev/sda1</rootDeviceName>
           <blockDeviceMapping>
+            {% for device_name,deviceobject in instance.get_block_device_mapping %}
             <item>
-              <deviceName>/dev/sda1</deviceName>
+              <deviceName>{{ device_name }}</deviceName>
               <ebs>
-                <volumeId>{{ instance.block_device_mapping['/dev/sda1'].volume_id }}</volumeId>
-                <status>attached</status>
-                <attachTime>2015-01-01T00:00:00.000Z</attachTime>
-                <deleteOnTermination>true</deleteOnTermination>
+                <volumeId>{{ deviceobject.volume_id }}</volumeId>
+                <status>{{ deviceobject.status }}</status>
+                <attachTime>{{ deviceobject.attach_time }}</attachTime>
+                <deleteOnTermination>{{ deviceobject.delete_on_termination }}</deleteOnTermination>
+                <size>{{deviceobject.size}}</size>
               </ebs>
             </item>
+            {% endfor %}
           </blockDeviceMapping>
           <virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
           <clientToken>ABCDE1234567890123</clientToken>
@@ -547,7 +550,7 @@ EC2_INSTANCE_STATUS = """<?xml version="1.0" encoding="UTF-8"?>
     {% for instance in instances %}
       <item>
         <instanceId>{{ instance.id }}</instanceId>
-        <availabilityZone>us-east-1d</availabilityZone>
+        <availabilityZone>{{ instance.placement }}</availabilityZone>
         <instanceState>
           <code>{{ instance.state_code }}</code>
           <name>{{ instance.state }}</name>
@@ -2,7 +2,7 @@ from __future__ import unicode_literals
 from .responses import S3BucketPathResponseInstance as ro
 
 url_bases = [
-    "https?://s3.amazonaws.com"
+    "https?://s3(.*).amazonaws.com"
 ]
 
 
@@ -79,10 +79,17 @@ def create_backend_app(service):
         else:
             endpoint = None
 
-        backend_app.route(
+        if endpoint in backend_app.view_functions:
+            # HACK: Sometimes we map the same view to multiple url_paths. Flask
+            # requries us to have different names.
+            endpoint += "2"
+
+        backend_app.add_url_rule(
             url_path,
             endpoint=endpoint,
-            methods=HTTP_METHODS)(convert_flask_to_httpretty_response(handler))
+            methods=HTTP_METHODS,
+            view_func=convert_flask_to_httpretty_response(handler),
+        )
 
     return backend_app
 
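The server.py change above switches from the backend_app.route(...) decorator call to add_url_rule with an explicit view_func, and de-duplicates endpoint names, because Flask keys registered views by endpoint and requires a distinct endpoint per registration even when the view callable is the same. A minimal sketch outside moto; the route paths and endpoint names are invented:

    from flask import Flask

    app = Flask(__name__)

    def health():
        return "ok"

    # One view callable registered under two URL rules; the second registration
    # needs its own endpoint name.
    app.add_url_rule('/health', endpoint='health', view_func=health, methods=['GET'])
    app.add_url_rule('/healthz', endpoint='health2', view_func=health, methods=['GET'])

    with app.test_client() as client:
        print(client.get('/health').data)   # b'ok'
        print(client.get('/healthz').data)  # b'ok'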
@@ -11,10 +11,11 @@ from .exceptions import (
 )
 
 MAXIMUM_VISIBILTY_TIMEOUT = 43200
+DEFAULT_RECEIVED_MESSAGES = 1
 SQS_REGION_REGEX = r'://(.+?)\.queue\.amazonaws\.com'
 
 
-class QueuesResponse(BaseResponse):
+class SQSResponse(BaseResponse):
 
     region_regex = SQS_REGION_REGEX
 
@@ -22,6 +23,14 @@ class QueuesResponse(BaseResponse):
     def sqs_backend(self):
         return sqs_backends[self.region]
 
+    def _get_queue_name(self):
+        try:
+            queue_name = self.querystring.get('QueueUrl')[0].split("/")[-1]
+        except TypeError:
+            # Fallback to reading from the URL
+            queue_name = self.path.split("/")[-1]
+        return queue_name
+
     def create_queue(self):
         visibility_timeout = None
         if 'Attribute.1.Name' in self.querystring and self.querystring.get('Attribute.1.Name')[0] == 'VisibilityTimeout':
@@ -47,17 +56,8 @@ class QueuesResponse(BaseResponse):
         template = self.response_template(LIST_QUEUES_RESPONSE)
         return template.render(queues=queues)
 
-
-class QueueResponse(BaseResponse):
-
-    region_regex = SQS_REGION_REGEX
-
-    @property
-    def sqs_backend(self):
-        return sqs_backends[self.region]
-
     def change_message_visibility(self):
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
         receipt_handle = self.querystring.get("ReceiptHandle")[0]
         visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
 
@@ -79,20 +79,20 @@ class QueueResponse(BaseResponse):
         return template.render()
 
     def get_queue_attributes(self):
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
         queue = self.sqs_backend.get_queue(queue_name)
         template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE)
         return template.render(queue=queue)
 
     def set_queue_attributes(self):
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
         key = camelcase_to_underscores(self.querystring.get('Attribute.Name')[0])
         value = self.querystring.get('Attribute.Value')[0]
         self.sqs_backend.set_queue_attribute(queue_name, key, value)
         return SET_QUEUE_ATTRIBUTE_RESPONSE
 
     def delete_queue(self):
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
         queue = self.sqs_backend.delete_queue(queue_name)
         if not queue:
             return "A queue with name {0} does not exist".format(queue_name), dict(status=404)
@@ -113,7 +113,8 @@ class QueueResponse(BaseResponse):
         except MessageAttributesInvalid as e:
             return e.description, dict(status=e.status_code)
 
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
+
         message = self.sqs_backend.send_message(
             queue_name,
             message,
@@ -135,7 +136,7 @@ class QueueResponse(BaseResponse):
         'SendMessageBatchRequestEntry.2.DelaySeconds': ['0'],
         """
 
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
 
         messages = []
         for index in range(1, 11):
@@ -164,7 +165,7 @@ class QueueResponse(BaseResponse):
         return template.render(messages=messages)
 
     def delete_message(self):
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
         receipt_handle = self.querystring.get("ReceiptHandle")[0]
         self.sqs_backend.delete_message(queue_name, receipt_handle)
         template = self.response_template(DELETE_MESSAGE_RESPONSE)
@@ -180,7 +181,7 @@ class QueueResponse(BaseResponse):
         'DeleteMessageBatchRequestEntry.2.ReceiptHandle': ['zxcvfda...'],
         ...
         """
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
 
         message_ids = []
         for index in range(1, 11):
@@ -201,14 +202,17 @@ class QueueResponse(BaseResponse):
         return template.render(message_ids=message_ids)
 
     def purge_queue(self):
-        queue_name = self.path.split("/")[-1]
+        queue_name = self._get_queue_name()
         self.sqs_backend.purge_queue(queue_name)
         template = self.response_template(PURGE_QUEUE_RESPONSE)
         return template.render()
 
     def receive_message(self):
-        queue_name = self.path.split("/")[-1]
-        message_count = int(self.querystring.get("MaxNumberOfMessages")[0])
+        queue_name = self._get_queue_name()
+        try:
+            message_count = int(self.querystring.get("MaxNumberOfMessages")[0])
+        except TypeError:
+            message_count = DEFAULT_RECEIVED_MESSAGES
         messages = self.sqs_backend.receive_messages(queue_name, message_count)
         template = self.response_template(RECEIVE_MESSAGE_RESPONSE)
         output = template.render(messages=messages)
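The new _get_queue_name() helper above prefers the QueueUrl querystring parameter and falls back to the request path, which is what lets the root route and the per-queue route share a single SQSResponse. A small illustration of that lookup order; the sample URL and path values are invented:

    def get_queue_name(querystring, path):
        try:
            # Parsed querystring values are lists, as in moto's BaseResponse.
            return querystring.get('QueueUrl')[0].split("/")[-1]
        except TypeError:
            # .get() returned None: no QueueUrl parameter was sent.
            return path.split("/")[-1]

    print(get_queue_name(
        {'QueueUrl': ['https://queue.amazonaws.com/123456789012/my-queue']},
        '/123456789012/other',
    ))  # my-queue
    print(get_queue_name({}, '/123456789012/my-queue'))  # my-queue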
@@ -1,11 +1,13 @@
 from __future__ import unicode_literals
-from .responses import QueueResponse, QueuesResponse
+from .responses import SQSResponse
 
 url_bases = [
     "https?://(.*?)(queue|sqs)(.*?).amazonaws.com"
 ]
 
+dispatch = SQSResponse().dispatch
+
 url_paths = {
-    '{0}/$': QueuesResponse.dispatch,
-    '{0}/(?P<account_id>\d+)/(?P<queue_name>[a-zA-Z0-9\-_]+)': QueueResponse.dispatch,
+    '{0}/$': dispatch,
+    '{0}/(?P<account_id>\d+)/(?P<queue_name>[a-zA-Z0-9\-_]+)': dispatch,
 }
 
setup.py
@@ -21,7 +21,7 @@ if sys.version_info < (2, 7):
 
 setup(
     name='moto',
-    version='0.4.7',
+    version='0.4.8',
     description='A library that allows your python tests to easily'
                 ' mock out the boto library',
     author='Steve Pulec',
@@ -1 +1,41 @@
 from __future__ import unicode_literals
+
+import json
+from six.moves.urllib.parse import urlencode
+import re
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_cloudformation_server_get():
+    backend = server.create_backend_app("cloudformation")
+    stack_name = 'test stack'
+    test_client = backend.test_client()
+    template_body = {
+        "Resources": {},
+    }
+    res = test_client.get(
+        '/?{0}'.format(
+            urlencode({
+                "Action": "CreateStack",
+                "StackName": stack_name,
+                "TemplateBody": json.dumps(template_body)
+            })
+        ),
+        headers={"Host": "cloudformation.us-east-1.amazonaws.com"}
+    )
+    stack_id = json.loads(res.data.decode("utf-8"))["CreateStackResponse"]["CreateStackResult"]["StackId"]
+
+    res = test_client.get(
+        '/?Action=ListStacks',
+        headers={"Host": "cloudformation.us-east-1.amazonaws.com"}
+    )
+    stacks = re.search("<StackId>(.*)</StackId>", res.data.decode('utf-8'))
+
+    list_stack_id = stacks.groups()[0]
+    assert stack_id == list_stack_id
@@ -1,6 +1,8 @@
 from __future__ import unicode_literals
 
 import boto
+import boto3
+from boto3.dynamodb.conditions import Key
 import sure  # noqa
 from freezegun import freeze_time
 from moto import mock_dynamodb2
@@ -253,31 +255,31 @@ def test_query():
 
     table.count().should.equal(4)
 
-    results = table.query(forum_name__eq='the-key', subject__gt='1', consistent=True)
+    results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True)
     expected = ["123", "456", "789"]
     for index, item in enumerate(results):
         item["subject"].should.equal(expected[index])
 
-    results = table.query(forum_name__eq="the-key", subject__gt='1', reverse=True)
+    results = table.query_2(forum_name__eq="the-key", subject__gt='1', reverse=True)
     for index, item in enumerate(results):
         item["subject"].should.equal(expected[len(expected) - 1 - index])
 
-    results = table.query(forum_name__eq='the-key', subject__gt='1', consistent=True)
+    results = table.query_2(forum_name__eq='the-key', subject__gt='1', consistent=True)
     sum(1 for _ in results).should.equal(3)
 
-    results = table.query(forum_name__eq='the-key', subject__gt='234', consistent=True)
+    results = table.query_2(forum_name__eq='the-key', subject__gt='234', consistent=True)
     sum(1 for _ in results).should.equal(2)
 
-    results = table.query(forum_name__eq='the-key', subject__gt='9999')
+    results = table.query_2(forum_name__eq='the-key', subject__gt='9999')
     sum(1 for _ in results).should.equal(0)
 
-    results = table.query(forum_name__eq='the-key', subject__beginswith='12')
+    results = table.query_2(forum_name__eq='the-key', subject__beginswith='12')
     sum(1 for _ in results).should.equal(1)
 
-    results = table.query(forum_name__eq='the-key', subject__beginswith='7')
+    results = table.query_2(forum_name__eq='the-key', subject__beginswith='7')
     sum(1 for _ in results).should.equal(1)
 
-    results = table.query(forum_name__eq='the-key', subject__between=['567', '890'])
+    results = table.query_2(forum_name__eq='the-key', subject__between=['567', '890'])
     sum(1 for _ in results).should.equal(1)
 
 
@@ -558,7 +560,6 @@ def test_lookup():
 
 @mock_dynamodb2
 def test_failed_overwrite():
-    from decimal import Decimal
     table = Table.create('messages', schema=[
         HashKey('id'),
         RangeKey('range'),
@@ -567,19 +568,19 @@ def test_failed_overwrite():
         'write': 3,
     })
 
-    data1 = {'id': '123', 'range': 'abc', 'data':'678'}
+    data1 = {'id': '123', 'range': 'abc', 'data': '678'}
     table.put_item(data=data1)
 
-    data2 = {'id': '123', 'range': 'abc', 'data':'345'}
-    table.put_item(data=data2, overwrite = True)
+    data2 = {'id': '123', 'range': 'abc', 'data': '345'}
+    table.put_item(data=data2, overwrite=True)
 
-    data3 = {'id': '123', 'range': 'abc', 'data':'812'}
+    data3 = {'id': '123', 'range': 'abc', 'data': '812'}
     table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException)
 
     returned_item = table.lookup('123', 'abc')
     dict(returned_item).should.equal(data2)
 
-    data4 = {'id': '123', 'range': 'ghi', 'data':812}
+    data4 = {'id': '123', 'range': 'ghi', 'data': 812}
     table.put_item(data=data4)
 
     returned_item = table.lookup('123', 'ghi')
@@ -593,7 +594,7 @@ def test_conflicting_writes():
         RangeKey('range'),
     ])
 
-    item_data = {'id': '123', 'range':'abc', 'data':'678'}
+    item_data = {'id': '123', 'range': 'abc', 'data': '678'}
     item1 = Item(table, item_data)
     item2 = Item(table, item_data)
     item1.save()
@@ -603,3 +604,100 @@ def test_conflicting_writes():
 
     item1.save()
     item2.save.when.called_with().should.throw(ConditionalCheckFailedException)
+
+"""
+boto3
+"""
+
+
+@mock_dynamodb2
+def test_boto3_conditions():
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    table = dynamodb.create_table(
+        TableName='users',
+        KeySchema=[
+            {
+                'AttributeName': 'forum_name',
+                'KeyType': 'HASH'
+            },
+            {
+                'AttributeName': 'subject',
+                'KeyType': 'RANGE'
+            },
+        ],
+        AttributeDefinitions=[
+            {
+                'AttributeName': 'forum_name',
+                'AttributeType': 'S'
+            },
+            {
+                'AttributeName': 'subject',
+                'AttributeType': 'S'
+            },
+        ],
+        ProvisionedThroughput={
+            'ReadCapacityUnits': 5,
+            'WriteCapacityUnits': 5
+        }
+    )
+    table = dynamodb.Table('users')
+
+    table.put_item(Item={
+        'forum_name': 'the-key',
+        'subject': '123'
+    })
+    table.put_item(Item={
+        'forum_name': 'the-key',
+        'subject': '456'
+    })
+    table.put_item(Item={
+        'forum_name': 'the-key',
+        'subject': '789'
+    })
+
+    # Test a query returning all items
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'),
+        ScanIndexForward=True,
+    )
+    expected = ["123", "456", "789"]
+    for index, item in enumerate(results['Items']):
+        item["subject"].should.equal(expected[index])
+
+    # Return all items again, but in reverse
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('1'),
+        ScanIndexForward=False,
+    )
+    for index, item in enumerate(reversed(results['Items'])):
+        item["subject"].should.equal(expected[index])
+
+    # Filter the subjects to only return some of the results
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('234'),
+        ConsistentRead=True,
+    )
+    results['Count'].should.equal(2)
+
+    # Filter to return no results
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").gt('9999')
+    )
+    results['Count'].should.equal(0)
+
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").begins_with('12')
+    )
+    results['Count'].should.equal(1)
+
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").begins_with('7')
+    )
+    results['Count'].should.equal(1)
+
+    results = table.query(
+        KeyConditionExpression=Key('forum_name').eq('the-key') & Key("subject").between('567', '890')
+    )
+    results['Count'].should.equal(1)
@@ -1,6 +1,8 @@
 from __future__ import unicode_literals
 
 import boto
+import boto3
+from boto3.dynamodb.conditions import Key
 import sure  # noqa
 from freezegun import freeze_time
 from boto.exception import JSONResponseError
@@ -135,14 +137,6 @@ def test_item_put_without_table():
     ).should.throw(JSONResponseError)
 
 
-@requires_boto_gte("2.9")
-@mock_dynamodb2
-def test_get_missing_item():
-    table = create_table()
-
-    table.get_item.when.called_with(test_hash=3241526475).should.throw(JSONResponseError)
-
-
 @requires_boto_gte("2.9")
 @mock_dynamodb2
 def test_get_item_with_undeclared_table():
@@ -449,7 +443,6 @@ def test_update_item_set():
 
 @mock_dynamodb2
 def test_failed_overwrite():
-    from decimal import Decimal
     table = Table.create('messages', schema=[
         HashKey('id'),
     ], throughput={
@@ -457,19 +450,19 @@ def test_failed_overwrite():
         'write': 3,
     })
 
-    data1 = {'id': '123', 'data':'678'}
+    data1 = {'id': '123', 'data': '678'}
     table.put_item(data=data1)
 
-    data2 = {'id': '123', 'data':'345'}
-    table.put_item(data=data2, overwrite = True)
+    data2 = {'id': '123', 'data': '345'}
+    table.put_item(data=data2, overwrite=True)
 
-    data3 = {'id': '123', 'data':'812'}
+    data3 = {'id': '123', 'data': '812'}
     table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException)
 
     returned_item = table.lookup('123')
     dict(returned_item).should.equal(data2)
 
-    data4 = {'id': '124', 'data':812}
+    data4 = {'id': '124', 'data': 812}
     table.put_item(data=data4)
 
     returned_item = table.lookup('124')
@@ -482,7 +475,7 @@ def test_conflicting_writes():
         HashKey('id'),
     ])
 
-    item_data = {'id': '123', 'data':'678'}
+    item_data = {'id': '123', 'data': '678'}
     item1 = Item(table, item_data)
     item2 = Item(table, item_data)
     item1.save()
@@ -491,4 +484,46 @@ def test_conflicting_writes():
     item2['data'] = '912'
 
     item1.save()
-    item2.save.when.called_with().should.throw(ConditionalCheckFailedException)
+    item2.save.when.called_with().should.throw(ConditionalCheckFailedException)
+
+
+"""
+boto3
+"""
+
+
+@mock_dynamodb2
+def test_boto3_conditions():
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    table = dynamodb.create_table(
+        TableName='users',
+        KeySchema=[
+            {
+                'AttributeName': 'username',
+                'KeyType': 'HASH'
+            },
+        ],
+        AttributeDefinitions=[
+            {
+                'AttributeName': 'username',
+                'AttributeType': 'S'
+            },
+        ],
+        ProvisionedThroughput={
+            'ReadCapacityUnits': 5,
+            'WriteCapacityUnits': 5
+        }
+    )
+    table = dynamodb.Table('users')
+
+    table.put_item(Item={'username': 'johndoe'})
+    table.put_item(Item={'username': 'janedoe'})
+
+    response = table.query(
+        KeyConditionExpression=Key('username').eq('johndoe')
+    )
+    response['Count'].should.equal(1)
+    response['Items'].should.have.length_of(1)
+    response['Items'][0].should.equal({"username": "johndoe"})
@@ -33,6 +33,19 @@ def test_create_and_delete_volume():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none
 
+
+@mock_ec2
+def test_filter_volume_by_id():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    volume1 = conn.create_volume(80, "us-east-1a")
+    volume2 = conn.create_volume(36, "us-east-1b")
+    volume3 = conn.create_volume(20, "us-east-1c")
+    vol1 = conn.get_all_volumes(volume_ids=volume3.id)
+    vol1.should.have.length_of(1)
+    vol1[0].size.should.equal(20)
+    vol1[0].zone.should.equal('us-east-1c')
+    vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id])
+    vol2.should.have.length_of(2)
 
 
 @mock_ec2
 def test_volume_attach_and_detach():
@@ -85,6 +98,7 @@ def test_create_snapshot():
     snapshots = conn.get_all_snapshots()
     snapshots.should.have.length_of(1)
     snapshots[0].description.should.equal('a test snapshot')
+    snapshots[0].start_time.should_not.be.none
 
     # Create snapshot without description
     snapshot = volume.create_snapshot()
@@ -100,6 +114,25 @@ def test_create_snapshot():
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none
 
+
+@mock_ec2
+def test_filter_snapshot_by_id():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    volume1 = conn.create_volume(36, "us-east-1a")
+    snap1 = volume1.create_snapshot('a test snapshot 1')
+    volume2 = conn.create_volume(42, 'us-east-1a')
+    snap2 = volume2.create_snapshot('a test snapshot 2')
+    volume3 = conn.create_volume(84, 'us-east-1a')
+    snap3 = volume3.create_snapshot('a test snapshot 3')
+    snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id)
+    snapshots1.should.have.length_of(1)
+    snapshots1[0].volume_id.should.equal(volume2.id)
+    snapshots1[0].region.name.should.equal('us-east-1')
+    snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id])
+    snapshots2.should.have.length_of(2)
+    for s in snapshots2:
+        s.start_time.should_not.be.none
+        s.volume_id.should.be.within([volume2.id, volume3.id])
+        s.region.name.should.equal('us-east-1')
 
 @mock_ec2
 def test_snapshot_attribute():
@@ -53,7 +53,7 @@ def test_instance_launch_and_terminate():
     instances.should.have.length_of(1)
     instances[0].id.should.equal(instance.id)
     instances[0].state.should.equal('running')
-    instances[0].launch_time.should.equal("2014-01-01T05:00:00")
+    instances[0].launch_time.should.equal("2014-01-01T05:00:00Z")
     instances[0].vpc_id.should.equal(None)
 
     root_device_name = instances[0].root_device_name
@@ -66,6 +66,35 @@ def test_instance_launch_and_terminate():
     instance = reservations[0].instances[0]
     instance.state.should.equal('terminated')
 
+
+@freeze_time("2014-01-01 05:00:00")
+@mock_ec2
+def test_instance_attach_volume():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    reservation = conn.run_instances('ami-1234abcd')
+    instance = reservation.instances[0]
+
+    vol1 = conn.create_volume(size=36, zone=conn.region.name)
+    vol1.attach(instance.id, "/dev/sda1")
+    vol1.update()
+    vol2 = conn.create_volume(size=65, zone=conn.region.name)
+    vol2.attach(instance.id, "/dev/sdb1")
+    vol2.update()
+    vol3 = conn.create_volume(size=130, zone=conn.region.name)
+    vol3.attach(instance.id, "/dev/sdc1")
+    vol3.update()
+
+    reservations = conn.get_all_instances()
+    instance = reservations[0].instances[0]
+
+    instance.block_device_mapping.should.have.length_of(3)
+
+    for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]):
+        v.attach_data.instance_id.should.equal(instance.id)
+        v.attach_data.attach_time.should.equal(instance.launch_time)  # can do due to freeze_time decorator.
+        v.create_time.should.equal(instance.launch_time)  # can do due to freeze_time decorator.
+        v.region.name.should.equal(instance.region.name)
+        v.status.should.equal('in-use')
 
 
 @mock_ec2
 def test_get_instances_by_id():
@@ -8,6 +8,7 @@ from io import BytesIO
 
 import json
 import boto
+import boto3
 from boto.exception import S3CreateError, S3ResponseError
 from boto.s3.connection import S3Connection
 from boto.s3.key import Key
@@ -869,3 +870,18 @@ def test_policy():
     bucket = conn.get_bucket(bucket_name)
 
     bucket.get_policy().decode('utf-8').should.equal(policy)
+
+
+"""
+boto3
+"""
+
+
+@mock_s3
+def test_boto3_bucket_create():
+    s3 = boto3.resource('s3', region_name='us-east-1')
+    s3.create_bucket(Bucket="blah")
+
+    s3.Object('blah', 'hello.txt').put(Body="some text")
+
+    s3.Object('blah', 'hello.txt').get()['Body'].read().decode("utf-8").should.equal("some text")
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 import boto
+import boto3
 from boto.exception import SQSError
 from boto.sqs.message import RawMessage, Message
 
@@ -462,3 +463,17 @@ def test_delete_message_after_visibility_timeout():
     m1_retrieved.delete()
 
     assert new_queue.count() == 0
+
+"""
+boto3
+"""
+
+
+@mock_sqs
+def test_boto3_message_send():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="blah")
+    queue.send_message(MessageBody="derp")
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)