Merge remote-tracking branch 'spulec/master'

Conflicts:
	moto/s3/responses.py
Konstantinos Koukopoulos 2013-11-14 17:05:46 +02:00
commit 3628e40f3c
42 changed files with 1178 additions and 313 deletions

.gitignore

@ -1,4 +1,7 @@
moto.egg-info/* moto.egg-info/*
dist/* dist/*
.tox
.coverage .coverage
*.pyc *.pyc
.noseids
build/


@ -1,5 +1,6 @@
language: python language: python
python: python:
- 2.6
- 2.7 - 2.7
env: env:
matrix: matrix:
@ -12,6 +13,7 @@ env:
- BOTO_VERSION=2.7 - BOTO_VERSION=2.7
install: install:
- pip install boto==$BOTO_VERSION - pip install boto==$BOTO_VERSION
- pip install https://github.com/gabrielfalcao/HTTPretty/tarball/8bbbdfc14326678b1aeba6a2d81af0d835a2cd6f
- pip install . - pip install .
- pip install -r requirements.txt - pip install -r requirements.txt
script: script:


@ -6,3 +6,5 @@ Moto is written by Steve Pulec with contributions from:
* [Dilshod Tadjibaev](https://github.com/antimora) * [Dilshod Tadjibaev](https://github.com/antimora)
* [Dan Berglund](https://github.com/cheif) * [Dan Berglund](https://github.com/cheif)
* [Lincoln de Sousa](https://github.com/clarete) * [Lincoln de Sousa](https://github.com/clarete)
* [mhock](https://github.com/mhock)
* [Ilya Sukhanov](https://github.com/IlyaSukhanov)


@ -7,6 +7,7 @@ from .ec2 import mock_ec2
from .elb import mock_elb from .elb import mock_elb
from .emr import mock_emr from .emr import mock_emr
from .s3 import mock_s3 from .s3 import mock_s3
from .s3bucket_path import mock_s3bucket_path
from .ses import mock_ses from .ses import mock_ses
from .sqs import mock_sqs from .sqs import mock_sqs
from .sts import mock_sts from .sts import mock_sts


@ -4,6 +4,7 @@ from moto.ec2 import ec2_backend
from moto.elb import elb_backend from moto.elb import elb_backend
from moto.emr import emr_backend from moto.emr import emr_backend
from moto.s3 import s3_backend from moto.s3 import s3_backend
from moto.s3bucket_path import s3bucket_path_backend
from moto.ses import ses_backend from moto.ses import ses_backend
from moto.sqs import sqs_backend from moto.sqs import sqs_backend
from moto.sts import sts_backend from moto.sts import sts_backend
@ -15,6 +16,7 @@ BACKENDS = {
'elb': elb_backend, 'elb': elb_backend,
'emr': emr_backend, 'emr': emr_backend,
's3': s3_backend, 's3': s3_backend,
's3bucket_path': s3bucket_path_backend,
'ses': ses_backend, 'ses': ses_backend,
'sqs': sqs_backend, 'sqs': sqs_backend,
'sts': sts_backend, 'sts': sts_backend,


@ -9,6 +9,7 @@ from .utils import convert_regex_to_flask_path
class MockAWS(object): class MockAWS(object):
def __init__(self, backend): def __init__(self, backend):
self.backend = backend self.backend = backend
HTTPretty.reset()
def __call__(self, func): def __call__(self, func):
return self.decorate_callable(func) return self.decorate_callable(func)


@ -46,7 +46,7 @@ class BaseResponse(object):
status = new_headers.pop('status', 200) status = new_headers.pop('status', 200)
headers.update(new_headers) headers.update(new_headers)
return status, headers, body return status, headers, body
raise NotImplementedError("The {} action has not been implemented".format(action)) raise NotImplementedError("The {0} action has not been implemented".format(action))
def metadata_response(request, full_url, headers): def metadata_response(request, full_url, headers):


@ -31,7 +31,7 @@ def get_random_hex(length=8):
def get_random_message_id(): def get_random_message_id():
return '{}-{}-{}-{}-{}'.format(get_random_hex(8), get_random_hex(4), get_random_hex(4), get_random_hex(4), get_random_hex(12)) return '{0}-{1}-{2}-{3}-{4}'.format(get_random_hex(8), get_random_hex(4), get_random_hex(4), get_random_hex(4), get_random_hex(12))
def convert_regex_to_flask_path(url_path): def convert_regex_to_flask_path(url_path):
@ -61,7 +61,7 @@ class convert_flask_to_httpretty_response(object):
outer = self.callback.im_class.__name__ outer = self.callback.im_class.__name__
else: else:
outer = self.callback.__module__ outer = self.callback.__module__
return "{}.{}".format(outer, self.callback.__name__) return "{0}.{1}".format(outer, self.callback.__name__)
def __call__(self, args=None, **kwargs): def __call__(self, args=None, **kwargs):
headers = dict(request.headers) headers = dict(request.headers)


@ -1,7 +1,14 @@
from collections import defaultdict, OrderedDict from collections import defaultdict
import datetime import datetime
import json import json
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from moto.core import BaseBackend from moto.core import BaseBackend
from .comparisons import get_comparison_func from .comparisons import get_comparison_func
from .utils import unix_time from .utils import unix_time
@ -36,7 +43,7 @@ class DynamoType(object):
) )
def __repr__(self): def __repr__(self):
return "DynamoType: {}".format(self.to_json()) return "DynamoType: {0}".format(self.to_json())
def to_json(self): def to_json(self):
return {self.type: self.value} return {self.type: self.value}
@ -62,7 +69,7 @@ class Item(object):
self.attrs[key] = DynamoType(value) self.attrs[key] = DynamoType(value)
def __repr__(self): def __repr__(self):
return "Item: {}".format(self.to_json()) return "Item: {0}".format(self.to_json())
def to_json(self): def to_json(self):
attributes = {} attributes = {}


@ -1,7 +1,5 @@
import datetime import calendar
def unix_time(dt): def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0) return calendar.timegm(dt.timetuple())
delta = dt - epoch
return delta.total_seconds()
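For reference, the rewritten unix_time helper can be sanity-checked on its own; a minimal sketch (the expected value is just the epoch timestamp for 2013-11-14 00:00 UTC, and note that timetuple() drops sub-second precision, unlike the old delta.total_seconds() version):

import calendar
import datetime

def unix_time(dt):
    # same body as the new moto.dynamodb.utils.unix_time above
    return calendar.timegm(dt.timetuple())

# 2013-11-14 00:00:00 UTC is 1384387200 seconds after the epoch
assert unix_time(datetime.datetime(2013, 11, 14)) == 1384387200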


@ -15,6 +15,9 @@ from .utils import (
random_subnet_id, random_subnet_id,
random_volume_id, random_volume_id,
random_vpc_id, random_vpc_id,
random_eip_association_id,
random_eip_allocation_id,
random_ip,
) )
@ -29,24 +32,24 @@ class Instance(BotoInstance):
super(Instance, self).__init__() super(Instance, self).__init__()
self.id = random_instance_id() self.id = random_instance_id()
self.image_id = image_id self.image_id = image_id
self._state = InstanceState() self._state = InstanceState("running", 16)
self.user_data = user_data self.user_data = user_data
def start(self): def start(self):
self._state.name = "pending" self._state.name = "running"
self._state.code = 0 self._state.code = 16
def stop(self): def stop(self):
self._state.name = "stopping" self._state.name = "stopped"
self._state.code = 64 self._state.code = 80
def terminate(self): def terminate(self):
self._state.name = "shutting-down" self._state.name = "terminated"
self._state.code = 32 self._state.code = 48
def reboot(self): def reboot(self):
self._state.name = "pending" self._state.name = "running"
self._state.code = 0 self._state.code = 16
def get_tags(self): def get_tags(self):
tags = ec2_backend.describe_tags(self.id) tags = ec2_backend.describe_tags(self.id)
@ -215,8 +218,12 @@ class AmiBackend(object):
self.amis[ami_id] = ami self.amis[ami_id] = ami
return ami return ami
def describe_images(self): def describe_images(self, ami_ids=None):
return self.amis.values() if ami_ids:
images = [image for image in self.amis.values() if image.id in ami_ids]
else:
images = self.amis.values()
return images
def deregister_image(self, ami_id): def deregister_image(self, ami_id):
if ami_id in self.amis: if ami_id in self.amis:
@ -280,7 +287,7 @@ class SecurityRule(object):
@property @property
def unique_representation(self): def unique_representation(self):
return "{}-{}-{}-{}-{}".format( return "{0}-{1}-{2}-{3}-{4}".format(
self.ip_protocol, self.ip_protocol,
self.from_port, self.from_port,
self.to_port, self.to_port,
@ -571,9 +578,92 @@ class SpotRequestBackend(object):
return requests return requests
class ElasticAddress():
def __init__(self, domain):
self.public_ip = random_ip()
self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None
self.domain = domain
self.instance = None
self.association_id = None
class ElasticAddressBackend(object):
def __init__(self):
self.addresses = []
super(ElasticAddressBackend, self).__init__()
def allocate_address(self, domain):
address = ElasticAddress(domain)
self.addresses.append(address)
return address
def address_by_ip(self, ips):
return [address for address in self.addresses
if address.public_ip in ips]
def address_by_allocation(self, allocation_ids):
return [address for address in self.addresses
if address.allocation_id in allocation_ids]
def address_by_association(self, association_ids):
return [address for address in self.addresses
if address.association_id in association_ids]
def associate_address(self, instance, address=None, allocation_id=None, reassociate=False):
eips = []
if address:
eips = self.address_by_ip([address])
elif allocation_id:
eips = self.address_by_allocation([allocation_id])
eip = eips[0] if len(eips) > 0 else None
if eip and eip.instance is None or reassociate:
eip.instance = instance
if eip.domain == "vpc":
eip.association_id = random_eip_association_id()
return eip
else:
return None
def describe_addresses(self):
return self.addresses
def disassociate_address(self, address=None, association_id=None):
eips = []
if address:
eips = self.address_by_ip([address])
elif association_id:
eips = self.address_by_association([association_id])
if eips:
eip = eips[0]
eip.instance = None
eip.association_id = None
return True
else:
return False
def release_address(self, address=None, allocation_id=None):
eips = []
if address:
eips = self.address_by_ip([address])
elif allocation_id:
eips = self.address_by_allocation([allocation_id])
if eips:
eip = eips[0]
self.disassociate_address(address=eip.public_ip)
eip.allocation_id = None
self.addresses.remove(eip)
return True
else:
return False
class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend, RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend,
VPCBackend, SubnetBackend, SpotRequestBackend): VPCBackend, SubnetBackend, SpotRequestBackend, ElasticAddressBackend):
pass pass
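A rough sketch of how the new ElasticAddressBackend is driven through boto under the mock decorator (it mirrors the EIP tests added further below; the credentials and AMI id are placeholders):

import boto
from moto import mock_ec2

@mock_ec2
def eip_round_trip():
    conn = boto.connect_ec2('the_key', 'the_secret')
    instance = conn.run_instances('ami-1234abcd').instances[0]

    eip = conn.allocate_address()   # AllocateAddress, domain "standard"
    conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip)
    conn.disassociate_address(public_ip=eip.public_ip)
    eip.release()                   # ReleaseAddress drops it from the backend

eip_round_trip()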


@ -1,18 +1,21 @@
from jinja2 import Template from jinja2 import Template
from moto.ec2.models import ec2_backend from moto.ec2.models import ec2_backend
from moto.ec2.utils import instance_ids_from_querystring from moto.ec2.utils import instance_ids_from_querystring, image_ids_from_querystring
class AmisResponse(object): class AmisResponse(object):
def create_image(self): def create_image(self):
name = self.querystring.get('Name')[0] name = self.querystring.get('Name')[0]
description = self.querystring.get('Description')[0] if "Description" in self.querystring:
description = self.querystring.get('Description')[0]
else:
description = ""
instance_ids = instance_ids_from_querystring(self.querystring) instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0] instance_id = instance_ids[0]
image = ec2_backend.create_image(instance_id, name, description) image = ec2_backend.create_image(instance_id, name, description)
if not image: if not image:
return "There is not instance with id {}".format(instance_id), dict(status=404) return "There is not instance with id {0}".format(instance_id), dict(status=404)
template = Template(CREATE_IMAGE_RESPONSE) template = Template(CREATE_IMAGE_RESPONSE)
return template.render(image=image) return template.render(image=image)
@ -30,7 +33,8 @@ class AmisResponse(object):
raise NotImplementedError('AMIs.describe_image_attribute is not yet implemented') raise NotImplementedError('AMIs.describe_image_attribute is not yet implemented')
def describe_images(self): def describe_images(self):
images = ec2_backend.describe_images() ami_ids = image_ids_from_querystring(self.querystring)
images = ec2_backend.describe_images(ami_ids=ami_ids)
template = Template(DESCRIBE_IMAGES_RESPONSE) template = Template(DESCRIBE_IMAGES_RESPONSE)
return template.render(images=images) return template.render(images=images)
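Since describe_images now honours ImageId.N parameters, a lookup by id through boto should return only the matching AMI; a minimal sketch along the same lines as the existing AMI tests (credentials and AMI id are placeholders):

import boto
from moto import mock_ec2

@mock_ec2
def lookup_single_ami():
    conn = boto.connect_ec2('the_key', 'the_secret')
    instance = conn.run_instances('ami-1234abcd').instances[0]
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")

    # DescribeImages with an ImageId.1 parameter goes through the new ami_ids branch
    images = conn.get_all_images(image_ids=[image_id])
    assert len(images) == 1 and images[0].id == image_id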


@ -39,7 +39,7 @@ class ElasticBlockStore(object):
success = ec2_backend.delete_snapshot(snapshot_id) success = ec2_backend.delete_snapshot(snapshot_id)
if not success: if not success:
# Snapshot doesn't exist # Snapshot doesn't exist
return "Snapshot with id {} does not exist".format(snapshot_id), dict(status=404) return "Snapshot with id {0} does not exist".format(snapshot_id), dict(status=404)
return DELETE_SNAPSHOT_RESPONSE return DELETE_SNAPSHOT_RESPONSE
def delete_volume(self): def delete_volume(self):
@ -47,7 +47,7 @@ class ElasticBlockStore(object):
success = ec2_backend.delete_volume(volume_id) success = ec2_backend.delete_volume(volume_id)
if not success: if not success:
# Volume doesn't exist # Volume doesn't exist
return "Volume with id {} does not exist".format(volume_id), dict(status=404) return "Volume with id {0} does not exist".format(volume_id), dict(status=404)
return DELETE_VOLUME_RESPONSE return DELETE_VOLUME_RESPONSE
def describe_snapshot_attribute(self): def describe_snapshot_attribute(self):
@ -77,7 +77,7 @@ class ElasticBlockStore(object):
attachment = ec2_backend.detach_volume(volume_id, instance_id, device_path) attachment = ec2_backend.detach_volume(volume_id, instance_id, device_path)
if not attachment: if not attachment:
# Volume wasn't attached # Volume wasn't attached
return "Volume {} can not be detached from {} because it is not attached".format(volume_id, instance_id), dict(status=404) return "Volume {0} can not be detached from {1} because it is not attached".format(volume_id, instance_id), dict(status=404)
template = Template(DETATCH_VOLUME_RESPONSE) template = Template(DETATCH_VOLUME_RESPONSE)
return template.render(attachment=attachment) return template.render(attachment=attachment)


@ -1,21 +1,132 @@
from jinja2 import Template from jinja2 import Template
from moto.ec2.models import ec2_backend from moto.ec2.models import ec2_backend
from moto.ec2.utils import resource_ids_from_querystring from moto.ec2.utils import sequence_from_querystring
class ElasticIPAddresses(object): class ElasticIPAddresses(object):
def allocate_address(self): def allocate_address(self):
raise NotImplementedError('ElasticIPAddresses.allocate_address is not yet implemented') if "Domain" in self.querystring:
domain = self.querystring.get('Domain')[0]
if domain != "vpc":
return "Invalid domain:{0}.".format(domain), dict(status=400)
else:
domain = "standard"
address = ec2_backend.allocate_address(domain)
template = Template(ALLOCATE_ADDRESS_RESPONSE)
return template.render(address=address)
def associate_address(self): def associate_address(self):
raise NotImplementedError('ElasticIPAddresses.associate_address is not yet implemented') if "InstanceId" in self.querystring:
instance = ec2_backend.get_instance(self.querystring['InstanceId'][0])
elif "NetworkInterfaceId" in self.querystring:
raise NotImplementedError("Lookup by allocation id not implemented")
else:
return "Invalid request, expect InstanceId/NetworkId parameter.", dict(status=400)
reassociate = False
if "AllowReassociation" in self.querystring:
reassociate = self.querystring['AllowReassociation'][0] == "true"
if "PublicIp" in self.querystring:
eip = ec2_backend.associate_address(instance, address=self.querystring['PublicIp'][0], reassociate=reassociate)
elif "AllocationId" in self.querystring:
eip = ec2_backend.associate_address(instance, allocation_id=self.querystring['AllocationId'][0], reassociate=reassociate)
else:
return "Invalid request, expect PublicIp/AllocationId parameter.", dict(status=400)
if eip:
template = Template(ASSOCIATE_ADDRESS_RESPONSE)
return template.render(address=eip)
else:
return "Failed to associate address.", dict(status=400)
def describe_addresses(self): def describe_addresses(self):
raise NotImplementedError('ElasticIPAddresses.describe_addresses is not yet implemented') template = Template(DESCRIBE_ADDRESS_RESPONSE)
if "Filter.1.Name" in self.querystring:
raise NotImplementedError("Filtering not supported in describe_address.")
elif "PublicIp.1" in self.querystring:
public_ips = sequence_from_querystring("PublicIp", self.querystring)
addresses = ec2_backend.address_by_ip(public_ips)
elif "AllocationId.1" in self.querystring:
allocation_ids = sequence_from_querystring("AllocationId", self.querystring)
addresses = ec2_backend.address_by_allocation(allocation_ids)
else:
addresses = ec2_backend.describe_addresses()
return template.render(addresses=addresses)
def disassociate_address(self): def disassociate_address(self):
raise NotImplementedError('ElasticIPAddresses.disassociate_address is not yet implemented') if "PublicIp" in self.querystring:
disassociated = ec2_backend.disassociate_address(address=self.querystring['PublicIp'][0])
elif "AssociationId" in self.querystring:
disassociated = ec2_backend.disassociate_address(association_id=self.querystring['AssociationId'][0])
else:
return "Invalid request, expect PublicIp/AssociationId parameter.", dict(status=400)
if disassociated:
return Template(DISASSOCIATE_ADDRESS_RESPONSE).render()
else:
return "Address conresponding to PublicIp/AssociationIP not found.", dict(status=400)
def release_address(self): def release_address(self):
raise NotImplementedError('ElasticIPAddresses.release_address is not yet implemented') if "PublicIp" in self.querystring:
released = ec2_backend.release_address(address=self.querystring['PublicIp'][0])
elif "AllocationId" in self.querystring:
released = ec2_backend.release_address(allocation_id=self.querystring['AllocationId'][0])
else:
return "Invalid request, expect PublicIp/AllocationId parameter.", dict(status=400)
if released:
return Template(RELEASE_ADDRESS_RESPONSE).render()
else:
return "Address conresponding to PublicIp/AssociationIP not found.", dict(status=400)
ALLOCATE_ADDRESS_RESPONSE = """<AllocateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.allocation_id %}
<allocationId>{{ address.allocation_id }}</allocationId>
{% endif %}
</AllocateAddressResponse>"""
ASSOCIATE_ADDRESS_RESPONSE = """<AssociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</AssociateAddressResponse>"""
DESCRIBE_ADDRESS_RESPONSE = """<DescribeAddressesResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<addressesSet>
{% for address in addresses %}
<item>
<publicIp>{{ address.public_ip }}</publicIp>
<domain>{{ address.domain }}</domain>
{% if address.instance %}
<instanceId>{{ address.instance.id }}</instanceId>
{% else %}
<instanceId/>
{% endif %}
{% if address.association_id %}
<associationId>{{ address.association_id }}</associationId>
{% endif %}
</item>
{% endfor %}
</addressesSet>
</DescribeAddressesResponse>"""
DISASSOCIATE_ADDRESS_RESPONSE = """<DisassociateAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateAddressResponse>"""
RELEASE_ADDRESS_RESPONSE = """<ReleaseAddressResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ReleaseAddressResponse>"""


@ -95,8 +95,8 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
<instanceId>{{ instance.id }}</instanceId> <instanceId>{{ instance.id }}</instanceId>
<imageId>{{ instance.image_id }}</imageId> <imageId>{{ instance.image_id }}</imageId>
<instanceState> <instanceState>
<code>{{ instance._state.code }}</code> <code>0</code>
<name>{{ instance._state.name }}</name> <name>pending</name>
</instanceState> </instanceState>
<privateDnsName/> <privateDnsName/>
<dnsName/> <dnsName/>
@ -150,8 +150,8 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns='http://ec2.amazona
<code>{{ instance._state.code }}</code> <code>{{ instance._state.code }}</code>
<name>{{ instance._state.name }}</name> <name>{{ instance._state.name }}</name>
</instanceState> </instanceState>
<privateDnsName/> <privateDnsName>ip-10.0.0.12.ec2.internal</privateDnsName>
<dnsName/> <dnsName>ec2-46.51.219.63.compute-1.amazonaws.com</dnsName>
<reason/> <reason/>
<keyName>gsg-keypair</keyName> <keyName>gsg-keypair</keyName>
<amiLaunchIndex>0</amiLaunchIndex> <amiLaunchIndex>0</amiLaunchIndex>
@ -216,8 +216,8 @@ EC2_TERMINATE_INSTANCES = """
<name>running</name> <name>running</name>
</previousState> </previousState>
<currentState> <currentState>
<code>{{ instance._state.code }}</code> <code>32</code>
<name>{{ instance._state.name }}</name> <name>shutting-down</name>
</currentState> </currentState>
</item> </item>
{% endfor %} {% endfor %}
@ -236,8 +236,8 @@ EC2_STOP_INSTANCES = """
<name>running</name> <name>running</name>
</previousState> </previousState>
<currentState> <currentState>
<code>{{ instance._state.code }}</code> <code>64</code>
<name>{{ instance._state.name }}</name> <name>stopping</name>
</currentState> </currentState>
</item> </item>
{% endfor %} {% endfor %}
@ -256,8 +256,8 @@ EC2_START_INSTANCES = """
<name>running</name> <name>running</name>
</previousState> </previousState>
<currentState> <currentState>
<code>{{ instance._state.code }}</code> <code>0</code>
<name>{{ instance._state.name }}</name> <name>pending</name>
</currentState> </currentState>
</item> </item>
{% endfor %} {% endfor %}


@ -34,7 +34,7 @@ class SecurityGroups(object):
group = ec2_backend.create_security_group(name, description) group = ec2_backend.create_security_group(name, description)
if not group: if not group:
# There was an existing group # There was an existing group
return "There was an existing security group with name {}".format(name), dict(status=409) return "There was an existing security group with name {0}".format(name), dict(status=409)
template = Template(CREATE_SECURITY_GROUP_RESPONSE) template = Template(CREATE_SECURITY_GROUP_RESPONSE)
return template.render(group=group) return template.render(group=group)
@ -45,7 +45,7 @@ class SecurityGroups(object):
if not group: if not group:
# There was no such group # There was no such group
return "There was no security group with name {}".format(name), dict(status=404) return "There was no security group with name {0}".format(name), dict(status=404)
return DELETE_GROUP_RESPONSE return DELETE_GROUP_RESPONSE
def describe_security_groups(self): def describe_security_groups(self):


@ -7,7 +7,7 @@ def random_id(prefix=''):
chars = range(10) + ['a', 'b', 'c', 'd', 'e', 'f'] chars = range(10) + ['a', 'b', 'c', 'd', 'e', 'f']
instance_tag = ''.join(unicode(random.choice(chars)) for x in range(size)) instance_tag = ''.join(unicode(random.choice(chars)) for x in range(size))
return '{}-{}'.format(prefix, instance_tag) return '{0}-{1}'.format(prefix, instance_tag)
def random_ami_id(): def random_ami_id():
@ -46,6 +46,22 @@ def random_vpc_id():
return random_id(prefix='vpc') return random_id(prefix='vpc')
def random_eip_association_id():
return random_id(prefix='eipassoc')
def random_eip_allocation_id():
return random_id(prefix='eipalloc')
def random_ip():
return "127.{0}.{1}.{2}".format(
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)
)
def instance_ids_from_querystring(querystring_dict): def instance_ids_from_querystring(querystring_dict):
instance_ids = [] instance_ids = []
for key, value in querystring_dict.iteritems(): for key, value in querystring_dict.iteritems():
@ -54,15 +70,31 @@ def instance_ids_from_querystring(querystring_dict):
return instance_ids return instance_ids
def image_ids_from_querystring(querystring_dict):
image_ids = []
for key, value in querystring_dict.iteritems():
if 'ImageId' in key:
image_ids.append(value[0])
return image_ids
def sequence_from_querystring(parameter, querystring_dict):
parameter_values = []
for key, value in querystring_dict.iteritems():
if parameter in key:
parameter_values.append(value[0])
return parameter_values
def resource_ids_from_querystring(querystring_dict): def resource_ids_from_querystring(querystring_dict):
prefix = 'ResourceId' prefix = 'ResourceId'
response_values = {} response_values = {}
for key, value in querystring_dict.iteritems(): for key, value in querystring_dict.iteritems():
if key.startswith(prefix): if key.startswith(prefix):
resource_index = key.replace(prefix + ".", "") resource_index = key.replace(prefix + ".", "")
tag_key = querystring_dict.get("Tag.{}.Key".format(resource_index))[0] tag_key = querystring_dict.get("Tag.{0}.Key".format(resource_index))[0]
tag_value_key = "Tag.{}.Value".format(resource_index) tag_value_key = "Tag.{0}.Value".format(resource_index)
if tag_value_key in querystring_dict: if tag_value_key in querystring_dict:
tag_value = querystring_dict.get(tag_value_key)[0] tag_value = querystring_dict.get(tag_value_key)[0]
else: else:
@ -78,7 +110,7 @@ def filters_from_querystring(querystring_dict):
match = re.search("Filter.(\d).Name", key) match = re.search("Filter.(\d).Name", key)
if match: if match:
filter_index = match.groups()[0] filter_index = match.groups()[0]
value_prefix = "Filter.{}.Value".format(filter_index) value_prefix = "Filter.{0}.Value".format(filter_index)
filter_values = [filter_value[0] for filter_key, filter_value in querystring_dict.iteritems() if filter_key.startswith(value_prefix)] filter_values = [filter_value[0] for filter_key, filter_value in querystring_dict.iteritems() if filter_key.startswith(value_prefix)]
response_values[value[0]] = filter_values response_values[value[0]] = filter_values
return response_values return response_values
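The new querystring helpers are plain prefix scans over the parsed querystring dict; for example (the values below are made up, and ordering follows dict iteration order):

from moto.ec2.utils import sequence_from_querystring

querystring = {
    'Action': ['DescribeAddresses'],
    'PublicIp.1': ['127.12.34.56'],
    'PublicIp.2': ['127.98.76.54'],
}
# collects every value whose key contains the parameter name
print(sequence_from_querystring("PublicIp", querystring))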


@ -16,11 +16,11 @@ class ELBResponse(BaseResponse):
port_index = 1 port_index = 1
while True: while True:
try: try:
protocol = self.querystring['Listeners.member.{}.Protocol'.format(port_index)][0] protocol = self.querystring['Listeners.member.{0}.Protocol'.format(port_index)][0]
except KeyError: except KeyError:
break break
lb_port = self.querystring['Listeners.member.{}.LoadBalancerPort'.format(port_index)][0] lb_port = self.querystring['Listeners.member.{0}.LoadBalancerPort'.format(port_index)][0]
instance_port = self.querystring['Listeners.member.{}.InstancePort'.format(port_index)][0] instance_port = self.querystring['Listeners.member.{0}.InstancePort'.format(port_index)][0]
ports.append([protocol, lb_port, instance_port]) ports.append([protocol, lb_port, instance_port])
port_index += 1 port_index += 1
elb_backend.create_load_balancer( elb_backend.create_load_balancer(


@ -41,7 +41,7 @@ class FakeStep(object):
arg_index = 1 arg_index = 1
while True: while True:
arg = kwargs.get('hadoop_jar_step._args.member.{}'.format(arg_index)) arg = kwargs.get('hadoop_jar_step._args.member.{0}'.format(arg_index))
if arg: if arg:
self.args.append(arg) self.args.append(arg)
arg_index += 1 arg_index += 1


@ -14,23 +14,21 @@ class ElasticMapReduceResponse(BaseResponse):
return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)] return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)]
def _get_dict_param(self, param_prefix): def _get_dict_param(self, param_prefix):
return { params = {}
camelcase_to_underscores(key.replace(param_prefix, "")): value[0] for key, value in self.querystring.items():
for key, value if key.startswith(param_prefix):
in self.querystring.items() params[camelcase_to_underscores(key.replace(param_prefix, ""))] = value[0]
if key.startswith(param_prefix) return params
}
def _get_list_prefix(self, param_prefix): def _get_list_prefix(self, param_prefix):
results = [] results = []
param_index = 1 param_index = 1
while True: while True:
index_prefix = "{}.{}.".format(param_prefix, param_index) index_prefix = "{0}.{1}.".format(param_prefix, param_index)
new_items = { new_items = {}
camelcase_to_underscores(key.replace(index_prefix, "")): value[0] for key, value in self.querystring.items():
for key, value in self.querystring.items() if key.startswith(index_prefix):
if key.startswith(index_prefix) new_items[camelcase_to_underscores(key.replace(index_prefix, ""))] = value[0]
}
if not new_items: if not new_items:
break break
results.append(new_items) results.append(new_items)


@ -5,10 +5,10 @@ import string
def random_job_id(size=13): def random_job_id(size=13):
chars = range(10) + list(string.uppercase) chars = range(10) + list(string.uppercase)
job_tag = ''.join(unicode(random.choice(chars)) for x in range(size)) job_tag = ''.join(unicode(random.choice(chars)) for x in range(size))
return 'j-{}'.format(job_tag) return 'j-{0}'.format(job_tag)
def random_instance_group_id(size=13): def random_instance_group_id(size=13):
chars = range(10) + list(string.uppercase) chars = range(10) + list(string.uppercase)
job_tag = ''.join(unicode(random.choice(chars)) for x in range(size)) job_tag = ''.join(unicode(random.choice(chars)) for x in range(size))
return 'i-{}'.format(job_tag) return 'i-{0}'.format(job_tag)


@ -186,7 +186,7 @@ class S3Backend(BaseBackend):
if delimiter and delimiter in key_without_prefix: if delimiter and delimiter in key_without_prefix:
# If delimiter, we need to split out folder_results # If delimiter, we need to split out folder_results
key_without_delimiter = key_without_prefix.split(delimiter)[0] key_without_delimiter = key_without_prefix.split(delimiter)[0]
folder_results.add("{}{}{}".format(prefix, key_without_delimiter, delimiter)) folder_results.add("{0}{1}{2}".format(prefix, key_without_delimiter, delimiter))
else: else:
key_results.add(key) key_results.add(key)
else: else:


@ -7,223 +7,233 @@ from .models import s3_backend
from .utils import bucket_name_from_url from .utils import bucket_name_from_url
def all_buckets(): def parse_key_name(pth):
# No bucket specified. Listing all buckets return pth.lstrip("/")
all_buckets = s3_backend.get_all_buckets()
template = Template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def bucket_response(request, full_url, headers): class ResponseObject(object):
response = _bucket_response(request, full_url, headers) def __init__(self, backend, bucket_name_from_url, parse_key_name):
if isinstance(response, basestring): self.backend = backend
return 200, headers, response self.bucket_name_from_url = bucket_name_from_url
self.parse_key_name = parse_key_name
else: def all_buckets(self):
status_code, headers, response_content = response # No bucket specified. Listing all buckets
return status_code, headers, response_content all_buckets = self.backend.get_all_buckets()
template = Template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def bucket_response(self, request, full_url, headers):
response = self._bucket_response(request, full_url, headers)
if isinstance(response, basestring):
return 200, headers, response
def _bucket_response(request, full_url, headers):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query)
method = request.method
bucket_name = bucket_name_from_url(full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return all_buckets()
if method == 'GET':
bucket = s3_backend.get_bucket(bucket_name)
if bucket:
prefix = querystring.get('prefix', [None])[0]
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = s3_backend.prefix_query(bucket, prefix, delimiter)
template = Template(S3_BUCKET_GET_RESPONSE)
return template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
else: else:
return 404, headers, "" status_code, headers, response_content = response
elif method == 'PUT': return status_code, headers, response_content
new_bucket = s3_backend.create_bucket(bucket_name)
template = Template(S3_BUCKET_CREATE_RESPONSE)
return template.render(bucket=new_bucket)
elif method == 'DELETE':
removed_bucket = s3_backend.delete_bucket(bucket_name)
if removed_bucket is None:
# Non-existent bucket
template = Template(S3_DELETE_NON_EXISTING_BUCKET)
return 404, headers, template.render(bucket_name=bucket_name)
elif removed_bucket:
# Bucket exists
template = Template(S3_DELETE_BUCKET_SUCCESS)
return 204, headers, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = Template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, headers, template.render(bucket=removed_bucket)
elif method == 'POST':
#POST to bucket-url should create file from form
if hasattr(request, 'form'):
#Not HTTPretty
form = request.form
else:
#HTTPretty, build new form object
form = {}
for kv in request.body.split('&'):
k, v = kv.split('=')
form[k] = v
key = form['key'] def _bucket_response(self, request, full_url, headers):
f = form['file'] parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query)
method = request.method
new_key = s3_backend.set_key(bucket_name, key, f) bucket_name = self.bucket_name_from_url(full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
#Metadata if method == 'GET':
meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) bucket = self.backend.get_bucket(bucket_name)
for form_id in form: if bucket:
result = meta_regex.match(form_id) prefix = querystring.get('prefix', [None])[0]
if result: delimiter = querystring.get('delimiter', [None])[0]
meta_key = result.group(0).lower() result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
metadata = form[form_id] template = Template(S3_BUCKET_GET_RESPONSE)
new_key.set_metadata(meta_key, metadata) return template.render(
return 200, headers, "" bucket=bucket,
else: prefix=prefix,
raise NotImplementedError("Method {} has not been impelemented in the S3 backend yet".format(method)) delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
def key_response(request, full_url, headers):
response = _key_response(request, full_url, headers)
if isinstance(response, basestring):
return 200, headers, response
else:
status_code, headers, response_content = response
return status_code, headers, response_content
def _key_response(request, full_url, headers):
parsed_url = urlparse(full_url)
method = request.method
key_name = parsed_url.path.lstrip('/')
query = parse_qs(parsed_url.query)
bucket_name = bucket_name_from_url(full_url)
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
if method == 'GET':
if 'uploadId' in query:
upload_id = query['uploadId'][0]
parts = s3_backend.list_multipart(bucket_name, upload_id)
template = Template(S3_MULTIPART_LIST_RESPONSE)
return 200, headers, template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts
) )
key = s3_backend.get_key(bucket_name, key_name) else:
if key: return 404, headers, ""
headers.update(key.metadata) elif method == 'PUT':
return 200, headers, key.value new_bucket = self.backend.create_bucket(bucket_name)
else: template = Template(S3_BUCKET_CREATE_RESPONSE)
return 404, headers, "" return template.render(bucket=new_bucket)
if method == 'PUT': elif method == 'DELETE':
if 'uploadId' in query and 'partNumber' in query and body: removed_bucket = self.backend.delete_bucket(bucket_name)
upload_id = query['uploadId'][0] if removed_bucket is None:
part_number = int(query['partNumber'][0]) # Non-existent bucket
key = s3_backend.set_part(bucket_name, upload_id, part_number, body) template = Template(S3_DELETE_NON_EXISTING_BUCKET)
template = Template(S3_MULTIPART_UPLOAD_RESPONSE) return 404, headers, template.render(bucket_name=bucket_name)
headers.update(key.response_dict) elif removed_bucket:
return 200, headers, template.render(part=key) # Bucket exists
template = Template(S3_DELETE_BUCKET_SUCCESS)
return 204, headers, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = Template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, headers, template.render(bucket=removed_bucket)
elif method == 'POST':
#POST to bucket-url should create file from form
if hasattr(request, 'form'):
#Not HTTPretty
form = request.form
else:
#HTTPretty, build new form object
form = {}
for kv in request.body.split('&'):
k, v = kv.split('=')
form[k] = v
if 'x-amz-copy-source' in request.headers: key = form['key']
# Copy key f = form['file']
src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/")
s3_backend.copy_key(src_bucket, src_key, bucket_name, key_name) new_key = self.backend.set_key(bucket_name, key, f)
template = Template(S3_OBJECT_COPY_RESPONSE)
return template.render(key=src_key)
streaming_request = hasattr(request, 'streaming') and request.streaming
closing_connection = headers.get('connection') == 'close'
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = s3_backend.get_key(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = s3_backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = s3_backend.set_key(bucket_name, key_name, body)
request.streaming = True
#Metadata #Metadata
meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
for header in request.headers:
if isinstance(header, basestring): for form_id in form:
result = meta_regex.match(header) result = meta_regex.match(form_id)
if result: if result:
meta_key = result.group(0).lower() meta_key = result.group(0).lower()
metadata = request.headers[header] metadata = form[form_id]
new_key.set_metadata(meta_key, metadata) new_key.set_metadata(meta_key, metadata)
template = Template(S3_OBJECT_RESPONSE)
headers.update(new_key.response_dict)
return 200, headers, template.render(key=new_key)
elif method == 'HEAD':
key = s3_backend.get_key(bucket_name, key_name)
if key:
headers.update(key.metadata)
headers.update(key.response_dict)
return 200, headers, "" return 200, headers, ""
else: else:
return 404, headers, "" raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
elif method == 'DELETE':
if 'uploadId' in query: def key_response(self, request, full_url, headers):
upload_id = query['uploadId'][0] response = self._key_response(request, full_url, headers)
s3_backend.cancel_multipart(bucket_name, upload_id) if isinstance(response, basestring):
return 204, headers, ""
removed_key = s3_backend.delete_key(bucket_name, key_name)
template = Template(S3_DELETE_OBJECT_SUCCESS)
return 204, headers, template.render(bucket=removed_key)
elif method == 'POST':
if body == '' and parsed_url.query == 'uploads':
multipart = s3_backend.initiate_multipart(bucket_name, key_name)
template = Template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=multipart.id,
)
return 200, headers, response return 200, headers, response
if 'uploadId' in query:
upload_id = query['uploadId'][0]
key = s3_backend.complete_multipart(bucket_name, upload_id)
if key is not None:
template = Template(S3_MULTIPART_COMPLETE_RESPONSE)
return template.render(
bucket_name=bucket_name,
key_name=key.name,
etag=key.etag,
)
template = Template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
return 400, headers, template.render()
else: else:
raise NotImplementedError("Method POST had only been implemented for multipart uploads so far") status_code, headers, response_content = response
else: return status_code, headers, response_content
raise NotImplementedError("Method {} has not been impelemented in the S3 backend yet".format(method))
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query)
method = request.method
key_name = self.parse_key_name(parsed_url.path)
bucket_name = self.bucket_name_from_url(full_url)
if hasattr(request, 'body'):
# Boto
body = request.body
else:
# Flask server
body = request.data
if method == 'GET':
if 'uploadId' in query:
upload_id = query['uploadId'][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = Template(S3_MULTIPART_LIST_RESPONSE)
return 200, headers, template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts
)
key = self.backend.get_key(bucket_name, key_name)
if key:
headers.update(key.metadata)
return 200, headers, key.value
else:
return 404, headers, ""
if method == 'PUT':
if 'uploadId' in query and 'partNumber' in query and body:
upload_id = query['uploadId'][0]
part_number = int(query['partNumber'][0])
key = self.backend.set_part(bucket_name, upload_id, part_number, body)
template = Template(S3_MULTIPART_UPLOAD_RESPONSE)
headers.update(key.response_dict)
return 200, headers, template.render(part=key)
if 'x-amz-copy-source' in request.headers:
# Copy key
src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name)
template = Template(S3_OBJECT_COPY_RESPONSE)
return template.render(key=src_key)
streaming_request = hasattr(request, 'streaming') and request.streaming
closing_connection = headers.get('connection') == 'close'
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_key(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.set_key(bucket_name, key_name, body)
request.streaming = True
#Metadata
meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
for header in request.headers:
if isinstance(header, basestring):
result = meta_regex.match(header)
if result:
meta_key = result.group(0).lower()
metadata = request.headers[header]
new_key.set_metadata(meta_key, metadata)
template = Template(S3_OBJECT_RESPONSE)
headers.update(new_key.response_dict)
return 200, headers, template.render(key=new_key)
elif method == 'HEAD':
key = self.backend.get_key(bucket_name, key_name)
if key:
headers.update(key.metadata)
headers.update(key.response_dict)
return 200, headers, ""
else:
return 404, headers, ""
elif method == 'DELETE':
if 'uploadId' in query:
upload_id = query['uploadId'][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, headers, ""
removed_key = self.backend.delete_key(bucket_name, key_name)
template = Template(S3_DELETE_OBJECT_SUCCESS)
return 204, headers, template.render(bucket=removed_key)
elif method == 'POST':
if body == '' and parsed_url.query == 'uploads':
multipart = self.backend.initiate_multipart(bucket_name, key_name)
template = Template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=multipart.id,
)
return 200, headers, response
if 'uploadId' in query:
upload_id = query['uploadId'][0]
key = self.backend.complete_multipart(bucket_name, upload_id)
if key is not None:
template = Template(S3_MULTIPART_COMPLETE_RESPONSE)
return template.render(
bucket_name=bucket_name,
key_name=key.name,
etag=key.etag,
)
template = Template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
return 400, headers, template.render()
else:
raise NotImplementedError("Method POST had only been implemented for multipart uploads so far")
else:
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
S3ResponseInstance = ResponseObject(s3_backend, bucket_name_from_url, parse_key_name)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01"> S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner> <Owner>


@ -1,10 +1,10 @@
from .responses import bucket_response, key_response from .responses import S3ResponseInstance
url_bases = [ url_bases = [
"https?://(?P<bucket_name>[a-zA-Z0-9\-_.]*)\.?s3.amazonaws.com" "https?://(?P<bucket_name>[a-zA-Z0-9\-_.]*)\.?s3.amazonaws.com"
] ]
url_paths = { url_paths = {
'{0}/$': bucket_response, '{0}/$': S3ResponseInstance.bucket_response,
'{0}/(?P<key_name>[a-zA-Z0-9\-_.]+)': key_response, '{0}/(?P<key_name>[a-zA-Z0-9\-_.]+)': S3ResponseInstance.key_response,
} }


@ -0,0 +1,2 @@
from .models import s3bucket_path_backend
mock_s3bucket_path = s3bucket_path_backend.decorator


@ -0,0 +1,7 @@
from moto.s3.models import S3Backend
class S3BucketPathBackend(S3Backend):
True
s3bucket_path_backend = S3BucketPathBackend()


@ -0,0 +1,15 @@
from .models import s3bucket_path_backend
from .utils import bucket_name_from_url
from moto.s3.responses import ResponseObject
def parse_key_name(pth):
return "/".join(pth.rstrip("/").split("/")[2:])
S3BucketPathResponseInstance = ResponseObject(
s3bucket_path_backend,
bucket_name_from_url,
parse_key_name,
)


@ -0,0 +1,20 @@
from .responses import S3BucketPathResponseInstance as ro
url_bases = [
"https?://s3.amazonaws.com"
]
def bucket_response2(*args):
return ro.bucket_response(*args)
def bucket_response3(*args):
return ro.bucket_response(*args)
url_paths = {
'{0}/$': bucket_response3,
'{0}/(?P<bucket_name>[a-zA-Z0-9\-_.]+)$': ro.bucket_response,
'{0}/(?P<bucket_name>[a-zA-Z0-9\-_.]+)/$': bucket_response2,
'{0}/(?P<bucket_name>[a-zA-Z0-9\-_./]+)/(?P<key_name>[a-zA-Z0-9\-_.?]+)': ro.key_response
}


@ -0,0 +1,10 @@
import urlparse
def bucket_name_from_url(url):
pth = urlparse.urlparse(url).path.lstrip("/")
l = pth.lstrip("/").split("/")
if len(l) == 0 or l[0] == "":
return None
return l[0]
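The new s3bucket_path backend reuses the S3 response machinery but matches path-style URLs (https://s3.amazonaws.com/<bucket>/<key>). A hedged usage sketch, assuming boto's OrdinaryCallingFormat is used to force path-style addressing (bucket and key names are placeholders):

import boto
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.key import Key
from moto import mock_s3bucket_path

@mock_s3bucket_path
def path_style_round_trip():
    conn = boto.connect_s3('the_key', 'the_secret',
                           calling_format=OrdinaryCallingFormat())
    bucket = conn.create_bucket('mybucket')     # PUT https://s3.amazonaws.com/mybucket/
    key = Key(bucket)
    key.key = 'the-key'
    key.set_contents_from_string('some value')  # PUT .../mybucket/the-key
    return bucket.get_key('the-key').get_contents_as_string()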


@ -45,7 +45,7 @@ class EmailResponse(BaseResponse):
destination = self.querystring.get('Destination.ToAddresses.member.1')[0] destination = self.querystring.get('Destination.ToAddresses.member.1')[0]
message = ses_backend.send_email(source, subject, body, destination) message = ses_backend.send_email(source, subject, body, destination)
if not message: if not message:
return "Did not have authority to send from email {}".format(source), dict(status=400) return "Did not have authority to send from email {0}".format(source), dict(status=400)
template = Template(SEND_EMAIL_RESPONSE) template = Template(SEND_EMAIL_RESPONSE)
return template.render(message=message) return template.render(message=message)
@ -56,7 +56,7 @@ class EmailResponse(BaseResponse):
message = ses_backend.send_raw_email(source, destination, raw_data) message = ses_backend.send_raw_email(source, destination, raw_data)
if not message: if not message:
return "Did not have authority to send from email {}".format(source), dict(status=400) return "Did not have authority to send from email {0}".format(source), dict(status=400)
template = Template(SEND_RAW_EMAIL_RESPONSE) template = Template(SEND_RAW_EMAIL_RESPONSE)
return template.render(message=message) return template.render(message=message)


@ -7,7 +7,7 @@ def random_hex(length):
def get_random_message_id(): def get_random_message_id():
return "{}-{}-{}-{}-{}-{}-{}".format( return "{0}-{1}-{2}-{3}-{4}-{5}-{6}".format(
random_hex(16), random_hex(16),
random_hex(8), random_hex(8),
random_hex(4), random_hex(4),


@ -26,7 +26,6 @@ class QueuesResponse(BaseResponse):
else: else:
return "", dict(status=404) return "", dict(status=404)
def list_queues(self): def list_queues(self):
queues = sqs_backend.list_queues() queues = sqs_backend.list_queues()
template = Template(LIST_QUEUES_RESPONSE) template = Template(LIST_QUEUES_RESPONSE)
@ -51,7 +50,7 @@ class QueueResponse(BaseResponse):
queue_name = self.path.split("/")[-1] queue_name = self.path.split("/")[-1]
queue = sqs_backend.delete_queue(queue_name) queue = sqs_backend.delete_queue(queue_name)
if not queue: if not queue:
return "A queue with name {} does not exist".format(queue_name), dict(status=404) return "A queue with name {0} does not exist".format(queue_name), dict(status=404)
template = Template(DELETE_QUEUE_RESPONSE) template = Template(DELETE_QUEUE_RESPONSE)
return template.render(queue=queue) return template.render(queue=queue)
@ -79,15 +78,15 @@ class QueueResponse(BaseResponse):
messages = [] messages = []
for index in range(1, 11): for index in range(1, 11):
# Loop through looking for messages # Loop through looking for messages
message_key = 'SendMessageBatchRequestEntry.{}.MessageBody'.format(index) message_key = 'SendMessageBatchRequestEntry.{0}.MessageBody'.format(index)
message_body = self.querystring.get(message_key) message_body = self.querystring.get(message_key)
if not message_body: if not message_body:
# Found all messages # Found all messages
break break
message_user_id_key = 'SendMessageBatchRequestEntry.{}.Id'.format(index) message_user_id_key = 'SendMessageBatchRequestEntry.{0}.Id'.format(index)
message_user_id = self.querystring.get(message_user_id_key)[0] message_user_id = self.querystring.get(message_user_id_key)[0]
delay_key = 'SendMessageBatchRequestEntry.{}.DelaySeconds'.format(index) delay_key = 'SendMessageBatchRequestEntry.{0}.DelaySeconds'.format(index)
delay_seconds = self.querystring.get(delay_key, [None])[0] delay_seconds = self.querystring.get(delay_key, [None])[0]
message = sqs_backend.send_message(queue_name, message_body[0], delay_seconds=delay_seconds) message = sqs_backend.send_message(queue_name, message_body[0], delay_seconds=delay_seconds)
message.user_id = message_user_id message.user_id = message_user_id
@ -118,7 +117,7 @@ class QueueResponse(BaseResponse):
message_ids = [] message_ids = []
for index in range(1, 11): for index in range(1, 11):
# Loop through looking for messages # Loop through looking for messages
receipt_key = 'DeleteMessageBatchRequestEntry.{}.ReceiptHandle'.format(index) receipt_key = 'DeleteMessageBatchRequestEntry.{0}.ReceiptHandle'.format(index)
receipt_handle = self.querystring.get(receipt_key) receipt_handle = self.querystring.get(receipt_key)
if not receipt_handle: if not receipt_handle:
# Found all messages # Found all messages
@ -126,7 +125,7 @@ class QueueResponse(BaseResponse):
sqs_backend.delete_message(queue_name, receipt_handle[0]) sqs_backend.delete_message(queue_name, receipt_handle[0])
message_user_id_key = 'DeleteMessageBatchRequestEntry.{}.Id'.format(index) message_user_id_key = 'DeleteMessageBatchRequestEntry.{0}.Id'.format(index)
message_user_id = self.querystring.get(message_user_id_key)[0] message_user_id = self.querystring.get(message_user_id_key)[0]
message_ids.append(message_user_id) message_ids.append(message_user_id)

requirements-dev.txt

@ -0,0 +1,4 @@
-r requirements.txt
flask
boto
httpretty


@ -2,9 +2,22 @@
from setuptools import setup, find_packages from setuptools import setup, find_packages
install_requires = [
"boto",
"flask",
"httpretty>=0.6.1",
"Jinja2",
]
import sys
if sys.version_info < (2, 7):
# No builtin OrderedDict before 2.7
install_requires.append('ordereddict')
setup( setup(
name='moto', name='moto',
version='0.2.9', version='0.2.10',
description='A library that allows your python tests to easily' description='A library that allows your python tests to easily'
' mock out the boto library', ' mock out the boto library',
author='Steve Pulec', author='Steve Pulec',
@ -16,10 +29,5 @@ setup(
], ],
}, },
packages=find_packages(), packages=find_packages(),
install_requires=[ install_requires=install_requires,
"boto",
"flask",
"httpretty>=0.6.1",
"Jinja2",
],
) )


@ -365,7 +365,7 @@ def test_scan():
'Body': 'http://url_to_lolcat.gif', 'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B', 'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM', 'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': {1, 2, 3}, 'Ids': set([1, 2, 3]),
'PK': 7, 'PK': 7,
} }
item = table.new_item( item = table.new_item(
@ -442,7 +442,7 @@ def test_write_batch():
'Body': 'http://url_to_lolcat.gif', 'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B', 'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM', 'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': {1, 2, 3}, 'Ids': set([1, 2, 3]),
'PK': 7, 'PK': 7,
}, },
)) ))
@ -489,7 +489,7 @@ def test_batch_read():
'Body': 'http://url_to_lolcat.gif', 'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B', 'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM', 'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': {1, 2, 3}, 'Ids': set([1, 2, 3]),
'PK': 7, 'PK': 7,
} }
item = table.new_item( item = table.new_item(


@ -282,7 +282,7 @@ def test_scan():
'Body': 'http://url_to_lolcat.gif', 'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B', 'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM', 'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': {1, 2, 3}, 'Ids': set([1, 2, 3]),
'PK': 7, 'PK': 7,
} }
item = table.new_item( item = table.new_item(
@ -356,7 +356,7 @@ def test_write_batch():
'Body': 'http://url_to_lolcat.gif', 'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B', 'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM', 'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': {1, 2, 3}, 'Ids': set([1, 2, 3]),
'PK': 7, 'PK': 7,
}, },
)) ))
@ -401,7 +401,7 @@ def test_batch_read():
'Body': 'http://url_to_lolcat.gif', 'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B', 'SentBy': 'User B',
'ReceivedTime': '12/9/2011 11:36:03 PM', 'ReceivedTime': '12/9/2011 11:36:03 PM',
'Ids': {1, 2, 3}, 'Ids': set([1, 2, 3]),
'PK': 7, 'PK': 7,
} }
item = table.new_item( item = table.new_item(


@ -1,9 +1,194 @@
"""Test mocking of Elatic IP Address"""
import boto import boto
from boto.exception import EC2ResponseError
import sure # noqa import sure # noqa
from moto import mock_ec2 from moto import mock_ec2
import logging
import types
@mock_ec2 @mock_ec2
def test_elastic_ip_addresses(): def test_eip_allocate_classic():
pass """Allocate/release Classic EIP"""
conn = boto.connect_ec2('the_key', 'the_secret')
standard = conn.allocate_address()
standard.should.be.a(boto.ec2.address.Address)
standard.public_ip.should.be.a(types.UnicodeType)
standard.instance_id.should.be.none
standard.domain.should.be.equal("standard")
standard.release()
standard.should_not.be.within(conn.get_all_addresses())
@mock_ec2
def test_eip_allocate_vpc():
"""Allocate/release VPC EIP"""
conn = boto.connect_ec2('the_key', 'the_secret')
vpc = conn.allocate_address(domain="vpc")
vpc.should.be.a(boto.ec2.address.Address)
vpc.domain.should.be.equal("vpc")
logging.debug("vpc alloc_id:".format(vpc.allocation_id))
vpc.release()
@mock_ec2
def test_eip_allocate_invalid_domain():
"""Allocate EIP invalid domain"""
conn = boto.connect_ec2('the_key', 'the_secret')
conn.allocate_address.when.called_with(domain="bogus").should.throw(EC2ResponseError)
@mock_ec2
def test_eip_associate_classic():
"""Associate/Disassociate EIP to classic instance"""
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
eip = conn.allocate_address()
eip.instance_id.should.be.none
conn.associate_address.when.called_with(public_ip=eip.public_ip).should.throw(EC2ResponseError)
conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip)
eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ):
eip.instance_id.should.be.equal(instance.id)
conn.disassociate_address(public_ip=eip.public_ip)
eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ):
eip.instance_id.should.be.equal(u'')
eip.release()
eip.should_not.be.within(conn.get_all_addresses())
eip = None
instance.terminate()
@mock_ec2
def test_eip_associate_vpc():
"""Associate/Disassociate EIP to VPC instance"""
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
eip = conn.allocate_address(domain='vpc')
eip.instance_id.should.be.none
conn.associate_address.when.called_with(allocation_id=eip.allocation_id).should.throw(EC2ResponseError)
conn.associate_address(instance_id=instance.id, allocation_id=eip.allocation_id)
eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ):
eip.instance_id.should.be.equal(instance.id)
conn.disassociate_address(association_id=eip.association_id)
eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] # no .update() on address ):
eip.instance_id.should.be.equal(u'')
eip.association_id.should.be.none
eip.release()
eip = None
instance.terminate()
@mock_ec2
def test_eip_reassociate():
"""reassociate EIP"""
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
eip = conn.allocate_address()
conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip)
conn.associate_address.when.called_with(instance_id=instance.id, public_ip=eip.public_ip, allow_reassociation=False).should.throw(EC2ResponseError)
conn.associate_address.when.called_with(instance_id=instance.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError)
eip.release()
eip = None
instance.terminate()
@mock_ec2
def test_eip_associate_invalid_args():
"""Associate EIP, invalid args """
conn = boto.connect_ec2('the_key', 'the_secret')
reservation = conn.run_instances('ami-1234abcd')
instance = reservation.instances[0]
eip = conn.allocate_address()
conn.associate_address.when.called_with(instance_id=instance.id).should.throw(EC2ResponseError)
instance.terminate()
@mock_ec2
def test_eip_disassociate_bogus_association():
"""Disassociate bogus EIP"""
conn = boto.connect_ec2('the_key', 'the_secret')
conn.disassociate_address.when.called_with(association_id="bogus").should.throw(EC2ResponseError)
@mock_ec2
def test_eip_release_bogus_eip():
"""Release bogus EIP"""
conn = boto.connect_ec2('the_key', 'the_secret')
conn.release_address.when.called_with(allocation_id="bogus").should.throw(EC2ResponseError)
@mock_ec2
def test_eip_disassociate_arg_error():
"""Invalid arguments disassociate address"""
conn = boto.connect_ec2('the_key', 'the_secret')
conn.disassociate_address.when.called_with().should.throw(EC2ResponseError)
@mock_ec2
def test_eip_release_arg_error():
"""Invalid arguments release address"""
conn = boto.connect_ec2('the_key', 'the_secret')
conn.release_address.when.called_with().should.throw(EC2ResponseError)
@mock_ec2
def test_eip_describe():
"""Listing of allocated Elastic IP Addresses."""
conn = boto.connect_ec2('the_key', 'the_secret')
eips = []
number_of_classic_ips = 2
number_of_vpc_ips = 2
# Allocate some IPs
for _ in range(number_of_classic_ips):
eips.append(conn.allocate_address())
for _ in range(number_of_vpc_ips):
eips.append(conn.allocate_address(domain='vpc'))
len(eips).should.be.equal(number_of_classic_ips + number_of_vpc_ips)
# Can we find each one individually?
for eip in eips:
if eip.allocation_id:
lookup_addresses = conn.get_all_addresses(allocation_ids=[eip.allocation_id])
else:
lookup_addresses = conn.get_all_addresses(addresses=[eip.public_ip])
len(lookup_addresses).should.be.equal(1)
lookup_addresses[0].public_ip.should.be.equal(eip.public_ip)
# Can we find the first two when we search for them?
lookup_addresses = conn.get_all_addresses(addresses=[eips[0].public_ip, eips[1].public_ip])
len(lookup_addresses).should.be.equal(2)
lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip)
lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip)
# Release all IPs
for eip in eips:
eip.release()
len(conn.get_all_addresses()).should.be.equal(0)
@mock_ec2
def test_eip_describe_none():
"""Find nothing when seach for bogus IP"""
conn = boto.connect_ec2('the_key', 'the_secret')
lookup_addresses = conn.get_all_addresses(addresses=["256.256.256.256"])
len(lookup_addresses).should.be.equal(0)

View File

@ -35,6 +35,7 @@ def test_instance_launch_and_terminate():
reservation.should.be.a(Reservation) reservation.should.be.a(Reservation)
reservation.instances.should.have.length_of(1) reservation.instances.should.have.length_of(1)
instance = reservation.instances[0] instance = reservation.instances[0]
instance.state.should.equal('pending')
reservations = conn.get_all_instances() reservations = conn.get_all_instances()
reservations.should.have.length_of(1) reservations.should.have.length_of(1)
@ -42,13 +43,13 @@ def test_instance_launch_and_terminate():
instances = reservations[0].instances instances = reservations[0].instances
instances.should.have.length_of(1) instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id) instances[0].id.should.equal(instance.id)
instances[0].state.should.equal('pending') instances[0].state.should.equal('running')
conn.terminate_instances([instances[0].id]) conn.terminate_instances([instances[0].id])
reservations = conn.get_all_instances() reservations = conn.get_all_instances()
instance = reservations[0].instances[0] instance = reservations[0].instances[0]
instance.state.should.equal('shutting-down') instance.state.should.equal('terminated')
@mock_ec2 @mock_ec2
@ -85,18 +86,18 @@ def test_get_instances_filtering_by_state():
conn.terminate_instances([instance1.id]) conn.terminate_instances([instance1.id])
reservations = conn.get_all_instances(filters={'instance-state-name': 'pending'}) reservations = conn.get_all_instances(filters={'instance-state-name': 'running'})
reservations.should.have.length_of(1) reservations.should.have.length_of(1)
# Since we terminated instance1, only instance2 and instance3 should be returned # Since we terminated instance1, only instance2 and instance3 should be returned
instance_ids = [instance.id for instance in reservations[0].instances] instance_ids = [instance.id for instance in reservations[0].instances]
set(instance_ids).should.equal(set([instance2.id, instance3.id])) set(instance_ids).should.equal(set([instance2.id, instance3.id]))
reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'pending'}) reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'running'})
reservations.should.have.length_of(1) reservations.should.have.length_of(1)
instance_ids = [instance.id for instance in reservations[0].instances] instance_ids = [instance.id for instance in reservations[0].instances]
instance_ids.should.equal([instance2.id]) instance_ids.should.equal([instance2.id])
reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminating'}) reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminated'})
list(reservations).should.equal([]) list(reservations).should.equal([])
# get_all_instances should still return all 3 # get_all_instances should still return all 3

View File

@ -0,0 +1,50 @@
import sure # noqa
import moto.server as server
'''
Test the different server responses
'''
server.configure_urls("s3bucket_path")
def test_s3_server_get():
test_client = server.app.test_client()
res = test_client.get('/')
res.data.should.contain('ListAllMyBucketsResult')
def test_s3_server_bucket_create():
test_client = server.app.test_client()
res = test_client.put('/foobar', 'http://localhost:5000')
res.status_code.should.equal(200)
res = test_client.get('/')
res.data.should.contain('<Name>foobar</Name>')
res = test_client.get('/foobar', 'http://localhost:5000')
res.status_code.should.equal(200)
res.data.should.contain("ListBucketResult")
res = test_client.put('/foobar/bar', 'http://localhost:5000', data='test value')
res.status_code.should.equal(200)
res = test_client.get('/foobar/bar', 'http://localhost:5000')
res.status_code.should.equal(200)
res.data.should.equal("test value")
def test_s3_server_post_to_bucket():
test_client = server.app.test_client()
res = test_client.put('/foobar', 'http://localhost:5000/')
res.status_code.should.equal(200)
test_client.post('/foobar', "https://localhost:5000/", data={
'key': 'the-key',
'file': 'nothing'
})
res = test_client.get('/foobar/the-key', 'http://localhost:5000/')
res.status_code.should.equal(200)
res.data.should.equal("nothing")

View File

@ -0,0 +1,281 @@
import urllib2
import boto
from boto.exception import S3ResponseError
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
from freezegun import freeze_time
import requests
import sure # noqa
from moto import mock_s3bucket_path
def create_connection(key=None, secret=None):
return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat())
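# OrdinaryCallingFormat keeps the bucket name in the request path
# (e.g. http://s3.amazonaws.com/<bucket>/<key>) rather than in the hostname,
# so these tests exercise the path-style s3bucket_path backend instead of
# the subdomain-style s3 backend.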
class MyModel(object):
def __init__(self, name, value):
self.name = name
self.value = value
def save(self):
conn = create_connection('the_key', 'the_secret')
bucket = conn.get_bucket('mybucket')
k = Key(bucket)
k.key = self.name
k.set_contents_from_string(self.value)
@mock_s3bucket_path
def test_my_model_save():
# Create Bucket so that test can run
conn = create_connection('the_key', 'the_secret')
conn.create_bucket('mybucket')
####################################
model_instance = MyModel('steve', 'is awesome')
model_instance.save()
conn.get_bucket('mybucket').get_key('steve').get_contents_as_string().should.equal('is awesome')
@mock_s3bucket_path
def test_missing_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
bucket.get_key("the-key").should.equal(None)
@mock_s3bucket_path
def test_missing_key_urllib2():
conn = create_connection('the_key', 'the_secret')
conn.create_bucket("foobar")
urllib2.urlopen.when.called_with("http://s3.amazonaws.com/foobar/the-key").should.throw(urllib2.HTTPError)
@mock_s3bucket_path
def test_empty_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("")
bucket.get_key("the-key").get_contents_as_string().should.equal('')
@mock_s3bucket_path
def test_empty_key_set_on_existing_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar")
bucket.get_key("the-key").get_contents_as_string().should.equal('foobar')
key.set_contents_from_string("")
bucket.get_key("the-key").get_contents_as_string().should.equal('')
@mock_s3bucket_path
def test_large_key_save():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar" * 100000)
bucket.get_key("the-key").get_contents_as_string().should.equal('foobar' * 100000)
@mock_s3bucket_path
def test_copy_key():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
bucket.copy_key('new-key', 'foobar', 'the-key')
bucket.get_key("the-key").get_contents_as_string().should.equal("some value")
bucket.get_key("new-key").get_contents_as_string().should.equal("some value")
@mock_s3bucket_path
def test_set_metadata():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = 'the-key'
key.set_metadata('md', 'Metadatastring')
key.set_contents_from_string("Testval")
bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
@freeze_time("2012-01-01 12:00:00")
@mock_s3bucket_path
def test_last_modified():
# See https://github.com/boto/boto/issues/466
conn = create_connection()
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
rs = bucket.get_all_keys()
rs[0].last_modified.should.equal('2012-01-01T12:00:00Z')
bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
@mock_s3bucket_path
def test_missing_bucket():
conn = create_connection('the_key', 'the_secret')
conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
@mock_s3bucket_path
def test_bucket_with_dash():
conn = create_connection('the_key', 'the_secret')
conn.get_bucket.when.called_with('mybucket-test').should.throw(S3ResponseError)
@mock_s3bucket_path
def test_bucket_deletion():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
# Try to delete a bucket that still has keys
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
bucket.delete_key("the-key")
conn.delete_bucket("foobar")
# Get non-existing bucket
conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
# Delete non-existent bucket
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
@mock_s3bucket_path
def test_get_all_buckets():
conn = create_connection('the_key', 'the_secret')
conn.create_bucket("foobar")
conn.create_bucket("foobar2")
buckets = conn.get_all_buckets()
buckets.should.have.length_of(2)
@mock_s3bucket_path
def test_post_to_bucket():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
requests.post("https://s3.amazonaws.com/foobar", {
'key': 'the-key',
'file': 'nothing'
})
bucket.get_key('the-key').get_contents_as_string().should.equal('nothing')
@mock_s3bucket_path
def test_post_with_metadata_to_bucket():
conn = create_connection('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
requests.post("https://s3.amazonaws.com/foobar", {
'key': 'the-key',
'file': 'nothing',
'x-amz-meta-test': 'metadata'
})
bucket.get_key('the-key').get_metadata('test').should.equal('metadata')
@mock_s3bucket_path
def test_bucket_method_not_implemented():
requests.patch.when.called_with("https://s3.amazonaws.com/foobar").should.throw(NotImplementedError)
@mock_s3bucket_path
def test_key_method_not_implemented():
requests.post.when.called_with("https://s3.amazonaws.com/foobar/foo").should.throw(NotImplementedError)
@mock_s3bucket_path
def test_bucket_name_with_dot():
conn = create_connection()
bucket = conn.create_bucket('firstname.lastname')
k = Key(bucket, 'somekey')
k.set_contents_from_string('somedata')
@mock_s3bucket_path
def test_key_with_special_characters():
conn = create_connection()
bucket = conn.create_bucket('test_bucket_name')
key = Key(bucket, 'test_list_keys_2/x?y')
key.set_contents_from_string('value1')
key_list = bucket.list('test_list_keys_2/', '/')
keys = [x for x in key_list]
keys[0].name.should.equal("test_list_keys_2/x?y")
@mock_s3bucket_path
def test_bucket_key_listing_order():
conn = create_connection()
bucket = conn.create_bucket('test_bucket')
prefix = 'toplevel/'
def store(name):
k = Key(bucket, prefix + name)
k.set_contents_from_string('somedata')
names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
for name in names:
store(name)
delimiter = None
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal([
'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
])
delimiter = '/'
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal([
'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
])
# Test delimiter with no prefix
delimiter = '/'
keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
keys.should.equal(['toplevel'])
delimiter = None
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal([u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key'])
delimiter = '/'
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal([u'toplevel/x/'])

View File

@ -0,0 +1,14 @@
from sure import expect
from moto.s3bucket_path.utils import bucket_name_from_url
def test_base_url():
expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None)
def test_localhost_bucket():
expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc')).should.equal("wfoobar")
def test_localhost_without_bucket():
expect(bucket_name_from_url('https://www.localhost:5000')).should.equal(None)

8
tox.ini Normal file
View File

@ -0,0 +1,8 @@
[tox]
envlist = py26, py27
[testenv]
deps = -r{toxinidir}/requirements.txt
commands =
{envpython} setup.py test
nosetests