commit 0270c68e0d
Author: mfranke
Date: 2015-11-12 10:12:54 +01:00
54 changed files with 1665 additions and 98 deletions

Dockerfile (new file)

@@ -0,0 +1,11 @@
FROM python:2
ADD . /moto/
ENV PYTHONUNBUFFERED 1
WORKDIR /moto/
RUN python setup.py install
CMD ["moto_server"]
EXPOSE 5000

README.md

@@ -61,6 +61,8 @@ It gets even better! Moto isn't just S3. Here's the status of the other AWS serv
 |------------------------------------------------------------------------------|
 | Cloudwatch | @mock_cloudwatch | basic endpoints done |
 |------------------------------------------------------------------------------|
+| Data Pipeline | @mock_datapipeline| basic endpoints done |
+|------------------------------------------------------------------------------|
 | DynamoDB | @mock_dynamodb | core endpoints done |
 | DynamoDB2 | @mock_dynamodb2 | core endpoints done - no indexes |
 |------------------------------------------------------------------------------|

moto/__init__.py

@@ -3,11 +3,12 @@ import logging
 logging.getLogger('boto').setLevel(logging.CRITICAL)

 __title__ = 'moto'
-__version__ = '0.4.12'
+__version__ = '0.4.18'

 from .autoscaling import mock_autoscaling  # flake8: noqa
 from .cloudformation import mock_cloudformation  # flake8: noqa
 from .cloudwatch import mock_cloudwatch  # flake8: noqa
+from .datapipeline import mock_datapipeline  # flake8: noqa
 from .dynamodb import mock_dynamodb  # flake8: noqa
 from .dynamodb2 import mock_dynamodb2  # flake8: noqa
 from .ec2 import mock_ec2  # flake8: noqa

moto/autoscaling/models.py

@@ -113,7 +113,8 @@ class FakeAutoScalingGroup(object):
     def __init__(self, name, availability_zones, desired_capacity, max_size,
                  min_size, launch_config_name, vpc_zone_identifier,
                  default_cooldown, health_check_period, health_check_type,
-                 load_balancers, placement_group, termination_policies, autoscaling_backend):
+                 load_balancers, placement_group, termination_policies,
+                 autoscaling_backend, tags):
         self.autoscaling_backend = autoscaling_backend
         self.name = name
         self.availability_zones = availability_zones

@@ -133,6 +134,7 @@ class FakeAutoScalingGroup(object):
         self.instance_states = []
         self.set_desired_capacity(desired_capacity)
+        self.tags = tags if tags else []

     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):

@@ -156,6 +158,7 @@ class FakeAutoScalingGroup(object):
             load_balancers=load_balancer_names,
             placement_group=None,
             termination_policies=properties.get("TerminationPolicies", []),
+            tags=properties.get("Tags", []),
         )
         return group

@@ -261,7 +264,7 @@ class AutoScalingBackend(BaseBackend):
                                  launch_config_name, vpc_zone_identifier,
                                  default_cooldown, health_check_period,
                                  health_check_type, load_balancers,
-                                 placement_group, termination_policies):
+                                 placement_group, termination_policies, tags):

         def make_int(value):
             return int(value) if value is not None else value

@@ -286,6 +289,7 @@ class AutoScalingBackend(BaseBackend):
             placement_group=placement_group,
             termination_policies=termination_policies,
             autoscaling_backend=self,
+            tags=tags,
         )
         self.autoscaling_groups[name] = group
         return group

moto/autoscaling/responses.py

@@ -60,6 +60,7 @@ class AutoScalingResponse(BaseResponse):
             load_balancers=self._get_multi_param('LoadBalancerNames.member'),
             placement_group=self._get_param('PlacementGroup'),
             termination_policies=self._get_multi_param('TerminationPolicies.member'),
+            tags=self._get_list_prefix('Tags.member'),
         )
         template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
         return template.render()

@@ -235,7 +236,17 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
     <AutoScalingGroups>
       {% for group in groups %}
       <member>
-        <Tags/>
+        <Tags>
+          {% for tag in group.tags %}
+          <member>
+            <ResourceType>{{ tag.resource_type }}</ResourceType>
+            <ResourceId>{{ tag.resource_id }}</ResourceId>
+            <PropagateAtLaunch>{{ tag.propagate_at_launch }}</PropagateAtLaunch>
+            <Key>{{ tag.key }}</Key>
+            <Value>{{ tag.value }}</Value>
+          </member>
+          {% endfor %}
+        </Tags>
         <SuspendedProcesses/>
         <AutoScalingGroupName>{{ group.name }}</AutoScalingGroupName>
         <HealthCheckType>{{ group.health_check_type }}</HealthCheckType>

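As an aside, a minimal sketch (not part of this commit) of exercising the new auto scaling group tag plumbing through boto2, in the style of moto's own tests; the AMI ID, names, and tag values are illustrative:

import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScalingGroup, LaunchConfiguration
from boto.ec2.autoscale.tag import Tag
from moto import mock_autoscaling


@mock_autoscaling
def test_autoscaling_group_tags():
    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    config = LaunchConfiguration(name='tester', image_id='ami-1234abcd',
                                 instance_type='t1.micro')
    conn.create_launch_configuration(config)

    group = AutoScalingGroup(
        name='tester_group', availability_zones=['us-east-1a'],
        desired_capacity=2, min_size=2, max_size=2,
        launch_config=config,
        # Tags are now round-tripped through CreateAutoScalingGroup /
        # DescribeAutoScalingGroups instead of always rendering <Tags/>
        tags=[Tag(resource_id='tester_group', key='env', value='test',
                  propagate_at_launch=True)],
    )
    conn.create_auto_scaling_group(group)

    group = conn.get_all_groups()[0]
    assert group.tags[0].key == 'env'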
moto/backends.py

@@ -2,6 +2,7 @@ from __future__ import unicode_literals
 from moto.autoscaling import autoscaling_backend
 from moto.cloudwatch import cloudwatch_backend
 from moto.cloudformation import cloudformation_backend
+from moto.datapipeline import datapipeline_backend
 from moto.dynamodb import dynamodb_backend
 from moto.dynamodb2 import dynamodb_backend2
 from moto.ec2 import ec2_backend

@@ -25,6 +26,7 @@ BACKENDS = {
     'autoscaling': autoscaling_backend,
     'cloudformation': cloudformation_backend,
     'cloudwatch': cloudwatch_backend,
+    'datapipeline': datapipeline_backend,
     'dynamodb': dynamodb_backend,
     'dynamodb2': dynamodb_backend2,
     'ec2': ec2_backend,

moto/cloudformation/parsing.py

@@ -4,6 +4,7 @@ import functools
 import logging

 from moto.autoscaling import models as autoscaling_models
+from moto.datapipeline import models as datapipeline_models
 from moto.ec2 import models as ec2_models
 from moto.elb import models as elb_models
 from moto.iam import models as iam_models

@@ -36,6 +37,7 @@ MODEL_MAP = {
     "AWS::EC2::VPCGatewayAttachment": ec2_models.VPCGatewayAttachment,
     "AWS::EC2::VPCPeeringConnection": ec2_models.VPCPeeringConnection,
     "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer,
+    "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline,
     "AWS::IAM::InstanceProfile": iam_models.InstanceProfile,
     "AWS::IAM::Role": iam_models.Role,
     "AWS::RDS::DBInstance": rds_models.Database,

moto/cloudformation/responses.py

@@ -86,9 +86,19 @@ class CloudFormationResponse(BaseResponse):
     def get_template(self):
         name_or_stack_id = self.querystring.get('StackName')[0]
         stack = self.cloudformation_backend.get_stack(name_or_stack_id)
-        return stack.template
+        response = {
+            "GetTemplateResponse": {
+                "GetTemplateResult": {
+                    "TemplateBody": stack.template,
+                    "ResponseMetadata": {
+                        "RequestId": "2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE"
+                    }
+                }
+            }
+        }
+        return json.dumps(response)

     def update_stack(self):
         stack_name = self._get_param('StackName')

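For illustration, a hedged boto2 sketch (not in the diff) of the effect: get_template now returns the standard GetTemplateResponse envelope rather than the bare template body; the stack name and template body are placeholders.

import boto.cloudformation
from moto import mock_cloudformation


@mock_cloudformation
def test_get_template():
    conn = boto.cloudformation.connect_to_region('us-east-1')
    conn.create_stack('test_stack', template_body='{"Resources": {}}')

    body = conn.get_template('test_stack')
    # boto parses the JSON wrapper that get_template() now emits
    assert 'TemplateBody' in body['GetTemplateResponse']['GetTemplateResult']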
moto/core/responses.py

@@ -63,8 +63,16 @@ class DynamicDictLoader(DictLoader):

 class _TemplateEnvironmentMixin(object):
-    loader = DynamicDictLoader({})
-    environment = Environment(loader=loader)
+
+    def __init__(self):
+        super(_TemplateEnvironmentMixin, self).__init__()
+        self.loader = DynamicDictLoader({})
+        self.environment = Environment(loader=self.loader, autoescape=self.should_autoescape)
+
+    @property
+    def should_autoescape(self):
+        # Allow for subclass to overwrite
+        return False

     def contains_template(self, template_id):
         return self.loader.contains(template_id)

@@ -73,7 +81,7 @@ class _TemplateEnvironmentMixin(object):
         template_id = id(source)
         if not self.contains_template(template_id):
             self.loader.update({template_id: source})
-            self.environment = Environment(loader=self.loader)
+            self.environment = Environment(loader=self.loader, autoescape=self.should_autoescape)
         return self.environment.get_template(template_id)

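A standalone Jinja2 illustration (not moto code) of what the per-instance autoescape flag buys: values interpolated into XML templates are entity-escaped once a subclass opts in.

from jinja2 import DictLoader, Environment

env = Environment(loader=DictLoader({'t': '<Value>{{ value }}</Value>'}),
                  autoescape=True)
# Unsafe characters in tag values are escaped instead of corrupting the XML
print(env.get_template('t').render(value='a&b<c>'))
# -> <Value>a&amp;b&lt;c&gt;</Value>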
moto/datapipeline/__init__.py (new file)

@@ -0,0 +1,12 @@
from __future__ import unicode_literals
from .models import datapipeline_backends
from ..core.models import MockAWS

datapipeline_backend = datapipeline_backends['us-east-1']


def mock_datapipeline(func=None):
    if func:
        return MockAWS(datapipeline_backends)(func)
    else:
        return MockAWS(datapipeline_backends)

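A minimal sketch (not part of the commit) of the new decorator in use via boto2; the pipeline name and unique id are illustrative.

import boto.datapipeline
from moto import mock_datapipeline


@mock_datapipeline
def test_create_and_list_pipelines():
    conn = boto.datapipeline.connect_to_region('us-east-1')
    res = conn.create_pipeline('my-pipeline', 'some-unique-id')

    pipeline_id = res['pipelineId']
    assert pipeline_id.startswith('df-')  # see get_random_pipeline_id() below

    listed = conn.list_pipelines()['pipelineIdList']
    assert any(p['id'] == pipeline_id for p in listed)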
moto/datapipeline/models.py (new file)

@@ -0,0 +1,149 @@
from __future__ import unicode_literals

import datetime
import boto.datapipeline
from moto.core import BaseBackend
from .utils import get_random_pipeline_id, remove_capitalization_of_dict_keys


class PipelineObject(object):
    def __init__(self, object_id, name, fields):
        self.object_id = object_id
        self.name = name
        self.fields = fields

    def to_json(self):
        return {
            "fields": self.fields,
            "id": self.object_id,
            "name": self.name,
        }


class Pipeline(object):
    def __init__(self, name, unique_id):
        self.name = name
        self.unique_id = unique_id
        self.description = ""
        self.pipeline_id = get_random_pipeline_id()
        self.creation_time = datetime.datetime.utcnow()
        self.objects = []
        self.status = "PENDING"

    @property
    def physical_resource_id(self):
        return self.pipeline_id

    def to_meta_json(self):
        return {
            "id": self.pipeline_id,
            "name": self.name,
        }

    def to_json(self):
        return {
            "description": self.description,
            "fields": [{
                "key": "@pipelineState",
                "stringValue": self.status,
            }, {
                "key": "description",
                "stringValue": self.description
            }, {
                "key": "name",
                "stringValue": self.name
            }, {
                "key": "@creationTime",
                "stringValue": datetime.datetime.strftime(self.creation_time, '%Y-%m-%dT%H-%M-%S'),
            }, {
                "key": "@id",
                "stringValue": self.pipeline_id,
            }, {
                "key": "@sphere",
                "stringValue": "PIPELINE"
            }, {
                "key": "@version",
                "stringValue": "1"
            }, {
                "key": "@userId",
                "stringValue": "924374875933"
            }, {
                "key": "@accountId",
                "stringValue": "924374875933"
            }, {
                "key": "uniqueId",
                "stringValue": self.unique_id
            }],
            "name": self.name,
            "pipelineId": self.pipeline_id,
            "tags": [
            ]
        }

    def set_pipeline_objects(self, pipeline_objects):
        self.objects = [
            PipelineObject(pipeline_object['id'], pipeline_object['name'], pipeline_object['fields'])
            for pipeline_object in remove_capitalization_of_dict_keys(pipeline_objects)
        ]

    def activate(self):
        self.status = "SCHEDULED"

    @classmethod
    def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
        datapipeline_backend = datapipeline_backends[region_name]
        properties = cloudformation_json["Properties"]

        cloudformation_unique_id = "cf-" + properties["Name"]
        pipeline = datapipeline_backend.create_pipeline(properties["Name"], cloudformation_unique_id)
        datapipeline_backend.put_pipeline_definition(pipeline.pipeline_id, properties["PipelineObjects"])

        if properties["Activate"]:
            pipeline.activate()
        return pipeline


class DataPipelineBackend(BaseBackend):
    def __init__(self):
        self.pipelines = {}

    def create_pipeline(self, name, unique_id):
        pipeline = Pipeline(name, unique_id)
        self.pipelines[pipeline.pipeline_id] = pipeline
        return pipeline

    def list_pipelines(self):
        return self.pipelines.values()

    def describe_pipelines(self, pipeline_ids):
        pipelines = [pipeline for pipeline in self.pipelines.values() if pipeline.pipeline_id in pipeline_ids]
        return pipelines

    def get_pipeline(self, pipeline_id):
        return self.pipelines[pipeline_id]

    def put_pipeline_definition(self, pipeline_id, pipeline_objects):
        pipeline = self.get_pipeline(pipeline_id)
        pipeline.set_pipeline_objects(pipeline_objects)

    def get_pipeline_definition(self, pipeline_id):
        pipeline = self.get_pipeline(pipeline_id)
        return pipeline.objects

    def describe_objects(self, object_ids, pipeline_id):
        pipeline = self.get_pipeline(pipeline_id)
        pipeline_objects = [
            pipeline_object for pipeline_object in pipeline.objects
            if pipeline_object.object_id in object_ids
        ]
        return pipeline_objects

    def activate_pipeline(self, pipeline_id):
        pipeline = self.get_pipeline(pipeline_id)
        pipeline.activate()


datapipeline_backends = {}
for region in boto.datapipeline.regions():
    datapipeline_backends[region.name] = DataPipelineBackend()

moto/datapipeline/responses.py (new file)

@@ -0,0 +1,81 @@
from __future__ import unicode_literals

import json

from moto.core.responses import BaseResponse
from .models import datapipeline_backends


class DataPipelineResponse(BaseResponse):

    @property
    def parameters(self):
        # TODO this should really be moved to core/responses.py
        if self.body:
            return json.loads(self.body.decode("utf-8"))
        else:
            return self.querystring

    @property
    def datapipeline_backend(self):
        return datapipeline_backends[self.region]

    def create_pipeline(self):
        name = self.parameters['name']
        unique_id = self.parameters['uniqueId']
        pipeline = self.datapipeline_backend.create_pipeline(name, unique_id)
        return json.dumps({
            "pipelineId": pipeline.pipeline_id,
        })

    def list_pipelines(self):
        pipelines = self.datapipeline_backend.list_pipelines()
        return json.dumps({
            "hasMoreResults": False,
            "marker": None,
            "pipelineIdList": [
                pipeline.to_meta_json() for pipeline in pipelines
            ]
        })

    def describe_pipelines(self):
        pipeline_ids = self.parameters["pipelineIds"]
        pipelines = self.datapipeline_backend.describe_pipelines(pipeline_ids)

        return json.dumps({
            "pipelineDescriptionList": [
                pipeline.to_json() for pipeline in pipelines
            ]
        })

    def put_pipeline_definition(self):
        pipeline_id = self.parameters["pipelineId"]
        pipeline_objects = self.parameters["pipelineObjects"]

        self.datapipeline_backend.put_pipeline_definition(pipeline_id, pipeline_objects)
        return json.dumps({"errored": False})

    def get_pipeline_definition(self):
        pipeline_id = self.parameters["pipelineId"]
        pipeline_definition = self.datapipeline_backend.get_pipeline_definition(pipeline_id)
        return json.dumps({
            "pipelineObjects": [pipeline_object.to_json() for pipeline_object in pipeline_definition]
        })

    def describe_objects(self):
        pipeline_id = self.parameters["pipelineId"]
        object_ids = self.parameters["objectIds"]

        pipeline_objects = self.datapipeline_backend.describe_objects(object_ids, pipeline_id)
        return json.dumps({
            "hasMoreResults": False,
            "marker": None,
            "pipelineObjects": [
                pipeline_object.to_json() for pipeline_object in pipeline_objects
            ]
        })

    def activate_pipeline(self):
        pipeline_id = self.parameters["pipelineId"]
        self.datapipeline_backend.activate_pipeline(pipeline_id)
        return json.dumps({})

moto/datapipeline/urls.py (new file)

@@ -0,0 +1,10 @@
from __future__ import unicode_literals
from .responses import DataPipelineResponse

url_bases = [
    "https?://datapipeline.(.+).amazonaws.com",
]

url_paths = {
    '{0}/$': DataPipelineResponse.dispatch,
}

moto/datapipeline/utils.py (new file)

@@ -0,0 +1,23 @@
import collections
import six
from moto.core.utils import get_random_hex


def get_random_pipeline_id():
    return "df-{0}".format(get_random_hex(length=19))


def remove_capitalization_of_dict_keys(obj):
    if isinstance(obj, collections.Mapping):
        result = obj.__class__()
        for key, value in obj.items():
            normalized_key = key[:1].lower() + key[1:]
            result[normalized_key] = remove_capitalization_of_dict_keys(value)
        return result
    elif isinstance(obj, collections.Iterable) and not isinstance(obj, six.string_types):
        result = obj.__class__()
        for item in obj:
            result += (remove_capitalization_of_dict_keys(item),)
        return result
    else:
        return obj

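A hypothetical REPL demonstration of the helper above: AWS-style capitalized keys are lower-camelcased recursively, through nested dicts and sequences alike.

from moto.datapipeline.utils import remove_capitalization_of_dict_keys

remove_capitalization_of_dict_keys(
    {"Id": "Default", "Fields": [{"Key": "workerGroup", "StringValue": "wg-1"}]}
)
# -> {'id': 'Default', 'fields': [{'key': 'workerGroup', 'stringValue': 'wg-1'}]}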
moto/dynamodb2/models.py

@@ -121,6 +121,17 @@ class Item(object):
             # TODO deal with other types
             self.attrs[key] = DynamoType({"S": value})

+    def update_with_attribute_updates(self, attribute_updates):
+        for attribute_name, update_action in attribute_updates.items():
+            action = update_action['Action']
+            new_value = list(update_action['Value'].values())[0]
+            if action == 'PUT':
+                # TODO deal with other types
+                if isinstance(new_value, list) or isinstance(new_value, set):
+                    self.attrs[attribute_name] = DynamoType({"SS": new_value})
+                else:
+                    self.attrs[attribute_name] = DynamoType({"S": new_value})
+

 class Table(object):

@@ -411,12 +422,19 @@ class DynamoDBBackend(BaseBackend):
         return table.scan(scan_filters)

-    def update_item(self, table_name, key, update_expression):
+    def update_item(self, table_name, key, update_expression, attribute_updates):
         table = self.get_table(table_name)

+        if table.hash_key_attr in key:
+            # Sometimes the key is wrapped in a dict with the key name
+            key = key[table.hash_key_attr]
+
         hash_value = DynamoType(key)
         item = table.get_item(hash_value)
-        item.update(update_expression)
+        if update_expression:
+            item.update(update_expression)
+        else:
+            item.update_with_attribute_updates(attribute_updates)

         return item

     def delete_item(self, table_name, keys):

moto/dynamodb2/responses.py

@@ -373,8 +373,9 @@ class DynamoHandler(BaseResponse):
     def update_item(self):
         name = self.body['TableName']
         key = self.body['Key']
-        update_expression = self.body['UpdateExpression']
-        item = dynamodb_backend2.update_item(name, key, update_expression)
+        update_expression = self.body.get('UpdateExpression')
+        attribute_updates = self.body.get('AttributeUpdates')
+        item = dynamodb_backend2.update_item(name, key, update_expression, attribute_updates)
         item_dict = item.to_json()
         item_dict['ConsumedCapacityUnits'] = 0.5

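For context, an illustrative UpdateItem request body (table and values hypothetical) that now takes the AttributeUpdates branch because it carries no UpdateExpression:

request_body = {
    "TableName": "messages",
    "Key": {"forum_name": {"S": "the-key"}},
    # With no "UpdateExpression" present, update_item() falls through to
    # Item.update_with_attribute_updates() with this dict
    "AttributeUpdates": {
        "SentBy": {"Action": "PUT", "Value": {"S": "User A"}},
    },
}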
moto/ec2/models.py

@@ -98,7 +98,7 @@ from .utils import (

 def utc_date_and_time():
-    return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
+    return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')


 def validate_resource_ids(resource_ids):

@@ -710,6 +710,13 @@ class KeyPairBackend(object):
         return results

+    def import_key_pair(self, key_name, public_key_material):
+        if key_name in self.keypairs:
+            raise InvalidKeyPairDuplicateError(key_name)
+
+        self.keypairs[key_name] = keypair = random_key_pair()
+        keypair['name'] = key_name
+        return keypair
+

 class TagBackend(object):

@@ -1381,12 +1388,13 @@ class VolumeAttachment(object):

 class Volume(TaggedEC2Resource):
-    def __init__(self, ec2_backend, volume_id, size, zone):
+    def __init__(self, ec2_backend, volume_id, size, zone, snapshot_id=None):
         self.id = volume_id
         self.size = size
         self.zone = zone
         self.create_time = utc_date_and_time()
         self.attachment = None
+        self.snapshot_id = snapshot_id
         self.ec2_backend = ec2_backend

     @classmethod

@@ -1429,10 +1437,14 @@ class EBSBackend(object):
         self.snapshots = {}
         super(EBSBackend, self).__init__()

-    def create_volume(self, size, zone_name):
+    def create_volume(self, size, zone_name, snapshot_id=None):
         volume_id = random_volume_id()
         zone = self.get_zone_by_name(zone_name)
-        volume = Volume(self, volume_id, size, zone)
+        if snapshot_id:
+            snapshot = self.get_snapshot(snapshot_id)
+            if size is None:
+                size = snapshot.volume.size
+        volume = Volume(self, volume_id, size, zone, snapshot_id)
         self.volumes[volume_id] = volume
         return volume

moto/ec2/responses/__init__.py

@@ -66,3 +66,7 @@ class EC2Response(
     def ec2_backend(self):
         from moto.ec2.models import ec2_backends
         return ec2_backends[self.region]
+
+    @property
+    def should_autoescape(self):
+        return True

moto/ec2/responses/elastic_block_store.py

@@ -25,9 +25,10 @@ class ElasticBlockStore(BaseResponse):
         return template.render(snapshot=snapshot)

     def create_volume(self):
-        size = self.querystring.get('Size')[0]
-        zone = self.querystring.get('AvailabilityZone')[0]
-        volume = self.ec2_backend.create_volume(size, zone)
+        size = self._get_param('Size')
+        zone = self._get_param('AvailabilityZone')
+        snapshot_id = self._get_param('SnapshotId')
+        volume = self.ec2_backend.create_volume(size, zone, snapshot_id)
         template = self.response_template(CREATE_VOLUME_RESPONSE)
         return template.render(volume=volume)

@@ -110,7 +111,11 @@ CREATE_VOLUME_RESPONSE = """<CreateVolumeResponse xmlns="http://ec2.amazonaws.co
   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
   <volumeId>{{ volume.id }}</volumeId>
   <size>{{ volume.size }}</size>
-  <snapshotId/>
+  {% if volume.snapshot_id %}
+  <snapshotId>{{ volume.snapshot_id }}</snapshotId>
+  {% else %}
+  <snapshotId/>
+  {% endif %}
   <availabilityZone>{{ volume.zone.name }}</availabilityZone>
   <status>creating</status>
   <createTime>{{ volume.create_time}}</createTime>

@@ -124,7 +129,11 @@ DESCRIBE_VOLUMES_RESPONSE = """<DescribeVolumesResponse xmlns="http://ec2.amazon
     <item>
       <volumeId>{{ volume.id }}</volumeId>
       <size>{{ volume.size }}</size>
-      <snapshotId/>
+      {% if volume.snapshot_id %}
+      <snapshotId>{{ volume.snapshot_id }}</snapshotId>
+      {% else %}
+      <snapshotId/>
+      {% endif %}
       <availabilityZone>{{ volume.zone.name }}</availabilityZone>
       <status>{{ volume.status }}</status>
       <createTime>{{ volume.create_time}}</createTime>

@@ -198,9 +207,9 @@ DESCRIBE_SNAPSHOTS_RESPONSE = """<DescribeSnapshotsResponse xmlns="http://ec2.am
     <item>
       <snapshotId>{{ snapshot.id }}</snapshotId>
      <volumeId>{{ snapshot.volume.id }}</volumeId>
-      <status>pending</status>
+      <status>completed</status>
       <startTime>{{ snapshot.start_time}}</startTime>
-      <progress>30%</progress>
+      <progress>100%</progress>
       <ownerId>111122223333</ownerId>
       <volumeSize>{{ snapshot.volume.size }}</volumeSize>
       <description>{{ snapshot.description }}</description>

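A sketch (not part of the diff) of the new create-volume-from-snapshot path through boto2; sizes and zone are illustrative, and passing size=None relies on boto omitting the Size parameter so the backend copies it from the snapshot's source volume.

import boto.ec2
from moto import mock_ec2


@mock_ec2
def test_create_volume_from_snapshot():
    conn = boto.ec2.connect_to_region('us-east-1')
    volume = conn.create_volume(80, 'us-east-1a')
    snapshot = volume.create_snapshot('a test snapshot')

    new_volume = conn.create_volume(None, 'us-east-1a', snapshot=snapshot.id)
    assert new_volume.snapshot_id == snapshot.id
    assert new_volume.size == 80  # inherited from the source volume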
moto/ec2/responses/instances.py

@@ -1,4 +1,5 @@
 from __future__ import unicode_literals
+from boto.ec2.instancetype import InstanceType
 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores
 from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \

@@ -78,6 +79,11 @@ class InstanceResponse(BaseResponse):
         template = self.response_template(EC2_INSTANCE_STATUS)
         return template.render(instances=instances)

+    def describe_instance_types(self):
+        instance_types = [InstanceType(name='t1.micro', cores=1, memory=644874240, disk=0)]
+        template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES)
+        return template.render(instance_types=instance_types)
+
     def describe_instance_attribute(self):
         # TODO this and modify below should raise IncorrectInstanceState if
         # instance not in stopped state

@@ -586,3 +592,21 @@ EC2_INSTANCE_STATUS = """<?xml version="1.0" encoding="UTF-8"?>
     {% endfor %}
   </instanceStatusSet>
 </DescribeInstanceStatusResponse>"""
+
+EC2_DESCRIBE_INSTANCE_TYPES = """<?xml version="1.0" encoding="UTF-8"?>
+<DescribeInstanceTypesResponse xmlns="http://api.outscale.com/wsdl/fcuext/2014-04-15/">
+    <requestId>f8b86168-d034-4e65-b48d-3b84c78e64af</requestId>
+    <instanceTypeSet>
+    {% for instance_type in instance_types %}
+        <item>
+            <name>{{ instance_type.name }}</name>
+            <vcpu>{{ instance_type.cores }}</vcpu>
+            <memory>{{ instance_type.memory }}</memory>
+            <storageSize>{{ instance_type.disk }}</storageSize>
+            <storageCount>{{ instance_type.storageCount }}</storageCount>
+            <maxIpAddresses>{{ instance_type.maxIpAddresses }}</maxIpAddresses>
+            <ebsOptimizedAvailable>{{ instance_type.ebsOptimizedAvailable }}</ebsOptimizedAvailable>
+        </item>
+    {% endfor %}
+    </instanceTypeSet>
+</DescribeInstanceTypesResponse>"""

moto/ec2/responses/key_pairs.py

@@ -28,7 +28,11 @@ class KeyPairs(BaseResponse):
         return template.render(keypairs=keypairs)

     def import_key_pair(self):
-        raise NotImplementedError('KeyPairs.import_key_pair is not yet implemented')
+        name = self.querystring.get('KeyName')[0]
+        material = self.querystring.get('PublicKeyMaterial')[0]
+        keypair = self.ec2_backend.import_key_pair(name, material)
+        template = self.response_template(IMPORT_KEYPAIR_RESPONSE)
+        return template.render(**keypair)


 DESCRIBE_KEY_PAIRS_RESPONSE = """<DescribeKeyPairsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">

@@ -58,3 +62,10 @@ DELETE_KEY_PAIR_RESPONSE = """<DeleteKeyPairResponse xmlns="http://ec2.amazonaws
   <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
   <return>{{ success }}</return>
 </DeleteKeyPairResponse>"""
+
+IMPORT_KEYPAIR_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
+  <ImportKeyPairResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
+    <requestId>471f9fdd-8fe2-4a84-86b0-bd3d3e350979</requestId>
+    <keyName>{{ name }}</keyName>
+    <keyFingerprint>{{ fingerprint }}</keyFingerprint>
+  </ImportKeyPairResponse>"""

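A sketch (not in the diff) of the newly implemented ImportKeyPair call via boto2; the key material is a placeholder, since the backend only stores it alongside a generated fingerprint.

import boto.ec2
from moto import mock_ec2


@mock_ec2
def test_import_key_pair():
    conn = boto.ec2.connect_to_region('us-east-1')
    kp = conn.import_key_pair('my-key', b'ssh-rsa AAAA...example')
    assert kp.name == 'my-key'
    # importing the same name twice is rejected by the new backend check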
moto/ec2/responses/security_groups.py

@@ -9,9 +9,9 @@ def process_rules_from_querystring(querystring):
     except:
         group_name_or_id = querystring.get('GroupId')[0]

-    ip_protocol = querystring.get('IpPermissions.1.IpProtocol')[0]
-    from_port = querystring.get('IpPermissions.1.FromPort')[0]
-    to_port = querystring.get('IpPermissions.1.ToPort')[0]
+    ip_protocol = querystring.get('IpPermissions.1.IpProtocol', [None])[0]
+    from_port = querystring.get('IpPermissions.1.FromPort', [None])[0]
+    to_port = querystring.get('IpPermissions.1.ToPort', [None])[0]
     ip_ranges = []
     for key, value in querystring.items():
         if 'IpPermissions.1.IpRanges' in key:

moto/ec2/responses/tags.py

@@ -1,6 +1,5 @@
 from __future__ import unicode_literals

-from xml.sax.saxutils import escape
 from moto.core.responses import BaseResponse
 from moto.ec2.models import validate_resource_ids
 from moto.ec2.utils import sequence_from_querystring, tags_from_query_string, filters_from_querystring

@@ -26,8 +25,6 @@ class TagResponse(BaseResponse):
     def describe_tags(self):
         filters = filters_from_querystring(querystring_dict=self.querystring)
         tags = self.ec2_backend.describe_tags(filters=filters)
-        for tag in tags:
-            tag['value'] = escape(tag['value'])
         template = self.response_template(DESCRIBE_RESPONSE)
         return template.render(tags=tags)

moto/ec2/utils.py

@@ -453,27 +453,22 @@ def simple_aws_filter_to_re(filter_string):
     return tmp_filter


-# not really random ( http://xkcd.com/221/ )
 def random_key_pair():
+    def random_hex():
+        return chr(random.choice(list(range(48, 58)) + list(range(97, 102))))
+
+    def random_fingerprint():
+        return ':'.join([random_hex() + random_hex() for i in range(20)])
+
+    def random_material():
+        return ''.join([
+            chr(random.choice(list(range(65, 91)) + list(range(48, 58)) +
+                              list(range(97, 102))))
+            for i in range(1000)
+        ])
+    material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \
+        "-----END RSA PRIVATE KEY-----"
     return {
-        'fingerprint': ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d:37:2d:'
-                        '7d:b8:ca:9f:f5:f1:6f'),
-        'material': """---- BEGIN RSA PRIVATE KEY ----
-MIICiTCCAfICCQD6m7oRw0uXOjANBgkqhkiG9w0BAQUFADCBiDELMAkGA1UEBhMC
-VVMxCzAJBgNVBAgTAldBMRAwDgYDVQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6
-b24xFDASBgNVBAsTC0lBTSBDb25zb2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAd
-BgkqhkiG9w0BCQEWEG5vb25lQGFtYXpvbi5jb20wHhcNMTEwNDI1MjA0NTIxWhcN
-MTIwNDI0MjA0NTIxWjCBiDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMRAwDgYD
-VQQHEwdTZWF0dGxlMQ8wDQYDVQQKEwZBbWF6b24xFDASBgNVBAsTC0lBTSBDb25z
-b2xlMRIwEAYDVQQDEwlUZXN0Q2lsYWMxHzAdBgkqhkiG9w0BCQEWEG5vb25lQGFt
-YXpvbi5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMaK0dn+a4GmWIWJ
-21uUSfwfEvySWtC2XADZ4nB+BLYgVIk60CpiwsZ3G93vUEIO3IyNoH/f0wYK8m9T
-rDHudUZg3qX4waLG5M43q7Wgc/MbQITxOUSQv7c7ugFFDzQGBzZswY6786m86gpE
-Ibb3OhjZnzcvQAaRHhdlQWIMm2nrAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAtCu4
-nUhVVxYUntneD9+h8Mg9q6q+auNKyExzyLwaxlAoo7TJHidbtS4J5iNmZgXL0Fkb
-FFBjvSfpJIlJ00zbhNYS5f6GuoEDmFJl0ZxBHjJnyp378OD8uTs7fLvjx79LjSTb
-NYiytVbZPQUQ5Yaxu2jXnimvw3rrszlaEXAMPLE
------END RSA PRIVATE KEY-----"""
+        'fingerprint': random_fingerprint(),
+        'material': material
     }

moto/glacier/responses.py

@@ -11,6 +11,7 @@ from .utils import region_from_glacier_url, vault_from_glacier_url

 class GlacierResponse(_TemplateEnvironmentMixin):
     def __init__(self, backend):
+        super(GlacierResponse, self).__init__()
         self.backend = backend

     @classmethod

moto/kinesis/exceptions.py

@@ -13,6 +13,15 @@ class ResourceNotFoundError(BadRequest):
         })


+class ResourceInUseError(BadRequest):
+    def __init__(self, message):
+        super(ResourceInUseError, self).__init__()
+        self.description = json.dumps({
+            "message": message,
+            '__type': 'ResourceInUseException',
+        })
+
+
 class StreamNotFoundError(ResourceNotFoundError):
     def __init__(self, stream_name):
         super(StreamNotFoundError, self).__init__(

moto/kinesis/models.py

@@ -1,9 +1,12 @@
 from __future__ import unicode_literals

+import datetime
+import time
+
 import boto.kinesis
 from moto.compat import OrderedDict
 from moto.core import BaseBackend
-from .exceptions import StreamNotFoundError, ShardNotFoundError
+from .exceptions import StreamNotFoundError, ShardNotFoundError, ResourceInUseError
 from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose_shard_iterator
@@ -124,12 +127,82 @@ class Stream(object):
         }


+class FirehoseRecord(object):
+    def __init__(self, record_data):
+        self.record_id = 12345678
+        self.record_data = record_data
+
+
+class DeliveryStream(object):
+    def __init__(self, stream_name, **stream_kwargs):
+        self.name = stream_name
+        self.redshift_username = stream_kwargs['redshift_username']
+        self.redshift_password = stream_kwargs['redshift_password']
+        self.redshift_jdbc_url = stream_kwargs['redshift_jdbc_url']
+        self.redshift_role_arn = stream_kwargs['redshift_role_arn']
+        self.redshift_copy_command = stream_kwargs['redshift_copy_command']
+
+        self.redshift_s3_role_arn = stream_kwargs['redshift_s3_role_arn']
+        self.redshift_s3_bucket_arn = stream_kwargs['redshift_s3_bucket_arn']
+        self.redshift_s3_prefix = stream_kwargs['redshift_s3_prefix']
+        self.redshift_s3_compression_format = stream_kwargs.get('redshift_s3_compression_format', 'UNCOMPRESSED')
+        self.redshift_s3_buffering_hings = stream_kwargs['redshift_s3_buffering_hings']
+
+        self.records = []
+        self.status = 'ACTIVE'
+        self.create_at = datetime.datetime.utcnow()
+        self.last_updated = datetime.datetime.utcnow()
+
+    @property
+    def arn(self):
+        return 'arn:aws:firehose:us-east-1:123456789012:deliverystream/{0}'.format(self.name)
+
+    def to_dict(self):
+        return {
+            "DeliveryStreamDescription": {
+                "CreateTimestamp": time.mktime(self.create_at.timetuple()),
+                "DeliveryStreamARN": self.arn,
+                "DeliveryStreamName": self.name,
+                "DeliveryStreamStatus": self.status,
+                "Destinations": [
+                    {
+                        "DestinationId": "string",
+                        "RedshiftDestinationDescription": {
+                            "ClusterJDBCURL": self.redshift_jdbc_url,
+                            "CopyCommand": self.redshift_copy_command,
+                            "RoleARN": self.redshift_role_arn,
+                            "S3DestinationDescription": {
+                                "BucketARN": self.redshift_s3_bucket_arn,
+                                "BufferingHints": self.redshift_s3_buffering_hings,
+                                "CompressionFormat": self.redshift_s3_compression_format,
+                                "Prefix": self.redshift_s3_prefix,
+                                "RoleARN": self.redshift_s3_role_arn
+                            },
+                            "Username": self.redshift_username,
+                        },
+                    }
+                ],
+                "HasMoreDestinations": False,
+                "LastUpdateTimestamp": time.mktime(self.last_updated.timetuple()),
+                "VersionId": "string",
+            }
+        }
+
+    def put_record(self, record_data):
+        record = FirehoseRecord(record_data)
+        self.records.append(record)
+        return record
+
+
 class KinesisBackend(BaseBackend):

     def __init__(self):
         self.streams = {}
+        self.delivery_streams = {}

     def create_stream(self, stream_name, shard_count, region):
+        if stream_name in self.streams:
+            raise ResourceInUseError(stream_name)
         stream = Stream(stream_name, shard_count, region)
         self.streams[stream_name] = stream
         return stream

@@ -180,6 +253,52 @@ class KinesisBackend(BaseBackend):

         return sequence_number, shard_id

+    def put_records(self, stream_name, records):
+        stream = self.describe_stream(stream_name)
+
+        response = {
+            "FailedRecordCount": 0,
+            "Records": []
+        }
+
+        for record in records:
+            partition_key = record.get("PartitionKey")
+            explicit_hash_key = record.get("ExplicitHashKey")
+            data = record.get("Data")
+
+            sequence_number, shard_id = stream.put_record(
+                partition_key, explicit_hash_key, None, data
+            )
+            response['Records'].append({
+                "SequenceNumber": sequence_number,
+                "ShardId": shard_id
+            })
+
+        return response
+
+    ''' Firehose '''
+    def create_delivery_stream(self, stream_name, **stream_kwargs):
+        stream = DeliveryStream(stream_name, **stream_kwargs)
+        self.delivery_streams[stream_name] = stream
+        return stream
+
+    def get_delivery_stream(self, stream_name):
+        if stream_name in self.delivery_streams:
+            return self.delivery_streams[stream_name]
+        else:
+            raise StreamNotFoundError(stream_name)
+
+    def list_delivery_streams(self):
+        return self.delivery_streams.values()
+
+    def delete_delivery_stream(self, stream_name):
+        self.delivery_streams.pop(stream_name)
+
+    def put_firehose_record(self, stream_name, record_data):
+        stream = self.get_delivery_stream(stream_name)
+        record = stream.put_record(record_data)
+        return record
+

 kinesis_backends = {}
 for region in boto.kinesis.regions():
     kinesis_backends[region.name] = KinesisBackend()

moto/kinesis/responses.py

@@ -16,6 +16,11 @@ class KinesisResponse(BaseResponse):
     def kinesis_backend(self):
         return kinesis_backends[self.region]

+    @property
+    def is_firehose(self):
+        host = self.headers.get('host') or self.headers['Host']
+        return host.startswith('firehose')
+
     def create_stream(self):
         stream_name = self.parameters.get('StreamName')
         shard_count = self.parameters.get('ShardCount')

@@ -67,6 +72,8 @@ class KinesisResponse(BaseResponse):
         })

     def put_record(self):
+        if self.is_firehose:
+            return self.firehose_put_record()
         stream_name = self.parameters.get("StreamName")
         partition_key = self.parameters.get("PartitionKey")
         explicit_hash_key = self.parameters.get("ExplicitHashKey")

@@ -81,3 +88,83 @@ class KinesisResponse(BaseResponse):
             "SequenceNumber": sequence_number,
             "ShardId": shard_id,
         })
+
+    def put_records(self):
+        if self.is_firehose:
+            return self.firehose_put_record()
+        stream_name = self.parameters.get("StreamName")
+        records = self.parameters.get("Records")
+        response = self.kinesis_backend.put_records(
+            stream_name, records
+        )
+        return json.dumps(response)
+
+    ''' Firehose '''
+    def create_delivery_stream(self):
+        stream_name = self.parameters['DeliveryStreamName']
+        redshift_config = self.parameters.get('RedshiftDestinationConfiguration')
+
+        stream_kwargs = {}
+        if redshift_config:
+            redshift_s3_config = redshift_config['S3Configuration']
+            stream_kwargs = {
+                'redshift_username': redshift_config['Username'],
+                'redshift_password': redshift_config['Password'],
+                'redshift_jdbc_url': redshift_config['ClusterJDBCURL'],
+                'redshift_role_arn': redshift_config['RoleARN'],
+                'redshift_copy_command': redshift_config['CopyCommand'],
+
+                'redshift_s3_role_arn': redshift_s3_config['RoleARN'],
+                'redshift_s3_bucket_arn': redshift_s3_config['BucketARN'],
+                'redshift_s3_prefix': redshift_s3_config['Prefix'],
+                'redshift_s3_compression_format': redshift_s3_config.get('CompressionFormat'),
+                'redshift_s3_buffering_hings': redshift_s3_config['BufferingHints'],
+            }
+        stream = self.kinesis_backend.create_delivery_stream(stream_name, **stream_kwargs)
+        return json.dumps({
+            'DeliveryStreamARN': stream.arn
+        })
+
+    def describe_delivery_stream(self):
+        stream_name = self.parameters["DeliveryStreamName"]
+        stream = self.kinesis_backend.get_delivery_stream(stream_name)
+        return json.dumps(stream.to_dict())
+
+    def list_delivery_streams(self):
+        streams = self.kinesis_backend.list_delivery_streams()
+        return json.dumps({
+            "DeliveryStreamNames": [
+                stream.name for stream in streams
+            ],
+            "HasMoreDeliveryStreams": False
+        })
+
+    def delete_delivery_stream(self):
+        stream_name = self.parameters['DeliveryStreamName']
+        self.kinesis_backend.delete_delivery_stream(stream_name)
+        return json.dumps({})
+
+    def firehose_put_record(self):
+        stream_name = self.parameters['DeliveryStreamName']
+        record_data = self.parameters['Record']['Data']
+        record = self.kinesis_backend.put_firehose_record(stream_name, record_data)
+        return json.dumps({
+            "RecordId": record.record_id,
+        })
+
+    def put_record_batch(self):
+        stream_name = self.parameters['DeliveryStreamName']
+        records = self.parameters['Records']
+        request_responses = []
+        for record in records:
+            record_response = self.kinesis_backend.put_firehose_record(stream_name, record['Data'])
+            request_responses.append({
+                "RecordId": record_response.record_id
+            })
+        return json.dumps({
+            "FailedPutCount": 0,
+            "RequestResponses": request_responses,
+        })

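Since boto2 has no Firehose client, a direct-backend sketch (not part of the diff) showing the kwargs create_delivery_stream() expects; all names, ARNs, and values are illustrative.

from moto.kinesis.models import kinesis_backends

backend = kinesis_backends['us-east-1']
stream = backend.create_delivery_stream(
    'my-firehose',
    redshift_username='user', redshift_password='pass',
    redshift_jdbc_url='jdbc:redshift://host.example:5439/db',
    redshift_role_arn='arn:aws:iam::123456789012:role/firehose',
    redshift_copy_command='COPY mytable FROM ...',
    redshift_s3_role_arn='arn:aws:iam::123456789012:role/firehose-s3',
    redshift_s3_bucket_arn='arn:aws:s3:::my-bucket',
    redshift_s3_prefix='firehose/',
    redshift_s3_buffering_hings={'IntervalInSeconds': 300, 'SizeInMBs': 5},
)
record = backend.put_firehose_record('my-firehose', 'some data')
assert record.record_id == 12345678  # FirehoseRecord ids are fixed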
moto/kinesis/urls.py

@@ -3,6 +3,7 @@ from .responses import KinesisResponse

 url_bases = [
     "https?://kinesis.(.+).amazonaws.com",
+    "https?://firehose.(.+).amazonaws.com",
 ]

 url_paths = {

moto/rds2/models.py

@@ -135,7 +135,7 @@ class Database(object):
             "engine": properties.get("Engine"),
             "engine_version": properties.get("EngineVersion"),
             "iops": properties.get("Iops"),
-            "master_password": properties.get('MasterUserPassword'),
+            "master_user_password": properties.get('MasterUserPassword'),
             "master_username": properties.get('MasterUsername'),
             "multi_az": properties.get("MultiAZ"),
             "port": properties.get('Port', 3306),

moto/rds2/responses.py

@@ -27,7 +27,7 @@ class RDS2Response(BaseResponse):
             "engine": self._get_param("Engine"),
             "engine_version": self._get_param("EngineVersion"),
             "iops": self._get_int_param("Iops"),
-            "master_password": self._get_param('MasterUserPassword'),
+            "master_user_password": self._get_param('MasterUserPassword'),
             "master_username": self._get_param('MasterUsername'),
             "multi_az": self._get_bool_param("MultiAZ"),
             # OptionGroupName

@@ -504,4 +504,4 @@ ADD_TAGS_TO_RESOURCE_TEMPLATE = \
 REMOVE_TAGS_FROM_RESOURCE_TEMPLATE = \
     """{"RemoveTagsFromResourceResponse": {"ResponseMetadata": {"RequestId": "c6499a01-a664-11e4-8069-fb454b71a80e"}}}
     """

moto/route53/models.py

@@ -68,6 +68,7 @@ class RecordSet(object):
         self.records = kwargs.get('ResourceRecords', [])
         self.set_identifier = kwargs.get('SetIdentifier')
         self.weight = kwargs.get('Weight')
+        self.region = kwargs.get('Region')
         self.health_check = kwargs.get('HealthCheckId')

     @classmethod

@@ -89,6 +90,9 @@ class RecordSet(object):
         {% if record_set.weight %}
         <Weight>{{ record_set.weight }}</Weight>
         {% endif %}
+        {% if record_set.region %}
+        <Region>{{ record_set.region }}</Region>
+        {% endif %}
         <TTL>{{ record_set.ttl }}</TTL>
         <ResourceRecords>
         {% for record in record_set.records %}

moto/s3/models.py

@@ -24,6 +24,7 @@ class FakeKey(object):
         self.name = name
         self.value = value
         self.last_modified = datetime.datetime.utcnow()
+        self.acl = get_canned_acl('private')
         self._storage_class = storage
         self._metadata = {}
         self._expiry = None

@@ -45,6 +46,9 @@ class FakeKey(object):
     def set_storage_class(self, storage_class):
         self._storage_class = storage_class

+    def set_acl(self, acl):
+        self.acl = acl
+
     def append_to_value(self, value):
         self.value += value
         self.last_modified = datetime.datetime.utcnow()

@@ -161,6 +165,61 @@ class FakeMultipart(object):
             yield self.parts[part_id]


+class FakeGrantee(object):
+    def __init__(self, id='', uri='', display_name=''):
+        self.id = id
+        self.uri = uri
+        self.display_name = display_name
+
+    @property
+    def type(self):
+        return 'Group' if self.uri else 'CanonicalUser'
+
+
+ALL_USERS_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/global/AllUsers')
+AUTHENTICATED_USERS_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/global/AuthenticatedUsers')
+LOG_DELIVERY_GRANTEE = FakeGrantee(uri='http://acs.amazonaws.com/groups/s3/LogDelivery')
+
+PERMISSION_FULL_CONTROL = 'FULL_CONTROL'
+PERMISSION_WRITE = 'WRITE'
+PERMISSION_READ = 'READ'
+PERMISSION_WRITE_ACP = 'WRITE_ACP'
+PERMISSION_READ_ACP = 'READ_ACP'
+
+
+class FakeGrant(object):
+    def __init__(self, grantees, permissions):
+        self.grantees = grantees
+        self.permissions = permissions
+
+
+class FakeAcl(object):
+    def __init__(self, grants=[]):
+        self.grants = grants
+
+
+def get_canned_acl(acl):
+    owner_grantee = FakeGrantee(id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a')
+    grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])]
+    if acl == 'private':
+        pass  # no other permissions
+    elif acl == 'public-read':
+        grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ]))
+    elif acl == 'public-read-write':
+        grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ, PERMISSION_WRITE]))
+    elif acl == 'authenticated-read':
+        grants.append(FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ]))
+    elif acl == 'bucket-owner-read':
+        pass  # TODO: bucket owner ACL
+    elif acl == 'bucket-owner-full-control':
+        pass  # TODO: bucket owner ACL
+    elif acl == 'log-delivery-write':
+        grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [PERMISSION_READ_ACP, PERMISSION_WRITE]))
+    else:
+        assert False, 'Unknown canned acl: %s' % (acl,)
+    return FakeAcl(grants=grants)
+
+
 class LifecycleRule(object):
     def __init__(self, id=None, prefix=None, status=None, expiration_days=None,
                  expiration_date=None, transition_days=None,

@@ -185,6 +244,8 @@ class FakeBucket(object):
         self.versioning_status = None
         self.rules = []
         self.policy = None
+        self.website_configuration = None
+        self.acl = get_canned_acl('private')

     @property
     def location(self):

@@ -213,6 +274,9 @@ class FakeBucket(object):
     def delete_lifecycle(self):
         self.rules = []

+    def set_website_configuration(self, website_configuration):
+        self.website_configuration = website_configuration
+
     def get_cfn_attribute(self, attribute_name):
         from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
         if attribute_name == 'DomainName':

@@ -221,6 +285,9 @@ class FakeBucket(object):
             raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"')
         raise UnformattedGetAttTemplateException()

+    def set_acl(self, acl):
+        self.acl = acl
+

 class S3Backend(BaseBackend):

@@ -284,6 +351,14 @@ class S3Backend(BaseBackend):
         bucket = self.get_bucket(bucket_name)
         bucket.set_lifecycle(rules)

+    def set_bucket_website_configuration(self, bucket_name, website_configuration):
+        bucket = self.get_bucket(bucket_name)
+        bucket.set_website_configuration(website_configuration)
+
+    def get_bucket_website_configuration(self, bucket_name):
+        bucket = self.get_bucket(bucket_name)
+        return bucket.website_configuration
+
     def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
         key_name = clean_key_name(key_name)

@@ -399,7 +474,7 @@ class S3Backend(BaseBackend):
         bucket = self.get_bucket(bucket_name)
         return bucket.keys.pop(key_name)

-    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
+    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None, acl=None):
         src_key_name = clean_key_name(src_key_name)
         dest_key_name = clean_key_name(dest_key_name)
         src_bucket = self.get_bucket(src_bucket_name)

@@ -409,6 +484,17 @@ class S3Backend(BaseBackend):
         key = key.copy(dest_key_name)
         dest_bucket.keys[dest_key_name] = key
         if storage is not None:
-            dest_bucket.keys[dest_key_name].set_storage_class(storage)
+            key.set_storage_class(storage)
+        if acl is not None:
+            key.set_acl(acl)
+
+    def set_bucket_acl(self, bucket_name, acl):
+        bucket = self.get_bucket(bucket_name)
+        bucket.set_acl(acl)
+
+    def get_bucket_acl(self, bucket_name):
+        bucket = self.get_bucket(bucket_name)
+        return bucket.acl

 s3_backend = S3Backend()

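A hedged boto2 sketch (not part of the diff) of the new canned-ACL plumbing; it assumes the S3_OBJECT_ACL_RESPONSE template and _acl_from_headers helper referenced in the next file, and the bucket name is illustrative.

import boto
from moto import mock_s3


@mock_s3
def test_bucket_canned_acl():
    conn = boto.connect_s3()
    bucket = conn.create_bucket('mybucket')
    bucket.set_acl('public-read')  # PUT /?acl with an x-amz-acl header

    grants = bucket.get_acl().acl.grants
    assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
               and g.permission == 'READ' for g in grants)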
moto/s3/responses.py

@ -9,7 +9,7 @@ import xmltodict
from moto.core.responses import _TemplateEnvironmentMixin from moto.core.responses import _TemplateEnvironmentMixin
from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder
from .models import s3_backend from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl
from .utils import bucket_name_from_url, metadata_from_headers from .utils import bucket_name_from_url, metadata_from_headers
from xml.dom import minidom from xml.dom import minidom
@ -22,10 +22,18 @@ def parse_key_name(pth):
class ResponseObject(_TemplateEnvironmentMixin): class ResponseObject(_TemplateEnvironmentMixin):
def __init__(self, backend, bucket_name_from_url, parse_key_name): def __init__(self, backend, bucket_name_from_url, parse_key_name,
is_delete_keys=None):
super(ResponseObject, self).__init__()
self.backend = backend self.backend = backend
self.bucket_name_from_url = bucket_name_from_url self.bucket_name_from_url = bucket_name_from_url
self.parse_key_name = parse_key_name self.parse_key_name = parse_key_name
if is_delete_keys:
self.is_delete_keys = is_delete_keys
@staticmethod
def is_delete_keys(path, bucket_name):
return path == u'/?delete'
def all_buckets(self): def all_buckets(self):
# No bucket specified. Listing all buckets # No bucket specified. Listing all buckets
@ -72,7 +80,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
elif method == 'GET': elif method == 'GET':
return self._bucket_response_get(bucket_name, querystring, headers) return self._bucket_response_get(bucket_name, querystring, headers)
elif method == 'PUT': elif method == 'PUT':
return self._bucket_response_put(body, region_name, bucket_name, querystring, headers) return self._bucket_response_put(request, body, region_name, bucket_name, querystring, headers)
elif method == 'DELETE': elif method == 'DELETE':
return self._bucket_response_delete(body, bucket_name, querystring, headers) return self._bucket_response_delete(body, bucket_name, querystring, headers)
elif method == 'POST': elif method == 'POST':
@ -94,29 +102,36 @@ class ResponseObject(_TemplateEnvironmentMixin):
prefix = querystring.get('prefix', [None])[0] prefix = querystring.get('prefix', [None])[0]
multiparts = [upload for upload in multiparts if upload.key_name.startswith(prefix)] multiparts = [upload for upload in multiparts if upload.key_name.startswith(prefix)]
template = self.response_template(S3_ALL_MULTIPARTS) template = self.response_template(S3_ALL_MULTIPARTS)
return 200, headers, template.render( return template.render(
bucket_name=bucket_name, bucket_name=bucket_name,
uploads=multiparts) uploads=multiparts)
elif 'location' in querystring: elif 'location' in querystring:
bucket = self.backend.get_bucket(bucket_name) bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION) template = self.response_template(S3_BUCKET_LOCATION)
return 200, headers, template.render(location=bucket.location) return template.render(location=bucket.location)
elif 'lifecycle' in querystring: elif 'lifecycle' in querystring:
bucket = self.backend.get_bucket(bucket_name) bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules: if not bucket.rules:
return 404, headers, "NoSuchLifecycleConfiguration" return 404, headers, "NoSuchLifecycleConfiguration"
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION) template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return 200, headers, template.render(rules=bucket.rules) return template.render(rules=bucket.rules)
elif 'versioning' in querystring: elif 'versioning' in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name) versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING) template = self.response_template(S3_BUCKET_GET_VERSIONING)
return 200, headers, template.render(status=versioning) return template.render(status=versioning)
elif 'policy' in querystring: elif 'policy' in querystring:
policy = self.backend.get_bucket_policy(bucket_name) policy = self.backend.get_bucket_policy(bucket_name)
if not policy: if not policy:
template = self.response_template(S3_NO_POLICY) template = self.response_template(S3_NO_POLICY)
return 404, headers, template.render(bucket_name=bucket_name) return 404, headers, template.render(bucket_name=bucket_name)
return 200, headers, policy return 200, headers, policy
elif 'website' in querystring:
website_configuration = self.backend.get_bucket_website_configuration(bucket_name)
return website_configuration
elif 'acl' in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
elif 'versions' in querystring: elif 'versions' in querystring:
delimiter = querystring.get('delimiter', [None])[0] delimiter = querystring.get('delimiter', [None])[0]
encoding_type = querystring.get('encoding-type', [None])[0] encoding_type = querystring.get('encoding-type', [None])[0]
@ -157,7 +172,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
result_folders=result_folders result_folders=result_folders
) )
def _bucket_response_put(self, body, region_name, bucket_name, querystring, headers): def _bucket_response_put(self, request, body, region_name, bucket_name, querystring, headers):
if 'versioning' in querystring: if 'versioning' in querystring:
ver = re.search('<Status>([A-Za-z]+)</Status>', body) ver = re.search('<Status>([A-Za-z]+)</Status>', body)
if ver: if ver:
@ -176,6 +191,14 @@ class ResponseObject(_TemplateEnvironmentMixin):
elif 'policy' in querystring: elif 'policy' in querystring:
self.backend.set_bucket_policy(bucket_name, body) self.backend.set_bucket_policy(bucket_name, body)
return 'True' return 'True'
elif 'acl' in querystring:
acl = self._acl_from_headers(request.headers)
# TODO: Support the XML-based ACL format
self.backend.set_bucket_acl(bucket_name, acl)
return ""
elif 'website' in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
else:
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
@@ -209,7 +232,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 409, headers, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, bucket_name, headers):
-if request.path == u'/?delete':
+if self.is_delete_keys(request.path, bucket_name):
return self._bucket_response_delete_keys(request, bucket_name, headers)
# POST to bucket-url should create file from form
@@ -294,7 +317,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
-query = parse_qs(parsed_url.query)
+query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(parsed_url.path)
@@ -310,18 +333,18 @@ class ResponseObject(_TemplateEnvironmentMixin):
if method == 'GET':
return self._key_response_get(bucket_name, query, key_name, headers)
elif method == 'PUT':
-return self._key_response_put(request, parsed_url, body, bucket_name, query, key_name, headers)
+return self._key_response_put(request, body, bucket_name, query, key_name, headers)
elif method == 'HEAD':
return self._key_response_head(bucket_name, key_name, headers)
elif method == 'DELETE':
return self._key_response_delete(bucket_name, query, key_name, headers)
elif method == 'POST':
-return self._key_response_post(request, body, parsed_url, bucket_name, query, key_name, headers)
+return self._key_response_post(request, body, bucket_name, query, key_name, headers)
else:
raise NotImplementedError("Method {0} has not been implemented in the S3 backend yet".format(method))
def _key_response_get(self, bucket_name, query, key_name, headers):
-if 'uploadId' in query:
+if query.get('uploadId'):
upload_id = query['uploadId'][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
@@ -335,14 +358,18 @@ class ResponseObject(_TemplateEnvironmentMixin):
version_id = query.get('versionId', [None])[0]
key = self.backend.get_key(
bucket_name, key_name, version_id=version_id)
if 'acl' in query:
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, headers, template.render(obj=key)
if key:
headers.update(key.metadata)
return 200, headers, key.value
else:
return 404, headers, ""
-def _key_response_put(self, request, parsed_url, body, bucket_name, query, key_name, headers):
-if 'uploadId' in query and 'partNumber' in query:
+def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
+if query.get('uploadId') and query.get('partNumber'):
upload_id = query['uploadId'][0]
part_number = int(query['partNumber'][0])
if 'x-amz-copy-source' in request.headers:
@@ -361,16 +388,19 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 200, headers, response
storage_class = request.headers.get('x-amz-storage-class', 'STANDARD')
acl = self._acl_from_headers(request.headers)
-if parsed_url.query == 'acl':
-# We don't implement ACL yet, so just return
+if 'acl' in query:
+key = self.backend.get_key(bucket_name, key_name)
# TODO: Support the XML-based ACL format
key.set_acl(acl)
return 200, headers, ""
if 'x-amz-copy-source' in request.headers:
# Copy key
src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/", 1)
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
-storage=storage_class)
+storage=storage_class, acl=acl)
mdirective = request.headers.get('x-amz-metadata-directive')
if mdirective is not None and mdirective == 'REPLACE':
new_key = self.backend.get_key(bucket_name, key_name)
@@ -393,6 +423,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
request.streaming = True
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata)
new_key.set_acl(acl)
template = self.response_template(S3_OBJECT_RESPONSE)
headers.update(new_key.response_dict)
@@ -407,8 +438,40 @@ class ResponseObject(_TemplateEnvironmentMixin):
else:
return 404, headers, ""
def _acl_from_headers(self, headers):
canned_acl = headers.get('x-amz-acl', '')
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
if not header.startswith('x-amz-grant-'):
continue
permission = {
'read': 'READ',
'write': 'WRITE',
'read-acp': 'READ_ACP',
'write-acp': 'WRITE_ACP',
'full-control': 'FULL_CONTROL',
}[header[len('x-amz-grant-'):]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match('([^=]+)="([^"]+)"', key_and_value.strip()).groups()
if key.lower() == 'id':
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
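For reference, each x-amz-grant-* header parsed above carries a comma-separated list of key="value" pairs, where the key is an id or a uri. A quick sketch of that grammar (the id and uri values here are illustrative):

import re

# Hypothetical value a client might send for x-amz-grant-read:
value = 'id="75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a", uri="http://acs.amazonaws.com/groups/global/AllUsers"'
for key_and_value in value.split(","):
    key, val = re.match('([^=]+)="([^"]+)"', key_and_value.strip()).groups()
    print(key, val)  # 'id' with a canonical user id, then 'uri' with a group URI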
def _key_response_delete(self, bucket_name, query, key_name, headers):
-if 'uploadId' in query:
+if query.get('uploadId'):
upload_id = query['uploadId'][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, headers, ""
@@ -428,8 +491,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
-def _key_response_post(self, request, body, parsed_url, bucket_name, query, key_name, headers):
-if body == b'' and parsed_url.query == 'uploads':
+def _key_response_post(self, request, body, bucket_name, query, key_name, headers):
+if body == b'' and 'uploads' in query:
metadata = metadata_from_headers(request.headers)
multipart = self.backend.initiate_multipart(bucket_name, key_name, metadata)
@@ -441,7 +504,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
)
return 200, headers, response
-if 'uploadId' in query:
+if query.get('uploadId'):
body = self._complete_multipart_body(body)
upload_id = query['uploadId'][0]
key = self.backend.complete_multipart(bucket_name, upload_id, body)
@@ -451,7 +514,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
key_name=key.name,
etag=key.etag,
)
-elif parsed_url.query == 'restore':
+elif 'restore' in query:
es = minidom.parseString(body).getElementsByTagName('Days')
days = es[0].childNodes[0].wholeText
key = self.backend.get_key(bucket_name, key_name)
@@ -635,6 +698,37 @@ S3_OBJECT_RESPONSE = """<PutObjectResponse xmlns="http://s3.amazonaws.com/doc/20
</PutObjectResponse>
</PutObjectResponse>"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_COPY_RESPONSE = """<CopyObjectResponse xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<CopyObjectResponse>
<ETag>{{ key.etag }}</ETag>
@@ -710,7 +804,7 @@ S3_ALL_MULTIPARTS = """<?xml version="1.0" encoding="UTF-8"?>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
-<DisplayName>OwnerDisplayName</DisplayName>
+<DisplayName>webfile</DisplayName>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
@@ -9,8 +9,14 @@ from moto.s3.responses import ResponseObject
def parse_key_name(pth):
return "/".join(pth.rstrip("/").split("/")[2:])
def is_delete_keys(path, bucket_name):
return path == u'/' + bucket_name + u'/?delete'
S3BucketPathResponseInstance = ResponseObject(
s3bucket_path_backend,
bucket_name_from_url,
parse_key_name,
is_delete_keys,
)
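is_delete_keys exists because this dialect keeps the bucket name in the path, so a multi-object delete POSTs to a different URL than in the virtual-host dialect; roughly:

# Virtual-host style: POST http://foobar.s3.amazonaws.com/?delete  -> path == '/?delete'
# Bucket-path style:  POST http://s3.amazonaws.com/foobar/?delete  -> path == '/foobar/?delete'
is_delete_keys('/foobar/?delete', 'foobar')  # True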
@@ -34,7 +34,7 @@ class Message(object):
@property
def md5(self):
body_md5 = hashlib.md5()
-body_md5.update(self.body.encode('utf-8'))
+body_md5.update(self._body.encode('utf-8'))
return body_md5.hexdigest()
@property
@@ -106,9 +106,10 @@ class Queue(object):
'VisibilityTimeout',
'WaitTimeSeconds']
-def __init__(self, name, visibility_timeout, wait_time_seconds):
+def __init__(self, name, visibility_timeout, wait_time_seconds, region):
self.name = name
self.visibility_timeout = visibility_timeout or 30
self.region = region
# wait_time_seconds will be set to immediate return messages
self.wait_time_seconds = wait_time_seconds or 0
@@ -179,6 +180,10 @@ Queue(object):
result[attribute] = getattr(self, camelcase_to_underscores(attribute))
return result
@property
def url(self):
return "http://sqs.{0}.amazonaws.com/123456789012/{1}".format(self.region, self.name)
@property
def messages(self):
return [message for message in self._messages if message.visible and not message.delayed]
@@ -196,14 +201,20 @@
class SQSBackend(BaseBackend):
-def __init__(self):
+def __init__(self, region_name):
self.region_name = region_name
self.queues = {}
super(SQSBackend, self).__init__()
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_queue(self, name, visibility_timeout, wait_time_seconds):
queue = self.queues.get(name)
if queue is None:
-queue = Queue(name, visibility_timeout, wait_time_seconds)
+queue = Queue(name, visibility_timeout, wait_time_seconds, self.region_name)
self.queues[name] = queue
return queue
@@ -314,4 +325,4 @@
sqs_backends = {}
for region in boto.sqs.regions():
-sqs_backends[region.name] = SQSBackend()
+sqs_backends[region.name] = SQSBackend(region.name)
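Since each backend now knows its region, queue URLs come out region-qualified; a sketch of the resulting behaviour (the account id 123456789012 is the value hard-coded in the url property above):

queue = Queue('test-queue', visibility_timeout=None, wait_time_seconds=None, region='us-west-1')
queue.url  # 'http://sqs.us-west-1.amazonaws.com/123456789012/test-queue'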
@@ -11,6 +11,7 @@ from .exceptions import (
)
MAXIMUM_VISIBILTY_TIMEOUT = 43200
MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB
DEFAULT_RECEIVED_MESSAGES = 1
SQS_REGION_REGEX = r'://(.+?)\.queue\.amazonaws\.com'
@@ -106,6 +107,9 @@ class SQSResponse(BaseResponse):
message = self.querystring.get("MessageBody")[0]
delay_seconds = self.querystring.get('DelaySeconds')
if len(message) > MAXIMUM_MESSAGE_LENGTH:
return ERROR_TOO_LONG_RESPONSE, dict(status=400)
if delay_seconds:
delay_seconds = int(delay_seconds[0])
else:
@@ -232,7 +236,7 @@
CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
<CreateQueueResult>
-<QueueUrl>http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}</QueueUrl>
+<QueueUrl>{{ queue.url }}</QueueUrl>
<VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
</CreateQueueResult>
<ResponseMetadata>
@@ -244,7 +248,7 @@ CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
<GetQueueUrlResult>
-<QueueUrl>http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}</QueueUrl>
+<QueueUrl>{{ queue.url }}</QueueUrl>
</GetQueueUrlResult>
<ResponseMetadata>
<RequestId>470a6f13-2ed9-4181-ad8a-2fdea142988e</RequestId>
@@ -254,7 +258,7 @@ GET_QUEUE_URL_RESPONSE = """<GetQueueUrlResponse>
LIST_QUEUES_RESPONSE = """<ListQueuesResponse>
<ListQueuesResult>
{% for queue in queues %}
-<QueueUrl>http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}</QueueUrl>
+<QueueUrl>{{ queue.url }}</QueueUrl>
{% endfor %}
</ListQueuesResult>
<ResponseMetadata>
@@ -417,3 +421,13 @@ PURGE_QUEUE_RESPONSE = """<PurgeQueueResponse>
</RequestId>
</ResponseMetadata>
</PurgeQueueResponse>"""
ERROR_TOO_LONG_RESPONSE = """<ErrorResponse xmlns="http://queue.amazonaws.com/doc/2012-11-05/">
<Error>
<Type>Sender</Type>
<Code>InvalidParameterValue</Code>
<Message>One or more parameters are invalid. Reason: Message must be shorter than 262144 bytes.</Message>
<Detail/>
</Error>
<RequestId>6fde8d1e-52cd-4581-8cd9-c512f4c64223</RequestId>
</ErrorResponse>"""
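The new limit mirrors SQS's real 256 KiB maximum (256 * 1024 = 262144 bytes), checked against the raw MessageBody length; for instance:

MAXIMUM_MESSAGE_LENGTH = 262144  # 256 KiB
body = 'test message' * 200000   # 2,400,000 characters, as in the SQS test later in this commit
assert len(body) > MAXIMUM_MESSAGE_LENGTH  # send_message answers with ERROR_TOO_LONG_RESPONSE, status 400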
@@ -6,3 +6,4 @@ coverage
freezegun
flask
boto3
six
@@ -20,7 +20,7 @@ extras_require = {
setup(
name='moto',
-version='0.4.12',
+version='0.4.18',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
@@ -2,6 +2,7 @@ from __future__ import unicode_literals
import boto
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup
from boto.ec2.autoscale import Tag
import sure # noqa
from moto import mock_autoscaling, mock_ec2
@@ -18,6 +19,7 @@ def test_create_autoscaling_group():
)
conn.create_launch_configuration(config)
group = AutoScalingGroup(
name='tester_group',
availability_zones=['us-east-1c', 'us-east-1b'],
@@ -32,6 +34,13 @@
placement_group="test_placement",
vpc_zone_identifier='subnet-1234abcd',
termination_policies=["OldestInstance", "NewestInstance"],
tags=[Tag(
resource_id='tester_group',
key='test_key',
value='test_value',
propagate_at_launch=True
)
],
)
conn.create_auto_scaling_group(group)
@@ -50,6 +59,12 @@
list(group.load_balancers).should.equal(["test_lb"])
group.placement_group.should.equal("test_placement")
list(group.termination_policies).should.equal(["OldestInstance", "NewestInstance"])
len(list(group.tags)).should.equal(1)
tag = list(group.tags)[0]
tag.resource_id.should.equal('tester_group')
tag.key.should.equal('test_key')
tag.value.should.equal('test_value')
tag.propagate_at_launch.should.equal(True)
@mock_autoscaling
@@ -88,6 +103,7 @@ def test_create_autoscaling_groups_defaults():
list(group.load_balancers).should.equal([])
group.placement_group.should.equal(None)
list(group.termination_policies).should.equal([])
list(group.tags).should.equal([])
@mock_autoscaling
@@ -40,7 +40,17 @@ def test_create_stack():
stack = conn.describe_stacks()[0]
stack.stack_name.should.equal('test_stack')
-stack.get_template().should.equal(dummy_template)
+stack.get_template().should.equal({
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation
@@ -83,7 +93,18 @@ def test_create_stack_from_s3_url():
stack = conn.describe_stacks()[0]
stack.stack_name.should.equal('new-stack')
-stack.get_template().should.equal(dummy_template)
+stack.get_template().should.equal(
{
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation
@@ -138,7 +159,17 @@ def test_get_template_by_name():
)
template = conn.get_template("test_stack")
-template.should.equal(dummy_template)
+template.should.equal({
'GetTemplateResponse': {
'GetTemplateResult': {
'TemplateBody': dummy_template_json,
'ResponseMetadata': {
'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
}
}
}
})
@mock_cloudformation
@@ -243,4 +274,13 @@ def test_stack_tags():
# conn.update_stack("test_stack", dummy_template_json2)
# stack = conn.describe_stacks()[0]
-# stack.get_template().should.equal(dummy_template2)
+# stack.get_template().should.equal({
# 'GetTemplateResponse': {
# 'GetTemplateResult': {
# 'TemplateBody': dummy_template_json2,
# 'ResponseMetadata': {
# 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
# }
# }
# }
# })
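These updated assertions reflect that boto's get_template returns the whole GetTemplateResponse envelope rather than the bare template; callers wanting just the template would unwrap it roughly like so:

import json
body = stack.get_template()['GetTemplateResponse']['GetTemplateResult']['TemplateBody']
template = json.loads(body)  # back to the original template dict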
@@ -3,6 +3,7 @@ import json
import boto
import boto.cloudformation
import boto.datapipeline
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.elb
@@ -17,6 +18,7 @@ import sure # noqa
from moto import (
mock_autoscaling,
mock_cloudformation,
mock_datapipeline,
mock_ec2,
mock_elb,
mock_iam,
@@ -287,7 +289,6 @@ def test_stack_elb_integration_with_attached_ec2_instances():
ec2_conn = boto.ec2.connect_to_region("us-west-1")
reservation = ec2_conn.get_all_instances()[0]
ec2_instance = reservation.instances[0]
-instance_id = ec2_instance.id
load_balancer.instances[0].id.should.equal(ec2_instance.id)
list(load_balancer.availability_zones).should.equal(['us-east1'])
@@ -1395,3 +1396,83 @@ def test_subnets_should_be_created_with_availability_zone():
)
subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0]
subnet.availability_zone.should.equal('us-west-1b')
@mock_cloudformation
@mock_datapipeline
def test_datapipeline():
dp_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"dataPipeline": {
"Properties": {
"Activate": "true",
"Name": "testDataPipeline",
"PipelineObjects": [
{
"Fields": [
{
"Key": "failureAndRerunMode",
"StringValue": "CASCADE"
},
{
"Key": "scheduleType",
"StringValue": "cron"
},
{
"Key": "schedule",
"RefValue": "DefaultSchedule"
},
{
"Key": "pipelineLogUri",
"StringValue": "s3://bucket/logs"
},
{
"Key": "type",
"StringValue": "Default"
},
],
"Id": "Default",
"Name": "Default"
},
{
"Fields": [
{
"Key": "startDateTime",
"StringValue": "1970-01-01T01:00:00"
},
{
"Key": "period",
"StringValue": "1 Day"
},
{
"Key": "type",
"StringValue": "Schedule"
}
],
"Id": "DefaultSchedule",
"Name": "RunOnce"
}
],
"PipelineTags": []
},
"Type": "AWS::DataPipeline::Pipeline"
}
}
}
cf_conn = boto.cloudformation.connect_to_region("us-east-1")
template_json = json.dumps(dp_template)
stack_id = cf_conn.create_stack(
"test_stack",
template_body=template_json,
)
dp_conn = boto.datapipeline.connect_to_region('us-east-1')
data_pipelines = dp_conn.list_pipelines()
data_pipelines['pipelineIdList'].should.have.length_of(1)
data_pipelines['pipelineIdList'][0]['name'].should.equal('testDataPipeline')
stack_resources = cf_conn.list_stack_resources(stack_id)
stack_resources.should.have.length_of(1)
stack_resources[0].physical_resource_id.should.equal(data_pipelines['pipelineIdList'][0]['id'])
@@ -0,0 +1,175 @@
from __future__ import unicode_literals
import boto.datapipeline
import sure # noqa
from moto import mock_datapipeline
from moto.datapipeline.utils import remove_capitalization_of_dict_keys
def get_value_from_fields(key, fields):
for field in fields:
if field['key'] == key:
return field['stringValue']
@mock_datapipeline
def test_create_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"]
pipeline_descriptions.should.have.length_of(1)
pipeline_description = pipeline_descriptions[0]
pipeline_description['name'].should.equal("mypipeline")
pipeline_description["pipelineId"].should.equal(pipeline_id)
fields = pipeline_description['fields']
get_value_from_fields('@pipelineState', fields).should.equal("PENDING")
get_value_from_fields('uniqueId', fields).should.equal("some-unique-id")
PIPELINE_OBJECTS = [
{
"id": "Default",
"name": "Default",
"fields": [{
"key": "workerGroup",
"stringValue": "workerGroup"
}]
},
{
"id": "Schedule",
"name": "Schedule",
"fields": [{
"key": "startDateTime",
"stringValue": "2012-12-12T00:00:00"
}, {
"key": "type",
"stringValue": "Schedule"
}, {
"key": "period",
"stringValue": "1 hour"
}, {
"key": "endDateTime",
"stringValue": "2012-12-21T18:00:00"
}]
},
{
"id": "SayHello",
"name": "SayHello",
"fields": [{
"key": "type",
"stringValue": "ShellCommandActivity"
}, {
"key": "command",
"stringValue": "echo hello"
}, {
"key": "parent",
"refValue": "Default"
}, {
"key": "schedule",
"refValue": "Schedule"
}]
}
]
@mock_datapipeline
def test_creating_pipeline_definition():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id)
pipeline_definition = conn.get_pipeline_definition(pipeline_id)
pipeline_definition['pipelineObjects'].should.have.length_of(3)
default_object = pipeline_definition['pipelineObjects'][0]
default_object['name'].should.equal("Default")
default_object['id'].should.equal("Default")
default_object['fields'].should.equal([{
"key": "workerGroup",
"stringValue": "workerGroup"
}])
@mock_datapipeline
def test_describing_pipeline_objects():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id)
objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)['pipelineObjects']
objects.should.have.length_of(2)
default_object = [x for x in objects if x['id'] == 'Default'][0]
default_object['name'].should.equal("Default")
default_object['fields'].should.equal([{
"key": "workerGroup",
"stringValue": "workerGroup"
}])
@mock_datapipeline
def test_activate_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.activate_pipeline(pipeline_id)
pipeline_descriptions = conn.describe_pipelines([pipeline_id])["pipelineDescriptionList"]
pipeline_descriptions.should.have.length_of(1)
pipeline_description = pipeline_descriptions[0]
fields = pipeline_description['fields']
get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED")
@mock_datapipeline
def test_listing_pipelines():
conn = boto.datapipeline.connect_to_region("us-west-2")
res1 = conn.create_pipeline("mypipeline1", "some-unique-id1")
res2 = conn.create_pipeline("mypipeline2", "some-unique-id2")
response = conn.list_pipelines()
response["hasMoreResults"].should.be(False)
response["marker"].should.be.none
response["pipelineIdList"].should.have.length_of(2)
response["pipelineIdList"].should.contain({
"id": res1["pipelineId"],
"name": "mypipeline1",
})
response["pipelineIdList"].should.contain({
"id": res2["pipelineId"],
"name": "mypipeline2"
})
# testing a helper function
def test_remove_capitalization_of_dict_keys():
result = remove_capitalization_of_dict_keys(
{
"Id": "IdValue",
"Fields": [{
"Key": "KeyValue",
"StringValue": "StringValueValue"
}]
}
)
result.should.equal({
"id": "IdValue",
"fields": [{
"key": "KeyValue",
"stringValue": "StringValueValue"
}],
})
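The expected output pins down the helper's contract: lower-case the first letter of every key, recursing through nested containers. A minimal sketch of such a function, assuming only dicts, lists, and scalars occur:

def remove_capitalization_of_dict_keys(obj):
    # Lower-case the first character of each key and recurse into the values.
    if isinstance(obj, dict):
        return dict(
            (key[0].lower() + key[1:], remove_capitalization_of_dict_keys(value))
            for key, value in obj.items()
        )
    if isinstance(obj, list):
        return [remove_capitalization_of_dict_keys(item) for item in obj]
    return obj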
@@ -0,0 +1,27 @@
from __future__ import unicode_literals
import json
import sure # noqa
import moto.server as server
from moto import mock_datapipeline
'''
Test the different server responses
'''
@mock_datapipeline
def test_describe_pipelines():
backend = server.create_backend_app("datapipeline")
test_client = backend.test_client()
res = test_client.post('/',
data={"pipelineIds": ["ASdf"]},
headers={"X-Amz-Target": "DataPipeline.DescribePipelines"},
)
json_data = json.loads(res.data.decode("utf-8"))
json_data.should.equal({
'pipelineDescriptionList': []
})
@@ -122,6 +122,33 @@ def test_item_add_and_describe_and_update():
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_partial_save():
table = create_table()
data = {
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User A',
}
table.put_item(data=data)
returned_item = table.get_item(forum_name="LOLCat Forum")
returned_item['SentBy'] = 'User B'
returned_item.partial_save()
returned_item = table.get_item(
forum_name='LOLCat Forum'
)
dict(returned_item).should.equal({
'forum_name': 'LOLCat Forum',
'Body': 'http://url_to_lolcat.gif',
'SentBy': 'User B',
})
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_put_without_table():
@@ -33,6 +33,7 @@ def test_create_and_delete_volume():
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_filter_volume_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
@@ -93,7 +94,9 @@ def test_create_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
-volume.create_snapshot('a test snapshot')
+snapshot = volume.create_snapshot('a test snapshot')
snapshot.update()
snapshot.status.should.equal('completed')
snapshots = conn.get_all_snapshots()
snapshots.should.have.length_of(1)
@@ -114,6 +117,7 @@ def test_create_snapshot():
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_filter_snapshot_by_id():
conn = boto.connect_ec2('the_key', 'the_secret')
@@ -134,6 +138,7 @@ def test_filter_snapshot_by_id():
s.volume_id.should.be.within([volume2.id, volume3.id])
s.region.name.should.equal(conn.region.name)
@mock_ec2
def test_snapshot_attribute():
conn = boto.connect_ec2('the_key', 'the_secret')
@@ -215,6 +220,20 @@ def test_snapshot_attribute():
user_ids=['user']).should.throw(NotImplementedError)
@mock_ec2
def test_create_volume_from_snapshot():
conn = boto.connect_ec2('the_key', 'the_secret')
volume = conn.create_volume(80, "us-east-1a")
snapshot = volume.create_snapshot('a test snapshot')
snapshot.update()
snapshot.status.should.equal('completed')
new_volume = snapshot.create_volume('us-east-1a')
new_volume.size.should.equal(80)
new_volume.snapshot_id.should.equal(snapshot.id)
@mock_ec2
def test_modify_attribute_blockDeviceMapping():
"""
@@ -234,3 +253,13 @@ def test_modify_attribute_blockDeviceMapping():
instance = ec2_backends[conn.region.name].get_instance(instance.id)
instance.block_device_mapping.should.have.key('/dev/sda1')
instance.block_device_mapping['/dev/sda1'].delete_on_termination.should.be(True)
@mock_ec2
def test_volume_tag_escaping():
conn = boto.connect_ec2('the_key', 'the_secret')
vol = conn.create_volume(10, 'us-east-1a')
snapshot = conn.create_snapshot(vol.id, 'Desc')
snapshot.add_tags({'key': '</closed>'})
dict(conn.get_all_snapshots()[0].tags).should.equal({'key': '</closed>'})
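The '</closed>' tag value exercises XML escaping: the mock must emit it as &lt;/closed&gt; in its XML response, and boto unescapes it on the way back. The standard library covers both directions:

from xml.sax.saxutils import escape, unescape

escape('</closed>')           # '&lt;/closed&gt;'
unescape('&lt;/closed&gt;')   # '</closed>'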
@@ -53,7 +53,7 @@ def test_instance_launch_and_terminate():
instances.should.have.length_of(1)
instances[0].id.should.equal(instance.id)
instances[0].state.should.equal('running')
-instances[0].launch_time.should.equal("2014-01-01T05:00:00Z")
+instances[0].launch_time.should.equal("2014-01-01T05:00:00.000Z")
instances[0].vpc_id.should.equal(None)
root_device_name = instances[0].root_device_name
@@ -85,3 +85,27 @@ def test_key_pairs_delete_exist():
r = conn.delete_key_pair('foo')
r.should.be.ok
assert len(conn.get_all_key_pairs()) == 0
@mock_ec2
def test_key_pairs_import():
conn = boto.connect_ec2('the_key', 'the_secret')
kp = conn.import_key_pair('foo', b'content')
assert kp.name == 'foo'
kps = conn.get_all_key_pairs()
assert len(kps) == 1
assert kps[0].name == 'foo'
@mock_ec2
def test_key_pairs_import_exist():
conn = boto.connect_ec2('the_key', 'the_secret')
kp = conn.import_key_pair('foo', b'content')
assert kp.name == 'foo'
assert len(conn.get_all_key_pairs()) == 1
with assert_raises(EC2ResponseError) as cm:
conn.create_key_pair('foo')
cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@@ -0,0 +1,8 @@
from moto.ec2 import utils
def test_random_key_pair():
key_pair = utils.random_key_pair()
assert len(key_pair['fingerprint']) == 59
assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----')
assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----')
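The magic numbers follow from the fake key-pair format: assuming the fingerprint is 20 colon-separated two-digit hex bytes, its length is 20 * 2 + 19 = 59 characters:

fingerprint = ':'.join('%02x' % byte for byte in range(20))  # stand-in for 20 random bytes
assert len(fingerprint) == 59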
@@ -0,0 +1,141 @@
from __future__ import unicode_literals
import datetime
from botocore.exceptions import ClientError
import boto3
from freezegun import freeze_time
import sure # noqa
from moto import mock_kinesis
def create_stream(client, stream_name):
return client.create_delivery_stream(
DeliveryStreamName=stream_name,
RedshiftDestinationConfiguration={
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database',
'CopyCommand': {
'DataTableName': 'outputTable',
'CopyOptions': "CSV DELIMITER ',' NULL '\\0'"
},
'Username': 'username',
'Password': 'password',
'S3Configuration': {
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'BucketARN': 'arn:aws:s3:::kinesis-test',
'Prefix': 'myFolder/',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 124
},
'CompressionFormat': 'UNCOMPRESSED',
}
}
)
@mock_kinesis
@freeze_time("2015-03-01")
def test_create_stream():
client = boto3.client('firehose', region_name='us-east-1')
response = create_stream(client, 'stream1')
stream_arn = response['DeliveryStreamARN']
response = client.describe_delivery_stream(DeliveryStreamName='stream1')
stream_description = response['DeliveryStreamDescription']
# Sure and Freezegun don't play nicely together
created = stream_description.pop('CreateTimestamp')
last_updated = stream_description.pop('LastUpdateTimestamp')
from dateutil.tz import tzlocal
assert created == datetime.datetime(2015, 3, 1, tzinfo=tzlocal())
assert last_updated == datetime.datetime(2015, 3, 1, tzinfo=tzlocal())
stream_description.should.equal({
'DeliveryStreamName': 'stream1',
'DeliveryStreamARN': stream_arn,
'DeliveryStreamStatus': 'ACTIVE',
'VersionId': 'string',
'Destinations': [
{
'DestinationId': 'string',
'RedshiftDestinationDescription': {
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database',
'CopyCommand': {
'DataTableName': 'outputTable',
'CopyOptions': "CSV DELIMITER ',' NULL '\\0'"
},
'Username': 'username',
'S3DestinationDescription': {
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'BucketARN': 'arn:aws:s3:::kinesis-test',
'Prefix': 'myFolder/',
'BufferingHints': {
'SizeInMBs': 123,
'IntervalInSeconds': 124
},
'CompressionFormat': 'UNCOMPRESSED',
}
}
},
],
"HasMoreDestinations": False,
})
@mock_kinesis
@freeze_time("2015-03-01")
def test_describe_non_existent_stream():
client = boto3.client('firehose', region_name='us-east-1')
client.describe_delivery_stream.when.called_with(DeliveryStreamName='not-a-stream').should.throw(ClientError)
@mock_kinesis
@freeze_time("2015-03-01")
def test_list_and_delete_stream():
client = boto3.client('firehose', region_name='us-east-1')
create_stream(client, 'stream1')
create_stream(client, 'stream2')
set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(set(['stream1', 'stream2']))
client.delete_delivery_stream(DeliveryStreamName='stream1')
set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(set(['stream2']))
@mock_kinesis
def test_put_record():
client = boto3.client('firehose', region_name='us-east-1')
create_stream(client, 'stream1')
client.put_record(
DeliveryStreamName='stream1',
Record={
'Data': 'some data'
}
)
@mock_kinesis
def test_put_record_batch():
client = boto3.client('firehose', region_name='us-east-1')
create_stream(client, 'stream1')
client.put_record_batch(
DeliveryStreamName='stream1',
Records=[
{
'Data': 'some data1'
},
{
'Data': 'some data2'
},
]
)
@@ -239,3 +239,25 @@ def test_deleting_weighted_route():
cname = zone.get_cname('cname.testdns.aws.com.', all=True)
# When get_cname only had one result, it returns just that result instead of a list.
cname.identifier.should.equal('success-test-bar')
@mock_route53
def test_deleting_latency_route():
conn = boto.connect_route53()
conn.create_hosted_zone("testdns.aws.com.")
zone = conn.get_zone("testdns.aws.com.")
zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-foo', 'us-west-2'))
zone.add_cname("cname.testdns.aws.com", "example.com", identifier=('success-test-bar', 'us-west-1'))
cnames = zone.get_cname('cname.testdns.aws.com.', all=True)
cnames.should.have.length_of(2)
foo_cname = [cname for cname in cnames if cname.identifier == 'success-test-foo'][0]
foo_cname.region.should.equal('us-west-2')
zone.delete_record(foo_cname)
cname = zone.get_cname('cname.testdns.aws.com.', all=True)
# When get_cname only had one result, it returns just that result instead of a list.
cname.identifier.should.equal('success-test-bar')
cname.region.should.equal('us-west-1')
@@ -726,7 +726,7 @@ def test_list_versions():
@mock_s3
-def test_acl_is_ignored_for_now():
+def test_acl_setting():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
content = b'imafile'
@@ -741,6 +741,74 @@ def test_acl_is_ignored_for_now():
assert key.get_contents_as_string() == content
grants = key.get_acl().acl.grants
assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3
def test_acl_setting_via_headers():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
content = b'imafile'
keyname = 'test.txt'
key = Key(bucket, name=keyname)
key.content_type = 'text/plain'
key.set_contents_from_string(content, headers={
'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
})
key = bucket.get_key(keyname)
assert key.get_contents_as_string() == content
grants = key.get_acl().acl.grants
assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'FULL_CONTROL' for g in grants), grants
@mock_s3
def test_acl_switching():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
content = b'imafile'
keyname = 'test.txt'
key = Key(bucket, name=keyname)
key.content_type = 'text/plain'
key.set_contents_from_string(content, policy='public-read')
key.set_acl('private')
grants = key.get_acl().acl.grants
assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3
def test_bucket_acl_setting():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
bucket.make_public()
grants = bucket.get_acl().acl.grants
assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3
def test_bucket_acl_switching():
conn = boto.connect_s3()
bucket = conn.create_bucket('foobar')
bucket.make_public()
bucket.set_acl('private')
grants = bucket.get_acl().acl.grants
assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and
g.permission == 'READ' for g in grants), grants
@mock_s3
def test_unicode_key():
@@ -902,5 +970,33 @@ def test_boto3_head_object():
s3.Object('blah', 'hello.txt').meta.client.head_object(Bucket='blah', Key='hello.txt')
-with assert_raises(ClientError) as err:
+with assert_raises(ClientError):
s3.Object('blah', 'hello2.txt').meta.client.head_object(Bucket='blah', Key='hello_bad.txt')
TEST_XML = """\
<?xml version="1.0" encoding="UTF-8"?>
<ns0:WebsiteConfiguration xmlns:ns0="http://s3.amazonaws.com/doc/2006-03-01/">
<ns0:IndexDocument>
<ns0:Suffix>index.html</ns0:Suffix>
</ns0:IndexDocument>
<ns0:RoutingRules>
<ns0:RoutingRule>
<ns0:Condition>
<ns0:KeyPrefixEquals>test/testing</ns0:KeyPrefixEquals>
</ns0:Condition>
<ns0:Redirect>
<ns0:ReplaceKeyWith>test.txt</ns0:ReplaceKeyWith>
</ns0:Redirect>
</ns0:RoutingRule>
</ns0:RoutingRules>
</ns0:WebsiteConfiguration>
"""
@mock_s3
def test_website_configuration_xml():
conn = boto.connect_s3()
bucket = conn.create_bucket('test-bucket')
bucket.set_website_configuration_xml(TEST_XML)
bucket.get_website_configuration_xml().should.equal(TEST_XML)
@@ -281,3 +281,40 @@ def test_bucket_key_listing_order():
delimiter = '/'
keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
keys.should.equal(['toplevel/x/'])
@mock_s3bucket_path
def test_delete_keys():
conn = create_connection()
bucket = conn.create_bucket('foobar')
Key(bucket=bucket, name='file1').set_contents_from_string('abc')
Key(bucket=bucket, name='file2').set_contents_from_string('abc')
Key(bucket=bucket, name='file3').set_contents_from_string('abc')
Key(bucket=bucket, name='file4').set_contents_from_string('abc')
result = bucket.delete_keys(['file2', 'file3'])
result.deleted.should.have.length_of(2)
result.errors.should.have.length_of(0)
keys = bucket.get_all_keys()
keys.should.have.length_of(2)
keys[0].name.should.equal('file1')
@mock_s3bucket_path
def test_delete_keys_with_invalid():
conn = create_connection()
bucket = conn.create_bucket('foobar')
Key(bucket=bucket, name='file1').set_contents_from_string('abc')
Key(bucket=bucket, name='file2').set_contents_from_string('abc')
Key(bucket=bucket, name='file3').set_contents_from_string('abc')
Key(bucket=bucket, name='file4').set_contents_from_string('abc')
result = bucket.delete_keys(['abc', 'file3'])
result.deleted.should.have.length_of(1)
result.errors.should.have.length_of(1)
keys = bucket.get_all_keys()
keys.should.have.length_of(3)
keys[0].name.should.equal('file1')
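Under the hood, boto's delete_keys sends a single POST to the ?delete URL whose XML body names every key; for the request above it would look roughly like:

<Delete>
  <Object><Key>abc</Key></Object>
  <Object><Key>file3</Key></Object>
</Delete>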
@@ -34,6 +34,8 @@ def test_create_queues_in_multiple_region():
list(west1_conn.get_all_queues()).should.have.length_of(1)
list(west2_conn.get_all_queues()).should.have.length_of(1)
west1_conn.get_all_queues()[0].url.should.equal('http://sqs.us-west-1.amazonaws.com/123456789012/test-queue')
@mock_sqs
def test_get_queue():
@@ -168,6 +170,18 @@ def test_send_message_with_delay():
queue.count().should.equal(0)
@mock_sqs
def test_send_large_message_fails():
conn = boto.connect_sqs('the_key', 'the_secret')
queue = conn.create_queue("test-queue", visibility_timeout=60)
queue.set_message_class(RawMessage)
body_one = 'test message' * 200000
huge_message = queue.new_message(body_one)
queue.write.when.called_with(huge_message).should.throw(SQSError)
@mock_sqs
def test_message_becomes_inflight_when_received():
conn = boto.connect_sqs('the_key', 'the_secret')