Merge remote-tracking branch 'upstream/master' into adding_rds2_support

commit 95e48336d9
@@ -5,11 +5,11 @@ python:
   - 2.7
 env:
   matrix:
-    - BOTO_VERSION=2.34.0
+    - BOTO_VERSION=2.36.0
 matrix:
   include:
     - python: "3.3"
-      env: BOTO_VERSION=2.34.0
+      env: BOTO_VERSION=2.36.0
 install:
   - travis_retry pip install boto==$BOTO_VERSION
   - travis_retry pip install .
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals

 import json
-from six.moves.urllib_parse import urlparse
+from six.moves.urllib.parse import urlparse

 from moto.core.responses import BaseResponse
 from moto.s3 import s3_backend
moto/core/exceptions.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+from werkzeug.exceptions import HTTPException
+from jinja2 import DictLoader, Environment
+
+
+ERROR_RESPONSE = u"""<?xml version="1.0" encoding="UTF-8"?>
+<Response>
+    <Errors>
+        <Error>
+            <Code>{{code}}</Code>
+            <Message>{{message}}</Message>
+            {% block extra %}{% endblock %}
+        </Error>
+    </Errors>
+    <RequestID>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestID>
+</Response>
+"""
+
+
+class RESTError(HTTPException):
+    templates = {
+        'error': ERROR_RESPONSE
+    }
+
+    def __init__(self, code, message, template='error', **kwargs):
+        super(RESTError, self).__init__()
+        env = Environment(loader=DictLoader(self.templates))
+        self.description = env.get_template(template).render(
+            code=code, message=message, **kwargs)
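The new RESTError base renders its XML body at construction time, so a subclass only declares an HTTP status and forwards the AWS-style error code and message. A minimal sketch of the pattern (hypothetical WidgetNotFound class, assuming moto is importable):

    # Hypothetical subclass for illustration; mirrors how the EC2 and S3
    # exceptions in this commit use RESTError.
    from moto.core.exceptions import RESTError

    class WidgetNotFound(RESTError):
        code = 404  # HTTP status (werkzeug HTTPException class attribute)

        def __init__(self, widget_id):
            # The first argument is the AWS error code placed in the XML
            # body, not the HTTP status.
            super(WidgetNotFound, self).__init__(
                "WidgetNotFound",
                "Widget {0} does not exist".format(widget_id))

    err = WidgetNotFound("w-123")
    assert err.code == 404
    assert "<Code>WidgetNotFound</Code>" in err.description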
@@ -1,13 +1,9 @@
 from __future__ import unicode_literals
-from werkzeug.exceptions import BadRequest
-from jinja2 import Template
+from moto.core.exceptions import RESTError


-class EC2ClientError(BadRequest):
-    def __init__(self, code, message):
-        super(EC2ClientError, self).__init__()
-        self.description = ERROR_RESPONSE_TEMPLATE.render(
-            code=code, message=message)
+class EC2ClientError(RESTError):
+    code = 400


 class DependencyViolationError(EC2ClientError):
@@ -306,17 +302,3 @@ class InvalidCIDRSubnetError(EC2ClientError):
             "InvalidParameterValue",
             "invalid CIDR subnet specification: {0}"
             .format(cidr))
-
-
-ERROR_RESPONSE = u"""<?xml version="1.0" encoding="UTF-8"?>
-<Response>
-    <Errors>
-        <Error>
-            <Code>{{code}}</Code>
-            <Message>{{message}}</Message>
-        </Error>
-    </Errors>
-    <RequestID>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestID>
-</Response>
-"""
-ERROR_RESPONSE_TEMPLATE = Template(ERROR_RESPONSE)
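With the shared base in place, each EC2 exception reduces to a constructor that forwards an error code and message; the HTTP status is fixed at 400 on the class. A sketch of the pattern the remaining subclasses in this file follow (hypothetical DemoError, assuming moto is importable):

    from moto.ec2.exceptions import EC2ClientError

    class DemoError(EC2ClientError):  # hypothetical, for illustration
        def __init__(self, resource_id):
            super(DemoError, self).__init__(
                "InvalidDemo.NotFound",
                "The resource '{0}' does not exist".format(resource_id))

    err = DemoError("demo-123")
    assert err.code == 400  # inherited from EC2ClientError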
@@ -69,6 +69,7 @@ from .utils import (
     random_internet_gateway_id,
     random_ip,
     random_key_pair,
+    random_private_ip,
     random_public_ip,
     random_reservation_id,
     random_route_table_id,
@@ -163,7 +164,7 @@ class NetworkInterface(object):
             group = self.ec2_backend.get_security_group_from_id(group_id)
             if not group:
                 # Create with specific group ID.
-                group = SecurityGroup(group_id, group_id, group_id, vpc_id=subnet.vpc_id)
+                group = SecurityGroup(self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
                 self.ec2_backend.groups[subnet.vpc_id][group_id] = group
             if group:
                 self._group_set.append(group)
@@ -174,9 +175,12 @@ class NetworkInterface(object):

         security_group_ids = properties.get('SecurityGroups', [])

-        subnet_id = properties['SubnetId']
         ec2_backend = ec2_backends[region_name]
-        subnet = ec2_backend.get_subnet(subnet_id)
+        subnet_id = properties.get('SubnetId')
+        if subnet_id:
+            subnet = ec2_backend.get_subnet(subnet_id)
+        else:
+            subnet = None

         private_ip_address = properties.get('PrivateIpAddress', None)
@@ -224,7 +228,7 @@ class NetworkInterfaceBackend(object):
         super(NetworkInterfaceBackend, self).__init__()

     def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs):
-        eni = NetworkInterface(self, subnet, private_ip_address, group_ids=group_ids)
+        eni = NetworkInterface(self, subnet, private_ip_address, group_ids=group_ids, **kwargs)
         self.enis[eni.id] = eni
         return eni
@@ -297,10 +301,14 @@ class Instance(BotoInstance, TaggedEC2Resource):
         self.instance_type = kwargs.get("instance_type", "m1.small")
         self.vpc_id = None
         self.subnet_id = kwargs.get("subnet_id")
+        in_ec2_classic = not bool(self.subnet_id)
         self.key_name = kwargs.get("key_name")
         self.source_dest_check = "true"
         self.launch_time = datetime.utcnow().isoformat()
-        self.private_ip_address = kwargs.get('private_ip_address')
+        associate_public_ip = kwargs.get("associate_public_ip", False)
+        if in_ec2_classic:
+            # If we are in EC2-Classic, autoassign a public IP
+            associate_public_ip = True

         self.block_device_mapping = BlockDeviceMapping()
         self.block_device_mapping['/dev/sda1'] = BlockDeviceType(volume_id=random_volume_id())
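The practical effect: instances launched without a subnet are treated as EC2-Classic and always get a public IP, matching AWS behaviour. A behavioural sketch under the mock (assumes moto and boto are installed; everything beyond the public-IP rule is illustrative):

    import boto
    from moto import mock_ec2

    @mock_ec2
    def classic_instance_gets_public_ip():
        conn = boto.connect_ec2('the_key', 'the_secret')
        # No subnet_id -> EC2-Classic -> associate_public_ip is forced True,
        # so the backend assigns a public IP to the primary NIC.
        reservation = conn.run_instances('ami-12345678')
        return reservation.instances[0]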
@@ -326,9 +334,26 @@ class Instance(BotoInstance, TaggedEC2Resource):
             self.vpc_id = subnet.vpc_id

         self.prep_nics(kwargs.get("nics", {}),
-                       subnet_id=kwargs.get("subnet_id"),
+                       subnet_id=self.subnet_id,
                        private_ip=kwargs.get("private_ip"),
-                       associate_public_ip=kwargs.get("associate_public_ip"))
+                       associate_public_ip=associate_public_ip)
+
+    @property
+    def private_ip(self):
+        return self.nics[0].private_ip_address
+
+    @property
+    def private_dns(self):
+        return "ip-{0}.ec2.internal".format(self.private_ip)
+
+    @property
+    def public_ip(self):
+        return self.nics[0].public_ip
+
+    @property
+    def public_dns(self):
+        if self.public_ip:
+            return "ec2-{0}.compute-1.amazonaws.com".format(self.public_ip)

     @classmethod
     def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -346,7 +371,7 @@ class Instance(BotoInstance, TaggedEC2Resource):
             instance_type=properties.get("InstanceType", "m1.small"),
             subnet_id=properties.get("SubnetId"),
             key_name=properties.get("KeyName"),
-            private_ip_address=properties.get('PrivateIpAddress'),
+            private_ip=properties.get('PrivateIpAddress'),
         )
         return reservation.instances[0]
@@ -407,6 +432,9 @@ class Instance(BotoInstance, TaggedEC2Resource):
     def prep_nics(self, nic_spec, subnet_id=None, private_ip=None, associate_public_ip=None):
         self.nics = {}

+        if not private_ip:
+            private_ip = random_private_ip()
+
         # Primary NIC defaults
         primary_nic = {'SubnetId': subnet_id,
                        'PrivateIpAddress': private_ip,
@@ -434,7 +462,10 @@ class Instance(BotoInstance, TaggedEC2Resource):
             if device_index == 0 and primary_nic:
                 nic.update(primary_nic)

-            subnet = self.ec2_backend.get_subnet(nic['SubnetId'])
+            if 'SubnetId' in nic:
+                subnet = self.ec2_backend.get_subnet(nic['SubnetId'])
+            else:
+                subnet = None

             group_id = nic.get('SecurityGroupId')
             group_ids = [group_id] if group_id else []
@@ -468,13 +499,13 @@ class Instance(BotoInstance, TaggedEC2Resource):
         if attribute_name == 'AvailabilityZone':
             return self.placement
         elif attribute_name == 'PrivateDnsName':
-            return self.private_dns_name
+            return self.private_dns
         elif attribute_name == 'PublicDnsName':
-            return self.public_dns_name
+            return self.public_dns
         elif attribute_name == 'PrivateIp':
-            return self.private_ip_address
+            return self.private_ip
         elif attribute_name == 'PublicIp':
-            return self.ip_address
+            return self.public_ip
         raise UnformattedGetAttTemplateException()
@@ -1016,8 +1047,9 @@ class SecurityRule(object):
         return self.unique_representation == other.unique_representation


-class SecurityGroup(object):
-    def __init__(self, group_id, name, description, vpc_id=None):
+class SecurityGroup(TaggedEC2Resource):
+    def __init__(self, ec2_backend, group_id, name, description, vpc_id=None):
+        self.ec2_backend = ec2_backend
         self.id = group_id
         self.name = name
         self.description = description
@@ -1116,7 +1148,7 @@ class SecurityGroupBackend(object):
         existing_group = self.get_security_group_from_name(name, vpc_id)
         if existing_group:
             raise InvalidSecurityGroupDuplicateError(name)
-        group = SecurityGroup(group_id, name, description, vpc_id=vpc_id)
+        group = SecurityGroup(self, group_id, name, description, vpc_id=vpc_id)

         self.groups[vpc_id][group_id] = group
         return group
@@ -2031,10 +2063,12 @@ class VPCGatewayAttachment(object):
         properties = cloudformation_json['Properties']

         ec2_backend = ec2_backends[region_name]
-        return ec2_backend.create_vpc_gateway_attachment(
+        attachment = ec2_backend.create_vpc_gateway_attachment(
             gateway_id=properties['InternetGatewayId'],
             vpc_id=properties['VpcId'],
         )
+        ec2_backend.attach_internet_gateway(properties['InternetGatewayId'], properties['VpcId'])
+        return attachment

     @property
     def physical_resource_id(self):
@@ -25,6 +25,11 @@ class ElasticNetworkInterfaces(BaseResponse):
     def describe_network_interfaces(self):
         # Partially implemented. Supports only network-interface-id and group-id filters
         filters = filters_from_querystring(self.querystring)
+        eni_ids = self._get_multi_param('NetworkInterfaceId.')
+        if 'network-interface-id' not in filters and eni_ids:
+            # Network interfaces can be filtered by passing the 'network-interface-id'
+            # filter or by passing the NetworkInterfaceId parameter
+            filters['network-interface-id'] = eni_ids
         enis = self.ec2_backend.describe_network_interfaces(filters)
         template = self.response_template(DESCRIBE_NETWORK_INTERFACES_RESPONSE)
         return template.render(enis=enis)
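In other words, explicit NetworkInterfaceId.N parameters are folded into the same code path as the network-interface-id filter. A standalone restatement of the merge rule:

    filters = {'group-id': ['sg-123']}    # from filters_from_querystring
    eni_ids = ['eni-aaa', 'eni-bbb']      # from NetworkInterfaceId.1, .2, ...

    # Explicit IDs only apply when the filter was not already supplied.
    if 'network-interface-id' not in filters and eni_ids:
        filters['network-interface-id'] = eni_ids

    assert filters['network-interface-id'] == ['eni-aaa', 'eni-bbb']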
@@ -198,8 +198,8 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
             <code>0</code>
             <name>pending</name>
          </instanceState>
-         <privateDnsName/>
-         <dnsName/>
+         <privateDnsName>{{ instance.private_dns }}</privateDnsName>
+         <publicDnsName>{{ instance.public_dns }}</publicDnsName>
          <reason/>
          <keyName>{{ instance.key_name }}</keyName>
          <amiLaunchIndex>0</amiLaunchIndex>
@@ -216,9 +216,9 @@ EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc
          {% if instance.nics %}
            <subnetId>{{ instance.nics[0].subnet.id }}</subnetId>
            <vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId>
-           <privateIpAddress>{{ instance.nics[0].private_ip_address }}</privateIpAddress>
-           {% if instance.nics[0].public_ip %}
-             <ipAddress>{{ instance.nics[0].public_ip }}</ipAddress>
+           <privateIpAddress>{{ instance.private_ip }}</privateIpAddress>
+           {% if instance.public_ip %}
+             <ipAddress>{{ instance.public_ip }}</ipAddress>
           {% endif %}
          {% else %}
            <subnetId>{{ instance.subnet_id }}</subnetId>
@@ -318,8 +318,8 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns='http://ec2.amazona
                     <code>{{ instance._state.code }}</code>
                     <name>{{ instance._state.name }}</name>
                 </instanceState>
-                <privateDnsName>ip-10.0.0.12.ec2.internal</privateDnsName>
-                <dnsName>ec2-46.51.219.63.compute-1.amazonaws.com</dnsName>
+                <privateDnsName>{{ instance.private_dns }}</privateDnsName>
+                <publicDnsName>{{ instance.public_dns }}</publicDnsName>
                 <reason>{{ instance._reason }}</reason>
                 <keyName>{{ instance.key_name }}</keyName>
                 <amiLaunchIndex>0</amiLaunchIndex>
@@ -340,7 +340,7 @@ EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns='http://ec2.amazona
                    {% if instance.nics %}
                      <subnetId>{{ instance.nics[0].subnet.id }}</subnetId>
                      <vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId>
-                     <privateIpAddress>{{ instance.nics[0].private_ip_address }}</privateIpAddress>
+                     <privateIpAddress>{{ instance.private_ip }}</privateIpAddress>
                      {% if instance.nics[0].public_ip %}
                          <ipAddress>{{ instance.nics[0].public_ip }}</ipAddress>
                      {% endif %}
@@ -133,6 +133,16 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = """<DescribeSecurityGroupsResponse xmlns="ht
                 {% endfor %}
              </ipPermissions>
              <ipPermissionsEgress/>
+             <tagSet>
+               {% for tag in group.get_tags() %}
+                 <item>
+                   <resourceId>{{ tag.resource_id }}</resourceId>
+                   <resourceType>{{ tag.resource_type }}</resourceType>
+                   <key>{{ tag.key }}</key>
+                   <value>{{ tag.value }}</value>
+                 </item>
+               {% endfor %}
+             </tagSet>
           </item>
        {% endfor %}
    </securityGroupInfo>
@@ -130,6 +130,12 @@ def random_public_ip():
                                   random.choice(range(255)))


+def random_private_ip():
+    return '10.{0}.{1}.{2}'.format(random.choice(range(255)),
+                                   random.choice(range(255)),
+                                   random.choice(range(255)))
+
+
 def random_ip():
     return "127.{0}.{1}.{2}".format(
         random.randint(0, 255),
@@ -331,7 +337,7 @@ filter_dict_attribute_mapping = {

 def passes_filter_dict(instance, filter_dict):
     for filter_name, filter_values in filter_dict.items():

         if filter_name in filter_dict_attribute_mapping:
             instance_attr = filter_dict_attribute_mapping[filter_name]
             instance_value = get_object_value(instance, instance_attr)
@@ -347,14 +353,16 @@ def passes_filter_dict(instance, filter_dict):
                                       filter_name)
     return True


 def instance_value_in_filter_values(instance_value, filter_values):
     if isinstance(instance_value, list):
         if not set(filter_values).intersection(set(instance_value)):
             return False
     elif instance_value not in filter_values:
         return False
     return True


 def filter_reservations(reservations, filter_dict):
     result = []
     for reservation in reservations:
@@ -20,6 +20,35 @@ class FakeInstanceGroup(object):
         self.num_instances = instance_count


+class Cluster(object):
+    def __init__(self, id, name, availability_zone, ec2_key_name, subnet_id,
+                 ec2_iam_profile, log_uri):
+        self.id = id
+        self.name = name
+        self.applications = []
+        self.auto_terminate = "false"
+        self.availability_zone = availability_zone
+        self.subnet_id = subnet_id
+        self.ec2_iam_profile = ec2_iam_profile
+        self.log_uri = log_uri
+        self.master_public_dns_name = ""
+        self.normalized_instance_hours = 0
+        self.requested_ami_version = "2.4.2"
+        self.running_ami_version = "2.4.2"
+        self.service_role = "my-service-role"
+        self.state = "RUNNING"
+        self.tags = {}
+        self.termination_protected = "false"
+        self.visible_to_all_users = "false"
+
+    def add_tags(self, tags):
+        self.tags.update(tags)
+
+    def remove_tags(self, tag_keys):
+        for key in tag_keys:
+            self.tags.pop(key, None)
+
+
 class FakeStep(object):
     def __init__(self, state, **kwargs):
         # 'Steps.member.1.HadoopJarStep.Jar': ['/home/hadoop/contrib/streaming/hadoop-streaming.jar'],
@@ -68,11 +97,24 @@ class FakeJobFlow(object):
         self.normalized_instance_hours = 0
         self.ec2_key_name = instance_attrs.get('ec2_key_name')
         self.availability_zone = instance_attrs.get('placement.availability_zone')
+        self.subnet_id = instance_attrs.get('ec2_subnet_id')
         self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps')
         self.termination_protected = instance_attrs.get('termination_protected')

         self.instance_group_ids = []

+    def create_cluster(self):
+        cluster = Cluster(
+            id=self.id,
+            name=self.name,
+            availability_zone=self.availability_zone,
+            ec2_key_name=self.ec2_key_name,
+            subnet_id=self.subnet_id,
+            ec2_iam_profile=self.role,
+            log_uri=self.log_uri,
+        )
+        return cluster
+
     def terminate(self):
         self.state = 'TERMINATED'
@@ -129,12 +171,15 @@ class ElasticMapReduceBackend(BaseBackend):

     def __init__(self):
         self.job_flows = {}
+        self.clusters = {}
         self.instance_groups = {}

     def run_job_flow(self, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs):
         job_id = random_job_id()
         job_flow = FakeJobFlow(job_id, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs)
         self.job_flows[job_id] = job_flow
+        cluster = job_flow.create_cluster()
+        self.clusters[cluster.id] = cluster
         return job_flow

     def add_job_flow_steps(self, job_flow_id, steps):
@@ -142,8 +187,12 @@ class ElasticMapReduceBackend(BaseBackend):
         job_flow.add_steps(steps)
         return job_flow

-    def describe_job_flows(self):
-        return self.job_flows.values()
+    def describe_job_flows(self, job_flow_ids=None):
+        jobs = self.job_flows.values()
+        if job_flow_ids:
+            return [job for job in jobs if job.id in job_flow_ids]
+        else:
+            return jobs

     def terminate_job_flows(self, job_ids):
         flows = [flow for flow in self.describe_job_flows() if flow.id in job_ids]
@@ -151,6 +200,12 @@ class ElasticMapReduceBackend(BaseBackend):
             flow.terminate()
         return flows

+    def list_clusters(self):
+        return self.clusters.values()
+
+    def get_cluster(self, cluster_id):
+        return self.clusters[cluster_id]
+
     def get_instance_groups(self, instance_group_ids):
         return [
             group for group_id, group
@@ -181,5 +236,13 @@ class ElasticMapReduceBackend(BaseBackend):
         job = self.job_flows[job_id]
         job.set_visibility(visible_to_all_users)

+    def add_tags(self, cluster_id, tags):
+        cluster = self.get_cluster(cluster_id)
+        cluster.add_tags(tags)
+
+    def remove_tags(self, cluster_id, tag_keys):
+        cluster = self.get_cluster(cluster_id)
+        cluster.remove_tags(tag_keys)
+

 emr_backend = ElasticMapReduceBackend()
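Tag operations act on the Cluster objects registered by run_job_flow, not on the job flows themselves. A usage sketch against the backend singleton (assumes moto is importable and at least one job flow has been run):

    from moto.emr.models import emr_backend

    # Every run_job_flow(...) call above also registered a Cluster.
    for cluster in emr_backend.list_clusters():
        emr_backend.add_tags(cluster.id, {'team': 'data'})
        assert emr_backend.get_cluster(cluster.id).tags['team'] == 'data'
        emr_backend.remove_tags(cluster.id, ['team'])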
@@ -2,6 +2,7 @@ from __future__ import unicode_literals

 from moto.core.responses import BaseResponse
 from .models import emr_backend
+from .utils import tags_from_query_string


 class ElasticMapReduceResponse(BaseResponse):
@@ -30,7 +31,8 @@ class ElasticMapReduceResponse(BaseResponse):
         return template.render(job_flow=job_flow)

     def describe_job_flows(self):
-        job_flows = emr_backend.describe_job_flows()
+        job_flow_ids = self._get_multi_param("JobFlowIds.member")
+        job_flows = emr_backend.describe_job_flows(job_flow_ids)
         template = self.response_template(DESCRIBE_JOB_FLOWS_TEMPLATE)
         return template.render(job_flows=job_flows)
|
|||||||
template = self.response_template(SET_VISIBLE_TO_ALL_USERS_TEMPLATE)
|
template = self.response_template(SET_VISIBLE_TO_ALL_USERS_TEMPLATE)
|
||||||
return template.render()
|
return template.render()
|
||||||
|
|
||||||
|
def list_clusters(self):
|
||||||
|
clusters = emr_backend.list_clusters()
|
||||||
|
template = self.response_template(LIST_CLUSTERS_TEMPLATE)
|
||||||
|
return template.render(clusters=clusters)
|
||||||
|
|
||||||
|
def describe_cluster(self):
|
||||||
|
cluster_id = self._get_param('ClusterId')
|
||||||
|
cluster = emr_backend.get_cluster(cluster_id)
|
||||||
|
template = self.response_template(DESCRIBE_CLUSTER_TEMPLATE)
|
||||||
|
return template.render(cluster=cluster)
|
||||||
|
|
||||||
|
def add_tags(self):
|
||||||
|
cluster_id = self._get_param('ResourceId')
|
||||||
|
tags = tags_from_query_string(self.querystring)
|
||||||
|
emr_backend.add_tags(cluster_id, tags)
|
||||||
|
template = self.response_template(ADD_TAGS_TEMPLATE)
|
||||||
|
return template.render()
|
||||||
|
|
||||||
|
def remove_tags(self):
|
||||||
|
cluster_id = self._get_param('ResourceId')
|
||||||
|
tag_keys = self._get_multi_param('TagKeys.member')
|
||||||
|
emr_backend.remove_tags(cluster_id, tag_keys)
|
||||||
|
template = self.response_template(REMOVE_TAGS_TEMPLATE)
|
||||||
|
return template.render()
|
||||||
|
|
||||||
|
|
||||||
RUN_JOB_FLOW_TEMPLATE = """<RunJobFlowResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
|
RUN_JOB_FLOW_TEMPLATE = """<RunJobFlowResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
|
||||||
<RunJobFlowResult>
|
<RunJobFlowResult>
|
||||||
@@ -163,6 +190,85 @@ ADD_JOB_FLOW_STEPS_TEMPLATE = """<AddJobFlowStepsResponse xmlns="http://elasticm
    </ResponseMetadata>
 </AddJobFlowStepsResponse>"""

+LIST_CLUSTERS_TEMPLATE = """<ListClustersResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <Clusters>
+    {% for cluster in clusters %}
+    <member>
+      <Id>{{ cluster.id }}</Id>
+      <Name>{{ cluster.name }}</Name>
+      <NormalizedInstanceHours>{{ cluster.normalized_instance_hours }}</NormalizedInstanceHours>
+      <Status>
+        <State>{{ cluster.state }}</State>
+        <StateChangeReason>
+          <Code></Code>
+          <Message></Message>
+        </StateChangeReason>
+        <Timeline></Timeline>
+      </Status>
+    </member>
+    {% endfor %}
+  </Clusters>
+  <Marker></Marker>
+  <ResponseMetadata>
+    <RequestId>
+      2690d7eb-ed86-11dd-9877-6fad448a8418
+    </RequestId>
+  </ResponseMetadata>
+</ListClustersResponse>"""
+
+DESCRIBE_CLUSTER_TEMPLATE = """<DescribeClusterResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <DescribeClusterResult>
+    <Cluster>
+      <Id>{{ cluster.id }}</Id>
+      <Tags>
+        {% for tag_key, tag_value in cluster.tags.items() %}
+        <member>
+          <Key>{{ tag_key }}</Key>
+          <Value>{{ tag_value }}</Value>
+        </member>
+        {% endfor %}
+      </Tags>
+      <Ec2InstanceAttributes>
+        <Ec2AvailabilityZone>{{ cluster.availability_zone }}</Ec2AvailabilityZone>
+        <Ec2SubnetId>{{ cluster.subnet_id }}</Ec2SubnetId>
+        <Ec2KeyName>{{ cluster.ec2_key_name }}</Ec2KeyName>
+      </Ec2InstanceAttributes>
+      <RunningAmiVersion>{{ cluster.running_ami_version }}</RunningAmiVersion>
+      <VisibleToAllUsers>{{ cluster.visible_to_all_users }}</VisibleToAllUsers>
+      <Status>
+        <StateChangeReason>
+          <Message>Terminated by user request</Message>
+          <Code>USER_REQUEST</Code>
+        </StateChangeReason>
+        <State>{{ cluster.state }}</State>
+        <Timeline>
+          <CreationDateTime>2014-01-24T01:21:21Z</CreationDateTime>
+          <ReadyDateTime>2014-01-24T01:25:26Z</ReadyDateTime>
+          <EndDateTime>2014-01-24T02:19:46Z</EndDateTime>
+        </Timeline>
+      </Status>
+      <AutoTerminate>{{ cluster.auto_terminate }}</AutoTerminate>
+      <Name>{{ cluster.name }}</Name>
+      <RequestedAmiVersion>{{ cluster.requested_ami_version }}</RequestedAmiVersion>
+      <Applications>
+        {% for application in cluster.applications %}
+        <member>
+          <Name>{{ application.name }}</Name>
+          <Version>{{ application.version }}</Version>
+        </member>
+        {% endfor %}
+      </Applications>
+      <TerminationProtected>{{ cluster.termination_protection }}</TerminationProtected>
+      <MasterPublicDnsName>ec2-184-0-0-1.us-west-1.compute.amazonaws.com</MasterPublicDnsName>
+      <NormalizedInstanceHours>{{ cluster.normalized_instance_hours }}</NormalizedInstanceHours>
+      <ServiceRole>{{ cluster.service_role }}</ServiceRole>
+    </Cluster>
+  </DescribeClusterResult>
+  <ResponseMetadata>
+    <RequestId>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</RequestId>
+  </ResponseMetadata>
+</DescribeClusterResponse>"""
+
 ADD_INSTANCE_GROUPS_TEMPLATE = """<AddInstanceGroupsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
   <InstanceGroupIds>{% for instance_group in instance_groups %}{{ instance_group.id }}{% if loop.index != loop.length %},{% endif %}{% endfor %}</InstanceGroupIds>
 </AddInstanceGroupsResponse>"""
@@ -182,3 +288,20 @@ SET_VISIBLE_TO_ALL_USERS_TEMPLATE = """<SetVisibleToAllUsersResponse xmlns="http
     </RequestId>
   </ResponseMetadata>
 </SetVisibleToAllUsersResponse>"""
+
+
+ADD_TAGS_TEMPLATE = """<AddTagsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <ResponseMetadata>
+    <RequestId>
+      2690d7eb-ed86-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</AddTagsResponse>"""
+
+REMOVE_TAGS_TEMPLATE = """<RemoveTagsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <ResponseMetadata>
+    <RequestId>
+      2690d7eb-ed86-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</RemoveTagsResponse>"""
@@ -14,3 +14,19 @@ def random_instance_group_id(size=13):
     chars = list(range(10)) + list(string.ascii_uppercase)
     job_tag = ''.join(six.text_type(random.choice(chars)) for x in range(size))
     return 'i-{0}'.format(job_tag)
+
+
+def tags_from_query_string(querystring_dict):
+    prefix = 'Tags'
+    suffix = 'Key'
+    response_values = {}
+    for key, value in querystring_dict.items():
+        if key.startswith(prefix) and key.endswith(suffix):
+            tag_index = key.replace(prefix + ".", "").replace("." + suffix, "")
+            tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0]
+            tag_value_key = "Tags.{0}.Value".format(tag_index)
+            if tag_value_key in querystring_dict:
+                response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
+            else:
+                response_values[tag_key] = None
+    return response_values
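A worked example of the parser: values arrive as single-item lists (the parsed-querystring shape used throughout moto), and a Key with no matching Value maps to None:

    from moto.emr.utils import tags_from_query_string

    querystring = {
        'Tags.member.1.Key': ['team'],
        'Tags.member.1.Value': ['data'],
        'Tags.member.2.Key': ['env'],   # no Tags.member.2.Value entry
    }
    assert tags_from_query_string(querystring) == {'team': 'data', 'env': None}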
@@ -4,7 +4,7 @@ from boto.exception import BotoServerError
 from moto.core import BaseBackend
 from .utils import random_access_key, random_alphanumeric, random_resource_id
 from datetime import datetime
+import base64


 class Role(object):
@@ -135,7 +135,7 @@ class User(object):
             datetime.utcnow(),
             "%Y-%m-%d-%H-%M-%S"
         )
+        self.arn = 'arn:aws:iam::123456789012:user/{0}'.format(name)
         self.policies = {}
         self.access_keys = []
         self.password = None
@@ -184,6 +184,45 @@ class User(object):
             raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
         raise UnformattedGetAttTemplateException()

+    def to_csv(self):
+        date_format = '%Y-%m-%dT%H:%M:%S+00:00'
+        date_created = datetime.strptime(self.created, '%Y-%m-%d-%H-%M-%S')
+        # aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A
+        if not self.password:
+            password_enabled = 'false'
+            password_last_used = 'not_supported'
+        else:
+            password_enabled = 'true'
+            password_last_used = 'no_information'
+
+        if len(self.access_keys) == 0:
+            access_key_1_active = 'false'
+            access_key_1_last_rotated = 'N/A'
+            access_key_2_active = 'false'
+            access_key_2_last_rotated = 'N/A'
+        elif len(self.access_keys) == 1:
+            access_key_1_active = 'true'
+            access_key_1_last_rotated = date_created.strftime(date_format)
+            access_key_2_active = 'false'
+            access_key_2_last_rotated = 'N/A'
+        else:
+            access_key_1_active = 'true'
+            access_key_1_last_rotated = date_created.strftime(date_format)
+            access_key_2_active = 'true'
+            access_key_2_last_rotated = date_created.strftime(date_format)
+
+        return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A'.format(self.name,
+                                                                                                        self.arn,
+                                                                                                        date_created.strftime(date_format),
+                                                                                                        password_enabled,
+                                                                                                        password_last_used,
+                                                                                                        date_created.strftime(date_format),
+                                                                                                        access_key_1_active,
+                                                                                                        access_key_1_last_rotated,
+                                                                                                        access_key_2_active,
+                                                                                                        access_key_2_last_rotated
+                                                                                                        )
+
+
 class IAMBackend(BaseBackend):
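The access-key columns depend only on how many keys the user has; a standalone restatement of that branch, with the three cases made explicit:

    def access_key_columns(n_keys, rotated):
        # (key_1_active, key_1_last_rotated, key_2_active, key_2_last_rotated)
        if n_keys == 0:
            return ('false', 'N/A', 'false', 'N/A')
        elif n_keys == 1:
            return ('true', rotated, 'false', 'N/A')
        else:
            return ('true', rotated, 'true', rotated)

    assert access_key_columns(0, None) == ('false', 'N/A', 'false', 'N/A')
    assert access_key_columns(1, '2015-02-02T20:02:02+00:00')[0] == 'true'
    assert access_key_columns(2, '2015-02-02T20:02:02+00:00')[2] == 'true'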
@@ -193,6 +232,7 @@ class IAMBackend(BaseBackend):
         self.certificates = {}
         self.groups = {}
         self.users = {}
+        self.credential_report = None
         super(IAMBackend, self).__init__()

     def create_role(self, role_name, assume_role_policy_document, path):
@@ -394,5 +434,18 @@ class IAMBackend(BaseBackend):
         except KeyError:
             raise BotoServerError(404, 'Not Found')

+    def report_generated(self):
+        return self.credential_report
+
+    def generate_report(self):
+        self.credential_report = True
+
+    def get_credential_report(self):
+        if not self.credential_report:
+            raise BotoServerError(410, 'ReportNotPresent')
+        report = 'user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n'
+        for user in self.users:
+            report += self.users[user].to_csv()
+        return base64.b64encode(report.encode('ascii')).decode('ascii')
+

 iam_backend = IAMBackend()
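Round-trip sketch of the new report surface (assumes moto is importable): generation must happen before retrieval, and the body comes back as base64-encoded CSV:

    import base64
    from moto.iam.models import iam_backend

    iam_backend.generate_report()
    encoded = iam_backend.get_credential_report()
    csv_text = base64.b64decode(encoded).decode('ascii')
    assert csv_text.startswith('user,arn,user_creation_time,')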
@@ -219,6 +219,18 @@ class IamResponse(BaseResponse):
         template = self.response_template(GENERIC_EMPTY_TEMPLATE)
         return template.render(name='DeleteUser')

+    def generate_credential_report(self):
+        if iam_backend.report_generated():
+            template = self.response_template(CREDENTIAL_REPORT_GENERATED)
+        else:
+            template = self.response_template(CREDENTIAL_REPORT_GENERATING)
+        iam_backend.generate_report()
+        return template.render()
+
+    def get_credential_report(self):
+        report = iam_backend.get_credential_report()
+        template = self.response_template(CREDENTIAL_REPORT)
+        return template.render(report=report)
+

 GENERIC_EMPTY_TEMPLATE = """<{{ name }}Response>
    <ResponseMetadata>
@@ -559,3 +571,34 @@ LIST_ACCESS_KEYS_TEMPLATE = """<ListAccessKeysResponse>
   <RequestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</RequestId>
  </ResponseMetadata>
 </ListAccessKeysResponse>"""
+
+CREDENTIAL_REPORT_GENERATING = """
+<GenerateCredentialReportResponse>
+   <GenerateCredentialReportResult>
+      <state>STARTED</state>
+      <description>No report exists. Starting a new report generation task</description>
+   </GenerateCredentialReportResult>
+   <ResponseMetadata>
+      <RequestId>fa788a82-aa8a-11e4-a278-1786c418872b"</RequestId>
+   </ResponseMetadata>
+</GenerateCredentialReportResponse>"""
+
+CREDENTIAL_REPORT_GENERATED = """<GenerateCredentialReportResponse>
+   <GenerateCredentialReportResult>
+      <state>COMPLETE</state>
+   </GenerateCredentialReportResult>
+   <ResponseMetadata>
+      <RequestId>fa788a82-aa8a-11e4-a278-1786c418872b"</RequestId>
+   </ResponseMetadata>
+</GenerateCredentialReportResponse>"""
+
+CREDENTIAL_REPORT = """<GetCredentialReportResponse>
+   <GetCredentialReportResult>
+     <content>{{ report }}</content>
+     <GeneratedTime>2015-02-02T20:02:02Z</GeneratedTime>
+     <ReportFormat>text/csv</ReportFormat>
+   </GetCredentialReportResult>
+   <ResponseMetadata>
+      <RequestId>fa788a82-aa8a-11e4-a278-1786c418872b"</RequestId>
+   </ResponseMetadata>
+</GetCredentialReportResponse>"""
@@ -199,6 +199,12 @@ class Database(object):
               <PubliclyAccessible>{{ database.publicly_accessible }}</PubliclyAccessible>
               <AutoMinorVersionUpgrade>{{ database.auto_minor_version_upgrade }}</AutoMinorVersionUpgrade>
               <AllocatedStorage>{{ database.allocated_storage }}</AllocatedStorage>
+              {% if database.iops %}
+              <Iops>{{ database.iops }}</Iops>
+              <StorageType>io1</StorageType>
+              {% else %}
+              <StorageType>{{ database.storage_type }}</StorageType>
+              {% endif %}
               <DBInstanceClass>{{ database.db_instance_class }}</DBInstanceClass>
               <MasterUsername>{{ database.master_username }}</MasterUsername>
               <Endpoint>
@@ -2,7 +2,7 @@ from __future__ import unicode_literals
 from .responses import RDSResponse

 url_bases = [
-    "https?://rds.(.+).amazonaws.com",
+    "https?://rds(\..+)?.amazonaws.com",
 ]

 url_paths = {
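The loosened pattern matters because the RDS endpoint can also be the region-less host rds.amazonaws.com, which the old region-required pattern rejected. A quick check of both patterns:

    import re

    old_pattern = r"https?://rds.(.+).amazonaws.com"
    new_pattern = r"https?://rds(\..+)?.amazonaws.com"

    # Region-less endpoint: only the new pattern matches.
    assert not re.match(old_pattern, "https://rds.amazonaws.com")
    assert re.match(new_pattern, "https://rds.amazonaws.com")
    # Regional endpoints still match.
    assert re.match(new_pattern, "https://rds.us-west-2.amazonaws.com")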
@@ -1,9 +1,73 @@
 from __future__ import unicode_literals
+from moto.core.exceptions import RESTError


-class BucketAlreadyExists(Exception):
-    pass
+ERROR_WITH_BUCKET_NAME = """{% extends 'error' %}
+{% block extra %}<BucketName>{{ bucket }}</BucketName>{% endblock %}
+"""
+
+
+class S3ClientError(RESTError):
+    pass


-class MissingBucket(Exception):
-    pass
+class BucketError(S3ClientError):
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault('template', 'bucket_error')
+        self.templates['bucket_error'] = ERROR_WITH_BUCKET_NAME
+        super(BucketError, self).__init__(*args, **kwargs)
+
+
+class BucketAlreadyExists(BucketError):
+    code = 409
+
+    def __init__(self, *args, **kwargs):
+        super(BucketAlreadyExists, self).__init__(
+            "BucketAlreadyExists",
+            ("The requested bucket name is not available. The bucket "
+             "namespace is shared by all users of the system. Please "
+             "select a different name and try again"),
+            *args, **kwargs)
+
+
+class MissingBucket(BucketError):
+    code = 404
+
+    def __init__(self, *args, **kwargs):
+        super(MissingBucket, self).__init__(
+            "NoSuchBucket",
+            "The specified bucket does not exist",
+            *args, **kwargs)
+
+
+class InvalidPartOrder(S3ClientError):
+    code = 400
+
+    def __init__(self, *args, **kwargs):
+        super(InvalidPartOrder, self).__init__(
+            "InvalidPartOrder",
+            ("The list of parts was not in ascending order. The parts "
+             "list must be specified in order by part number."),
+            *args, **kwargs)
+
+
+class InvalidPart(S3ClientError):
+    code = 400
+
+    def __init__(self, *args, **kwargs):
+        super(InvalidPart, self).__init__(
+            "InvalidPart",
+            ("One or more of the specified parts could not be found. "
+             "The part might not have been uploaded, or the specified "
+             "entity tag might not have matched the part's entity tag."),
+            *args, **kwargs)
+
+
+class EntityTooSmall(S3ClientError):
+    code = 400
+
+    def __init__(self, *args, **kwargs):
+        super(EntityTooSmall, self).__init__(
+            "EntityTooSmall",
+            "Your proposed upload is smaller than the minimum allowed object size.",
+            *args, **kwargs)
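What a caller sees now (assumes moto is importable): the exception carries both the HTTP status and a pre-rendered XML body, with the bucket name injected through the extended template:

    from moto.s3.exceptions import MissingBucket

    try:
        raise MissingBucket(bucket='my-bucket')
    except MissingBucket as err:
        assert err.code == 404  # HTTP status from the class attribute
        assert '<Code>NoSuchBucket</Code>' in err.description
        assert '<BucketName>my-bucket</BucketName>' in err.description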
@@ -8,9 +8,10 @@ import itertools
 import codecs
 import six

+from bisect import insort
 from moto.core import BaseBackend
 from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
-from .exceptions import BucketAlreadyExists, MissingBucket
+from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall
 from .utils import clean_key_name, _VersionedKeyStore

 UPLOAD_ID_BYTES = 43
@@ -118,25 +119,32 @@ class FakeMultipart(object):
         self.key_name = key_name
         self.metadata = metadata
         self.parts = {}
+        self.partlist = []  # ordered list of part ID's
         rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
         self.id = rand_b64.decode('utf-8').replace('=', '').replace('+', '')

-    def complete(self):
+    def complete(self, body):
         decode_hex = codecs.getdecoder("hex_codec")
         total = bytearray()
         md5s = bytearray()
-        last_part_name = len(self.list_parts())

-        for part in self.list_parts():
-            if part.name != last_part_name and len(part.value) < UPLOAD_PART_MIN_SIZE:
-                return None, None
+        last = None
+        count = 0
+        for pn, etag in body:
+            part = self.parts.get(pn)
+            if part is None or part.etag != etag:
+                raise InvalidPart()
+            if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE:
+                raise EntityTooSmall()
             part_etag = part.etag.replace('"', '')
             md5s.extend(decode_hex(part_etag)[0])
             total.extend(part.value)
+            last = part
+            count += 1

         etag = hashlib.md5()
         etag.update(bytes(md5s))
-        return total, "{0}-{1}".format(etag.hexdigest(), last_part_name)
+        return total, "{0}-{1}".format(etag.hexdigest(), count)

     def set_part(self, part_id, value):
         if part_id < 1:
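The new body argument is the ordered sequence of (part number, ETag) pairs the client sends in its CompleteMultipartUpload XML; a sketch of its shape (ETag values illustrative):

    # Each pair must match a stored part's number and ETag, otherwise
    # complete() raises InvalidPart; an undersized non-final part raises
    # EntityTooSmall.
    body = [
        (1, '"79b281060d337b9b2b84ccf390adcf74"'),
        (2, '"b54357faf0632cce46e942fa68356b38"'),
    ]
    # value, etag = multipart.complete(body)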
@@ -144,18 +152,12 @@ class FakeMultipart(object):

         key = FakeKey(part_id, value)
         self.parts[part_id] = key
+        insort(self.partlist, part_id)
         return key

     def list_parts(self):
-        parts = []
-        for part_id, index in enumerate(sorted(self.parts.keys()), start=1):
-            # Make sure part ids are continuous
-            if part_id != index:
-                return
-            parts.append(self.parts[part_id])
-
-        return parts
+        for part_id in self.partlist:
+            yield self.parts[part_id]


 class FakeBucket(object):
@@ -191,7 +193,7 @@ class S3Backend(BaseBackend):

     def create_bucket(self, bucket_name, region_name):
         if bucket_name in self.buckets:
-            raise BucketAlreadyExists()
+            raise BucketAlreadyExists(bucket=bucket_name)
         new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
         self.buckets[bucket_name] = new_bucket
         return new_bucket
@@ -203,7 +205,7 @@ class S3Backend(BaseBackend):
         try:
             return self.buckets[bucket_name]
         except KeyError:
-            raise MissingBucket()
+            raise MissingBucket(bucket=bucket_name)

     def delete_bucket(self, bucket_name):
         bucket = self.get_bucket(bucket_name)
@@ -279,10 +281,10 @@ class S3Backend(BaseBackend):

         return new_multipart

-    def complete_multipart(self, bucket_name, multipart_id):
+    def complete_multipart(self, bucket_name, multipart_id, body):
         bucket = self.get_bucket(bucket_name)
         multipart = bucket.multiparts[multipart_id]
-        value, etag = multipart.complete()
+        value, etag = multipart.complete(body)
         if value is None:
             return
         del bucket.multiparts[multipart_id]
@@ -297,7 +299,7 @@ class S3Backend(BaseBackend):

     def list_multipart(self, bucket_name, multipart_id):
         bucket = self.get_bucket(bucket_name)
-        return bucket.multiparts[multipart_id].list_parts()
+        return list(bucket.multiparts[multipart_id].list_parts())

     def get_all_multiparts(self, bucket_name):
         bucket = self.get_bucket(bucket_name)
@@ -7,7 +7,7 @@ from six.moves.urllib.parse import parse_qs, urlparse

 from moto.core.responses import _TemplateEnvironmentMixin

-from .exceptions import BucketAlreadyExists, MissingBucket
+from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder
 from .models import s3_backend
 from .utils import bucket_name_from_url, metadata_from_headers
 from xml.dom import minidom
@@ -35,8 +35,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
     def bucket_response(self, request, full_url, headers):
         try:
             response = self._bucket_response(request, full_url, headers)
-        except MissingBucket:
-            return 404, headers, ""
+        except S3ClientError as s3error:
+            response = s3error.code, headers, s3error.description

         if isinstance(response, six.string_types):
             return 200, headers, response.encode("utf-8")
@@ -72,12 +72,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
         raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))

     def _bucket_response_head(self, bucket_name, headers):
-        try:
-            self.backend.get_bucket(bucket_name)
-        except MissingBucket:
-            return 404, headers, ""
-        else:
-            return 200, headers, ""
+        self.backend.get_bucket(bucket_name)
+        return 200, headers, ""

     def _bucket_response_get(self, bucket_name, querystring, headers):
         if 'uploads' in querystring:
@@ -127,11 +123,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
                 is_truncated='false',
             )

-        try:
-            bucket = self.backend.get_bucket(bucket_name)
-        except MissingBucket:
-            return 404, headers, ""
+        bucket = self.backend.get_bucket(bucket_name)

         prefix = querystring.get('prefix', [None])[0]
         delimiter = querystring.get('delimiter', [None])[0]
         result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
@ -161,17 +153,12 @@ class ResponseObject(_TemplateEnvironmentMixin):
|
|||||||
# us-east-1 has different behavior
|
# us-east-1 has different behavior
|
||||||
new_bucket = self.backend.get_bucket(bucket_name)
|
new_bucket = self.backend.get_bucket(bucket_name)
|
||||||
else:
|
else:
|
||||||
return 409, headers, ""
|
raise
|
||||||
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
|
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
|
||||||
return 200, headers, template.render(bucket=new_bucket)
|
return 200, headers, template.render(bucket=new_bucket)
|
||||||
|
|
||||||
def _bucket_response_delete(self, bucket_name, headers):
|
def _bucket_response_delete(self, bucket_name, headers):
|
||||||
try:
|
removed_bucket = self.backend.delete_bucket(bucket_name)
|
||||||
removed_bucket = self.backend.delete_bucket(bucket_name)
|
|
||||||
except MissingBucket:
|
|
||||||
# Non-existant bucket
|
|
||||||
template = self.response_template(S3_DELETE_NON_EXISTING_BUCKET)
|
|
||||||
return 404, headers, template.render(bucket_name=bucket_name)
|
|
||||||
|
|
||||||
if removed_bucket:
|
if removed_bucket:
|
||||||
# Bucket exists
|
# Bucket exists
|
||||||
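Note: two more handlers lean on that hierarchy here. In the bucket-creation path, the bare raise re-raises the in-flight BucketAlreadyExists so the generic handler can render it instead of hand-building a 409; and _bucket_response_delete lets the backend raise for a missing bucket rather than rendering S3_DELETE_NON_EXISTING_BUCKET inline (that template is deleted further down). A self-contained toy illustration of the bare-raise control flow — none of these names are moto APIs:

    # Toy sketch of the bare-raise pattern; backend_create/create_bucket are
    # hypothetical stand-ins for the moto backend calls.
    class BucketAlreadyExists(Exception):
        code = 409  # assumed attribute, mirroring the S3ClientError pattern

    buckets = {}

    def backend_create(name):
        if name in buckets:
            raise BucketAlreadyExists()
        buckets[name] = object()
        return buckets[name]

    def create_bucket(name, region):
        try:
            return backend_create(name)
        except BucketAlreadyExists:
            if region == 'us-east-1':
                return buckets[name]   # us-east-1 tolerates re-creation
            raise                      # re-raise for the generic error handler

    create_bucket('b', 'us-east-1')
    create_bucket('b', 'us-east-1')        # ok: returns the existing bucket
    try:
        create_bucket('b', 'us-west-2')    # propagates to the outer handler
    except BucketAlreadyExists as e:
        assert e.code == 409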
@ -228,17 +215,43 @@ class ResponseObject(_TemplateEnvironmentMixin):
 
         return 200, headers, template.render(deleted=deleted_names, delete_errors=error_names)
 
+    def _handle_range_header(self, request, headers, response_content):
+        length = len(response_content)
+        last = length - 1
+        _, rspec = request.headers.get('range').split('=')
+        if ',' in rspec:
+            raise NotImplementedError(
+                "Multiple range specifiers not supported")
+        toint = lambda i: int(i) if i else None
+        begin, end = map(toint, rspec.split('-'))
+        if begin is not None:  # byte range
+            end = last if end is None else end
+        elif end is not None:  # suffix byte range
+            begin = length - end
+            end = last
+        else:
+            return 400, headers, ""
+        if begin < 0 or end > length or begin > min(end, last):
+            return 416, headers, ""
+        headers['content-range'] = "bytes {0}-{1}/{2}".format(
+            begin, end, length)
+        return 206, headers, response_content[begin:end + 1]
+
     def key_response(self, request, full_url, headers):
         try:
             response = self._key_response(request, full_url, headers)
-        except MissingBucket:
-            return 404, headers, ""
+        except S3ClientError as s3error:
+            response = s3error.code, headers, s3error.description
 
         if isinstance(response, six.string_types):
-            return 200, headers, response
+            status_code = 200
+            response_content = response
         else:
             status_code, headers, response_content = response
-            return status_code, headers, response_content
+
+        if status_code == 200 and 'range' in request.headers:
+            return self._handle_range_header(request, headers, response_content)
+        return status_code, headers, response_content
 
     def _key_response(self, request, full_url, headers):
         parsed_url = urlparse(full_url)
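Note: _handle_range_header covers the two single-range forms of the HTTP Range header — bytes=begin-end (end optional, defaulting to the last byte) and the suffix form bytes=-N for the final N bytes — and answers 206 with an inclusive slice. A standalone restatement of that arithmetic for a 100-byte body, as a sanity check:

    # Standalone restatement of the begin/end arithmetic for length == 100.
    def resolve(begin, end, length=100):
        last = length - 1
        if begin is not None:        # byte range: bytes=begin-end
            end = last if end is None else end
        elif end is not None:        # suffix byte range: bytes=-N
            begin, end = length - end, last
        return begin, end            # served as response_content[begin:end + 1]

    assert resolve(10, 19) == (10, 19)     # bytes=10-19
    assert resolve(90, None) == (90, 99)   # bytes=90-  (open-ended)
    assert resolve(None, 30) == (70, 99)   # bytes=-30  (last 30 bytes)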
@ -364,6 +377,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
             template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
             return 204, headers, template.render(bucket=removed_key)
 
+    def _complete_multipart_body(self, body):
+        ps = minidom.parseString(body).getElementsByTagName('Part')
+        prev = 0
+        for p in ps:
+            pn = int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText)
+            if pn <= prev:
+                raise InvalidPartOrder()
+            yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
+
     def _key_response_post(self, request, body, parsed_url, bucket_name, query, key_name, headers):
         if body == b'' and parsed_url.query == 'uploads':
             metadata = metadata_from_headers(request.headers)
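Note: _complete_multipart_body is a generator over the CompleteMultipartUpload document that clients POST to finish a multipart upload, yielding (part number, ETag) pairs and raising InvalidPartOrder when the ordering check fails. (As committed, prev is never advanced inside the loop, so the explicit guard only rejects non-positive part numbers; the out-of-order test added below presumably trips the backend's ETag comparison instead.) An example of the body it parses, with invented ETag values:

    # Example CompleteMultipartUpload body; the ETag values are invented.
    from xml.dom import minidom

    body = b"""<CompleteMultipartUpload>
      <Part><PartNumber>1</PartNumber><ETag>"aaa111"</ETag></Part>
      <Part><PartNumber>2</PartNumber><ETag>"bbb222"</ETag></Part>
    </CompleteMultipartUpload>"""

    parts = [
        (int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText),
         p.getElementsByTagName('ETag')[0].firstChild.wholeText)
        for p in minidom.parseString(body).getElementsByTagName('Part')
    ]
    assert parts == [(1, '"aaa111"'), (2, '"bbb222"')]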
@ -378,18 +400,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
             return 200, headers, response
 
         if 'uploadId' in query:
+            body = self._complete_multipart_body(body)
             upload_id = query['uploadId'][0]
-            key = self.backend.complete_multipart(bucket_name, upload_id)
-            if key is not None:
-                template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
-                return template.render(
-                    bucket_name=bucket_name,
-                    key_name=key.name,
-                    etag=key.etag,
-                )
-            template = self.response_template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
-            return 400, headers, template.render()
+            key = self.backend.complete_multipart(bucket_name, upload_id, body)
+            template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
+            return template.render(
+                bucket_name=bucket_name,
+                key_name=key.name,
+                etag=key.etag,
+            )
         elif parsed_url.query == 'restore':
             es = minidom.parseString(body).getElementsByTagName('Days')
             days = es[0].childNodes[0].wholeText
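Note: with the parsed body handed to complete_multipart, the handler no longer checks key for None, and the EntityTooSmall template is removed below. The size check has presumably moved into the backend, which would raise an S3ClientError subclass for the generic key_response handler to render — the new test helper that patches moto.s3.models.UPLOAD_PART_MIN_SIZE points the same way. A hedged sketch of that assumed backend-side validation:

    # Sketch only -- assumed backend-side check replacing the removed
    # S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR template; the actual code in
    # moto/s3/models.py is not part of this hunk.
    UPLOAD_PART_MIN_SIZE = 5242880  # 5 MB; patched by reduced_min_part_size below

    class EntityTooSmall(Exception):   # assumed S3ClientError subclass
        code = 400

    def validate_part_sizes(parts):
        # every part except the last must meet the minimum part size
        for part in parts[:-1]:
            if len(part) < UPLOAD_PART_MIN_SIZE:
                raise EntityTooSmall()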
@ -461,14 +480,6 @@ S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.c
     </DeleteBucketResponse>
 </DeleteBucketResponse>"""
 
-S3_DELETE_NON_EXISTING_BUCKET = """<?xml version="1.0" encoding="UTF-8"?>
-<Error><Code>NoSuchBucket</Code>
-<Message>The specified bucket does not exist</Message>
-<BucketName>{{ bucket_name }}</BucketName>
-<RequestId>asdfasdfsadf</RequestId>
-<HostId>asfasdfsfsafasdf</HostId>
-</Error>"""
-
 S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
 <Error><Code>BucketNotEmpty</Code>
 <Message>The bucket you tried to delete is not empty</Message>
@ -609,14 +620,6 @@ S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
 </CompleteMultipartUploadResult>
 """
 
-S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-<Code>EntityTooSmall</Code>
-<Message>Your proposed upload is smaller than the minimum allowed object size.</Message>
-<RequestId>asdfasdfsdafds</RequestId>
-<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
-</Error>"""
-
 S3_ALL_MULTIPARTS = """<?xml version="1.0" encoding="UTF-8"?>
 <ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
 <Bucket>{{ bucket_name }}</Bucket>

setup.py
@ -5,7 +5,6 @@ from setuptools import setup, find_packages
 install_requires = [
     "Jinja2",
     "boto",
-    "dicttoxml",
     "flask",
     "httpretty>=0.6.1",
     "requests",
@ -22,7 +21,7 @@ if sys.version_info < (2, 7):
 
 setup(
     name='moto',
-    version='0.3.9',
+    version='0.4.0',
     description='A library that allows your python tests to easily'
                 ' mock out the boto library',
     author='Steve Pulec',
@ -936,3 +936,51 @@ def test_sns_topic():
     topic_name_output.value.should.equal("my_topics")
     topic_arn_output = [x for x in stack.outputs if x.key == 'topic_arn'][0]
     topic_arn_output.value.should.equal(topic_arn)
+
+
+@mock_cloudformation
+def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc():
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "internetgateway": {
+                "Type": "AWS::EC2::InternetGateway"
+            },
+            "testvpc": {
+                "Type": "AWS::EC2::VPC",
+                "Properties": {
+                    "CidrBlock": "10.0.0.0/16",
+                    "EnableDnsHostnames": "true",
+                    "EnableDnsSupport": "true",
+                    "InstanceTenancy": "default"
+                },
+            },
+            "vpcgatewayattachment": {
+                "Type": "AWS::EC2::VPCGatewayAttachment",
+                "Properties": {
+                    "InternetGatewayId": {
+                        "Ref": "internetgateway"
+                    },
+                    "VpcId": {
+                        "Ref": "testvpc"
+                    }
+                },
+            },
+        }
+    }
+
+    template_json = json.dumps(template)
+    cf_conn = boto.cloudformation.connect_to_region("us-west-1")
+    cf_conn.create_stack(
+        "test_stack",
+        template_body=template_json,
+    )
+
+    vpc_conn = boto.vpc.connect_to_region("us-west-1")
+    vpc = vpc_conn.get_all_vpcs()[0]
+    igws = vpc_conn.get_all_internet_gateways(
+        filters={'attachment.vpc-id': vpc.id}
+    )
+
+    igws.should.have.length_of(1)
@ -76,14 +76,14 @@ def test_elastic_network_interfaces_with_groups():
     subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
     security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
     security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
-    conn.create_network_interface(subnet.id, groups=[security_group1.id,security_group2.id])
+    conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id])
 
     all_enis = conn.get_all_network_interfaces()
     all_enis.should.have.length_of(1)
 
     eni = all_enis[0]
     eni.groups.should.have.length_of(2)
-    set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
+    set([group.id for group in eni.groups]).should.equal(set([security_group1.id, security_group2.id]))
 
 
 @requires_boto_gte("2.12.0")
@ -122,25 +122,30 @@ def test_elastic_network_interfaces_filtering():
     security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
     security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
 
-    eni1 = conn.create_network_interface(subnet.id, groups=[security_group1.id,security_group2.id])
+    eni1 = conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id])
     eni2 = conn.create_network_interface(subnet.id, groups=[security_group1.id])
     eni3 = conn.create_network_interface(subnet.id)
 
     all_enis = conn.get_all_network_interfaces()
     all_enis.should.have.length_of(3)
 
+    # Filter by NetworkInterfaceId
+    enis_by_id = conn.get_all_network_interfaces([eni1.id])
+    enis_by_id.should.have.length_of(1)
+    set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))
+
     # Filter by ENI ID
-    enis_by_id = conn.get_all_network_interfaces(filters={'network-interface-id':eni1.id})
+    enis_by_id = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id})
     enis_by_id.should.have.length_of(1)
     set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))
 
     # Filter by Security Group
     enis_by_group = conn.get_all_network_interfaces(filters={'group-id':security_group1.id})
     enis_by_group.should.have.length_of(2)
-    set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id,eni2.id]))
+    set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id]))
 
     # Filter by ENI ID and Security Group
-    enis_by_group = conn.get_all_network_interfaces(filters={'network-interface-id':eni1.id, 'group-id':security_group1.id})
+    enis_by_group = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id, 'group-id': security_group1.id})
     enis_by_group.should.have.length_of(1)
     set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id]))
 
@ -157,7 +162,7 @@ def test_elastic_network_interfaces_cloudformation():
     conn.create_stack(
         "test_stack",
         template_body=template_json,
     )
     ec2_conn = boto.ec2.connect_to_region("us-west-1")
     eni = ec2_conn.get_all_network_interfaces()[0]
 
@ -491,23 +491,23 @@ def test_instance_with_nic_attach_detach():
     eni = conn.create_network_interface(subnet.id, groups=[security_group2.id])
 
     # Check initial instance and ENI data
-    instance.interfaces.should.have.length_of(0)
+    instance.interfaces.should.have.length_of(1)
 
     eni.groups.should.have.length_of(1)
     set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
 
     # Attach
-    conn.attach_network_interface(eni.id, instance.id, device_index=0)
+    conn.attach_network_interface(eni.id, instance.id, device_index=1)
 
     # Check attached instance and ENI data
     instance.update()
-    instance.interfaces.should.have.length_of(1)
-    instance_eni = instance.interfaces[0]
+    instance.interfaces.should.have.length_of(2)
+    instance_eni = instance.interfaces[1]
     instance_eni.id.should.equal(eni.id)
     instance_eni.groups.should.have.length_of(2)
     set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
 
-    eni = conn.get_all_network_interfaces(eni.id)[0]
+    eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0]
     eni.groups.should.have.length_of(2)
     set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
 
@ -516,9 +516,9 @@ def test_instance_with_nic_attach_detach():
 
     # Check detached instance and ENI data
     instance.update()
-    instance.interfaces.should.have.length_of(0)
+    instance.interfaces.should.have.length_of(1)
 
-    eni = conn.get_all_network_interfaces(eni.id)[0]
+    eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0]
     eni.groups.should.have.length_of(1)
     set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
 
@ -530,6 +530,18 @@ def test_instance_with_nic_attach_detach():
     cm.exception.request_id.should_not.be.none
 
 
+@mock_ec2
+def test_ec2_classic_has_public_ip_address():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
+    instance = reservation.instances[0]
+    instance.ip_address.should_not.equal(None)
+    instance.public_dns_name.should.contain(instance.ip_address)
+
+    instance.private_ip_address.should_not.equal(None)
+    instance.private_dns_name.should.contain(instance.private_ip_address)
+
+
 @mock_ec2
 def test_run_instance_with_keypair():
     conn = boto.connect_ec2('the_key', 'the_secret')
@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 # Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises
+import tests.backport_assert_raises  # noqa
 from nose.tools import assert_raises
 
 import boto
@ -201,7 +201,7 @@ def test_authorize_group_in_vpc():
 def test_get_all_security_groups():
     conn = boto.connect_ec2()
     sg1 = conn.create_security_group(name='test1', description='test1', vpc_id='vpc-mjm05d27')
-    sg2 = conn.create_security_group(name='test2', description='test2')
+    conn.create_security_group(name='test2', description='test2')
 
     resp = conn.get_all_security_groups(groupnames=['test1'])
     resp.should.have.length_of(1)
@ -232,3 +232,19 @@ def test_authorize_bad_cidr_throws_invalid_parameter_value():
     cm.exception.code.should.equal('InvalidParameterValue')
     cm.exception.status.should.equal(400)
     cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2
+def test_security_group_tagging():
+    conn = boto.connect_vpc()
+    vpc = conn.create_vpc("10.0.0.0/16")
+    sg = conn.create_security_group("test-sg", "Test SG", vpc.id)
+    sg.add_tag("Test", "Tag")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("Test")
+    tag.value.should.equal("Tag")
+
+    group = conn.get_all_security_groups("test-sg")[0]
+    group.tags.should.have.length_of(1)
+    group.tags["Test"].should.equal("Tag")
@ -123,6 +123,31 @@ def test_terminate_job_flow():
|
|||||||
flow.state.should.equal('TERMINATED')
|
flow.state.should.equal('TERMINATED')
|
||||||
|
|
||||||
|
|
||||||
|
@mock_emr
|
||||||
|
def test_describe_job_flows():
|
||||||
|
conn = boto.connect_emr()
|
||||||
|
job1_id = conn.run_jobflow(
|
||||||
|
name='My jobflow',
|
||||||
|
log_uri='s3://some_bucket/jobflow_logs',
|
||||||
|
steps=[]
|
||||||
|
)
|
||||||
|
job2_id = conn.run_jobflow(
|
||||||
|
name='My jobflow',
|
||||||
|
log_uri='s3://some_bucket/jobflow_logs',
|
||||||
|
steps=[]
|
||||||
|
)
|
||||||
|
|
||||||
|
jobs = conn.describe_jobflows()
|
||||||
|
jobs.should.have.length_of(2)
|
||||||
|
|
||||||
|
jobs = conn.describe_jobflows(jobflow_ids=[job2_id])
|
||||||
|
jobs.should.have.length_of(1)
|
||||||
|
jobs[0].jobflowid.should.equal(job2_id)
|
||||||
|
|
||||||
|
first_job = conn.describe_jobflow(job1_id)
|
||||||
|
first_job.jobflowid.should.equal(job1_id)
|
||||||
|
|
||||||
|
|
||||||
@mock_emr
|
@mock_emr
|
||||||
def test_add_steps_to_flow():
|
def test_add_steps_to_flow():
|
||||||
conn = boto.connect_emr()
|
conn = boto.connect_emr()
|
||||||
@ -291,3 +316,61 @@ def test_set_visible_to_all_users():
|
|||||||
|
|
||||||
job_flow = conn.describe_jobflow(job_id)
|
job_flow = conn.describe_jobflow(job_id)
|
||||||
job_flow.visibletoallusers.should.equal('False')
|
job_flow.visibletoallusers.should.equal('False')
|
||||||
|
|
||||||
|
|
||||||
|
@mock_emr
|
||||||
|
def test_list_clusters():
|
||||||
|
conn = boto.connect_emr()
|
||||||
|
conn.run_jobflow(
|
||||||
|
name='My jobflow',
|
||||||
|
log_uri='s3://some_bucket/jobflow_logs',
|
||||||
|
steps=[],
|
||||||
|
)
|
||||||
|
|
||||||
|
summary = conn.list_clusters()
|
||||||
|
clusters = summary.clusters
|
||||||
|
clusters.should.have.length_of(1)
|
||||||
|
cluster = clusters[0]
|
||||||
|
cluster.name.should.equal("My jobflow")
|
||||||
|
cluster.normalizedinstancehours.should.equal('0')
|
||||||
|
cluster.status.state.should.equal("RUNNING")
|
||||||
|
|
||||||
|
|
||||||
|
@mock_emr
|
||||||
|
def test_describe_cluster():
|
||||||
|
conn = boto.connect_emr()
|
||||||
|
job_id = conn.run_jobflow(
|
||||||
|
name='My jobflow',
|
||||||
|
log_uri='s3://some_bucket/jobflow_logs',
|
||||||
|
steps=[],
|
||||||
|
)
|
||||||
|
|
||||||
|
cluster = conn.describe_cluster(job_id)
|
||||||
|
cluster.name.should.equal("My jobflow")
|
||||||
|
cluster.normalizedinstancehours.should.equal('0')
|
||||||
|
cluster.status.state.should.equal("RUNNING")
|
||||||
|
|
||||||
|
|
||||||
|
@mock_emr
|
||||||
|
def test_cluster_tagging():
|
||||||
|
conn = boto.connect_emr()
|
||||||
|
job_id = conn.run_jobflow(
|
||||||
|
name='My jobflow',
|
||||||
|
log_uri='s3://some_bucket/jobflow_logs',
|
||||||
|
steps=[],
|
||||||
|
)
|
||||||
|
cluster_id = job_id
|
||||||
|
conn.add_tags(cluster_id, {"tag1": "val1", "tag2": "val2"})
|
||||||
|
|
||||||
|
cluster = conn.describe_cluster(cluster_id)
|
||||||
|
cluster.tags.should.have.length_of(2)
|
||||||
|
tags = dict((tag.key, tag.value) for tag in cluster.tags)
|
||||||
|
tags['tag1'].should.equal('val1')
|
||||||
|
tags['tag2'].should.equal('val2')
|
||||||
|
|
||||||
|
# Remove a tag
|
||||||
|
conn.remove_tags(cluster_id, ["tag1"])
|
||||||
|
cluster = conn.describe_cluster(cluster_id)
|
||||||
|
cluster.tags.should.have.length_of(1)
|
||||||
|
tags = dict((tag.key, tag.value) for tag in cluster.tags)
|
||||||
|
tags['tag2'].should.equal('val2')
|
||||||
|
@ -1,10 +1,11 @@
 from __future__ import unicode_literals
 import boto
 import sure  # noqa
+import re
 
 from nose.tools import assert_raises, assert_equals, assert_not_equals
 from boto.exception import BotoServerError
+import base64
 from moto import mock_iam
 
 
@ -200,3 +201,24 @@ def test_delete_user():
     conn.delete_user('my-user')
     conn.create_user('my-user')
     conn.delete_user('my-user')
+
+@mock_iam()
+def test_generate_credential_report():
+    conn = boto.connect_iam()
+    result = conn.generate_credential_report()
+    result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('STARTED')
+    result = conn.generate_credential_report()
+    result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('COMPLETE')
+
+@mock_iam()
+def test_get_credential_report():
+    conn = boto.connect_iam()
+    conn.create_user('my-user')
+    with assert_raises(BotoServerError):
+        conn.get_credential_report()
+    result = conn.generate_credential_report()
+    while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE':
+        result = conn.generate_credential_report()
+    result = conn.get_credential_report()
+    report = base64.b64decode(result['get_credential_report_response']['get_credential_report_result']['content'].encode('ascii')).decode('ascii')
+    report.should.match(r'.*my-user.*')
@ -237,3 +237,36 @@ def test_create_database_replica():
 
     primary = conn.get_all_dbinstances("db-master-1")[0]
     list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
+
+
+@disable_on_py3()
+@mock_rds
+def test_connecting_to_us_east_1():
+    # boto does not use us-east-1 in the URL for RDS,
+    # and that broke moto in the past:
+    # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
+    conn = boto.rds.connect_to_region("us-east-1")
+
+    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
+                                      security_groups=["my_sg"])
+
+    database.status.should.equal('available')
+    database.id.should.equal("db-master-1")
+    database.allocated_storage.should.equal(10)
+    database.instance_class.should.equal("db.m1.small")
+    database.master_username.should.equal("root")
+    database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
+    database.security_groups[0].name.should.equal('my_sg')
+
+
+@disable_on_py3()
+@mock_rds
+def test_create_database_with_iops():
+    conn = boto.rds.connect_to_region("us-west-2")
+
+    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
+
+    database.status.should.equal('available')
+    database.iops.should.equal(6000)
+    # boto>2.36.0 may change the following property name to `storage_type`
+    database.StorageType.should.equal('io1')
@ -19,6 +19,25 @@ import sure  # noqa
 from moto import mock_s3
 
 
+REDUCED_PART_SIZE = 256
+
+
+def reduced_min_part_size(f):
+    """ speed up tests by temporarily making the multipart minimum part size
+        small
+    """
+    import moto.s3.models as s3model
+    orig_size = s3model.UPLOAD_PART_MIN_SIZE
+
+    def wrapped(*args, **kwargs):
+        try:
+            s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
+            return f(*args, **kwargs)
+        finally:
+            s3model.UPLOAD_PART_MIN_SIZE = orig_size
+    return wrapped
+
+
 class MyModel(object):
     def __init__(self, name, value):
         self.name = name
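Note: reduced_min_part_size patches the module-level minimum part size so the multipart tests below can use 256-byte parts instead of allocating 5 MB buffers, restoring the original value in a finally block even when a test fails. One trade-off of the hand-rolled wrapper is that it does not copy the wrapped test's metadata; a variant using functools.wraps (a sketch of an alternative, not the code above) would preserve __name__ and the docstring for test runners:

    # Sketch of the same patch-and-restore decorator with functools.wraps;
    # an alternative design, not what the test file above does.
    import functools
    import moto.s3.models as s3model

    REDUCED_PART_SIZE = 256

    def reduced_min_part_size(f):
        @functools.wraps(f)              # keeps f.__name__ visible to runners
        def wrapped(*args, **kwargs):
            orig_size = s3model.UPLOAD_PART_MIN_SIZE
            try:
                s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
                return f(*args, **kwargs)
            finally:
                s3model.UPLOAD_PART_MIN_SIZE = orig_size
        return wrapped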
@ -72,12 +91,13 @@ def test_multipart_upload_too_small():
 
 
 @mock_s3
+@reduced_min_part_size
 def test_multipart_upload():
     conn = boto.connect_s3('the_key', 'the_secret')
     bucket = conn.create_bucket("foobar")
 
     multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * 5242880
+    part1 = b'0' * REDUCED_PART_SIZE
     multipart.upload_part_from_file(BytesIO(part1), 1)
     # last part, can be less than 5 MB
     part2 = b'1'
@ -88,6 +108,24 @@ def test_multipart_upload():
 
 
 @mock_s3
+@reduced_min_part_size
+def test_multipart_upload_out_of_order():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 4)
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3
+@reduced_min_part_size
 def test_multipart_upload_with_headers():
     conn = boto.connect_s3('the_key', 'the_secret')
     bucket = conn.create_bucket("foobar")
@ -102,6 +140,7 @@ def test_multipart_upload_with_headers():
 
 
 @mock_s3
+@reduced_min_part_size
 def test_multipart_upload_with_copy_key():
     conn = boto.connect_s3('the_key', 'the_secret')
     bucket = conn.create_bucket("foobar")
@ -110,7 +149,7 @@ def test_multipart_upload_with_copy_key():
     key.set_contents_from_string("key_value")
 
     multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * 5242880
+    part1 = b'0' * REDUCED_PART_SIZE
     multipart.upload_part_from_file(BytesIO(part1), 1)
     multipart.copy_part_from_key("foobar", "original-key", 2)
     multipart.complete_upload()
@ -118,12 +157,13 @@ def test_multipart_upload_with_copy_key():
 
 
 @mock_s3
+@reduced_min_part_size
 def test_multipart_upload_cancel():
     conn = boto.connect_s3('the_key', 'the_secret')
     bucket = conn.create_bucket("foobar")
 
     multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * 5242880
+    part1 = b'0' * REDUCED_PART_SIZE
     multipart.upload_part_from_file(BytesIO(part1), 1)
     multipart.cancel_upload()
     # TODO we really need some sort of assertion here, but we don't currently
@ -131,13 +171,14 @@ def test_multipart_upload_cancel():
 
 
 @mock_s3
+@reduced_min_part_size
 def test_multipart_etag():
     # Create Bucket so that test can run
     conn = boto.connect_s3('the_key', 'the_secret')
     bucket = conn.create_bucket('mybucket')
 
     multipart = bucket.initiate_multipart_upload("the-key")
-    part1 = b'0' * 5242880
+    part1 = b'0' * REDUCED_PART_SIZE
     multipart.upload_part_from_file(BytesIO(part1), 1)
     # last part, can be less than 5 MB
     part2 = b'1'
@ -148,6 +189,26 @@ def test_multipart_etag():
         '"140f92a6df9f9e415f74a1463bcee9bb-2"')
 
 
+@mock_s3
+@reduced_min_part_size
+def test_multipart_invalid_order():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * 5242880
+    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+    xml = xml.format(2, etag2) + xml.format(1, etag1)
+    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+    bucket.complete_multipart_upload.when.called_with(
+        multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
+
+
 @mock_s3
 def test_list_multiparts():
     # Create Bucket so that test can run
@ -692,3 +753,21 @@ def test_bucket_location():
     conn = boto.s3.connect_to_region("us-west-2")
     bucket = conn.create_bucket('mybucket')
     bucket.get_location().should.equal("us-west-2")
+
+
+@mock_s3
+def test_ranged_get():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = Key(bucket)
+    key.key = 'bigkey'
+    rep = b"0123456789"
+    key.set_contents_from_string(rep * 10)
+    key.get_contents_as_string(headers={'Range': 'bytes=0-'}).should.equal(rep * 10)
+    key.get_contents_as_string(headers={'Range': 'bytes=0-99'}).should.equal(rep * 10)
+    key.get_contents_as_string(headers={'Range': 'bytes=0-0'}).should.equal(b'0')
+    key.get_contents_as_string(headers={'Range': 'bytes=99-99'}).should.equal(b'9')
+    key.get_contents_as_string(headers={'Range': 'bytes=50-54'}).should.equal(rep[:5])
+    key.get_contents_as_string(headers={'Range': 'bytes=50-'}).should.equal(rep * 5)
+    key.get_contents_as_string(headers={'Range': 'bytes=-60'}).should.equal(rep * 6)
+    key.size.should.equal(100)
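Note: every expectation in test_ranged_get can be checked against plain slicing of the 100-byte body, which mirrors the inclusive begin/end arithmetic in _handle_range_header:

    # Plain-slicing restatement of the expectations in test_ranged_get.
    data = b"0123456789" * 10               # the 100-byte body the test uploads

    assert data[0:100] == data              # bytes=0- and bytes=0-99
    assert data[0:1] == b'0'                # bytes=0-0
    assert data[99:100] == b'9'             # bytes=99-99
    assert data[50:55] == b'01234'          # bytes=50-54
    assert data[50:] == b"0123456789" * 5   # bytes=50-
    assert data[-60:] == b"0123456789" * 6  # bytes=-60 (suffix: last 60 bytes)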