diff --git a/.travis.yml b/.travis.yml
index 56e87caa0..be2d97c86 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,11 +5,11 @@ python:
- 2.7
env:
matrix:
- - BOTO_VERSION=2.34.0
+ - BOTO_VERSION=2.36.0
matrix:
include:
- python: "3.3"
- env: BOTO_VERSION=2.34.0
+ env: BOTO_VERSION=2.36.0
install:
- travis_retry pip install boto==$BOTO_VERSION
- travis_retry pip install .
diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py
index 1aa4c9117..18c1d23f3 100644
--- a/moto/cloudformation/responses.py
+++ b/moto/cloudformation/responses.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
import json
-from six.moves.urllib_parse import urlparse
+from six.moves.urllib.parse import urlparse
from moto.core.responses import BaseResponse
from moto.s3 import s3_backend
diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py
new file mode 100644
index 000000000..d5a754e78
--- /dev/null
+++ b/moto/core/exceptions.py
@@ -0,0 +1,28 @@
+from werkzeug.exceptions import HTTPException
+from jinja2 import DictLoader, Environment
+
+
+ERROR_RESPONSE = u"""
+
+
+
+ {{code}}
+ {{message}}
+ {% block extra %}{% endblock %}
+
+
+ 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
+
+"""
+
+
+class RESTError(HTTPException):
+ templates = {
+ 'error': ERROR_RESPONSE
+ }
+
+ def __init__(self, code, message, template='error', **kwargs):
+ super(RESTError, self).__init__()
+ env = Environment(loader=DictLoader(self.templates))
+ self.description = env.get_template(template).render(
+ code=code, message=message, **kwargs)
diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py
index 599f0d00d..3c181e045 100644
--- a/moto/ec2/exceptions.py
+++ b/moto/ec2/exceptions.py
@@ -1,13 +1,9 @@
from __future__ import unicode_literals
-from werkzeug.exceptions import BadRequest
-from jinja2 import Template
+from moto.core.exceptions import RESTError
-class EC2ClientError(BadRequest):
- def __init__(self, code, message):
- super(EC2ClientError, self).__init__()
- self.description = ERROR_RESPONSE_TEMPLATE.render(
- code=code, message=message)
+class EC2ClientError(RESTError):
+ code = 400
class DependencyViolationError(EC2ClientError):
@@ -306,17 +302,3 @@ class InvalidCIDRSubnetError(EC2ClientError):
"InvalidParameterValue",
"invalid CIDR subnet specification: {0}"
.format(cidr))
-
-
-ERROR_RESPONSE = u"""
-
-
-
- {{code}}
- {{message}}
-
-
- 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
-
-"""
-ERROR_RESPONSE_TEMPLATE = Template(ERROR_RESPONSE)
diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index b4d491264..ac0bb4ddc 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -69,6 +69,7 @@ from .utils import (
random_internet_gateway_id,
random_ip,
random_key_pair,
+ random_private_ip,
random_public_ip,
random_reservation_id,
random_route_table_id,
@@ -163,7 +164,7 @@ class NetworkInterface(object):
group = self.ec2_backend.get_security_group_from_id(group_id)
if not group:
# Create with specific group ID.
- group = SecurityGroup(group_id, group_id, group_id, vpc_id=subnet.vpc_id)
+ group = SecurityGroup(self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
self.ec2_backend.groups[subnet.vpc_id][group_id] = group
if group:
self._group_set.append(group)
@@ -174,9 +175,12 @@ class NetworkInterface(object):
security_group_ids = properties.get('SecurityGroups', [])
- subnet_id = properties['SubnetId']
ec2_backend = ec2_backends[region_name]
- subnet = ec2_backend.get_subnet(subnet_id)
+ subnet_id = properties.get('SubnetId')
+ if subnet_id:
+ subnet = ec2_backend.get_subnet(subnet_id)
+ else:
+ subnet = None
private_ip_address = properties.get('PrivateIpAddress', None)
@@ -224,7 +228,7 @@ class NetworkInterfaceBackend(object):
super(NetworkInterfaceBackend, self).__init__()
def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs):
- eni = NetworkInterface(self, subnet, private_ip_address, group_ids=group_ids)
+ eni = NetworkInterface(self, subnet, private_ip_address, group_ids=group_ids, **kwargs)
self.enis[eni.id] = eni
return eni
@@ -297,10 +301,14 @@ class Instance(BotoInstance, TaggedEC2Resource):
self.instance_type = kwargs.get("instance_type", "m1.small")
self.vpc_id = None
self.subnet_id = kwargs.get("subnet_id")
+ in_ec2_classic = not bool(self.subnet_id)
self.key_name = kwargs.get("key_name")
self.source_dest_check = "true"
self.launch_time = datetime.utcnow().isoformat()
- self.private_ip_address = kwargs.get('private_ip_address')
+ associate_public_ip = kwargs.get("associate_public_ip", False)
+ if in_ec2_classic:
+ # If we are in EC2-Classic, autoassign a public IP
+ associate_public_ip = True
self.block_device_mapping = BlockDeviceMapping()
self.block_device_mapping['/dev/sda1'] = BlockDeviceType(volume_id=random_volume_id())
@@ -326,9 +334,26 @@ class Instance(BotoInstance, TaggedEC2Resource):
self.vpc_id = subnet.vpc_id
self.prep_nics(kwargs.get("nics", {}),
- subnet_id=kwargs.get("subnet_id"),
+ subnet_id=self.subnet_id,
private_ip=kwargs.get("private_ip"),
- associate_public_ip=kwargs.get("associate_public_ip"))
+ associate_public_ip=associate_public_ip)
+
+ @property
+ def private_ip(self):
+ return self.nics[0].private_ip_address
+
+ @property
+ def private_dns(self):
+ return "ip-{0}.ec2.internal".format(self.private_ip)
+
+ @property
+ def public_ip(self):
+ return self.nics[0].public_ip
+
+ @property
+ def public_dns(self):
+ if self.public_ip:
+ return "ec2-{0}.compute-1.amazonaws.com".format(self.public_ip)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -346,7 +371,7 @@ class Instance(BotoInstance, TaggedEC2Resource):
instance_type=properties.get("InstanceType", "m1.small"),
subnet_id=properties.get("SubnetId"),
key_name=properties.get("KeyName"),
- private_ip_address=properties.get('PrivateIpAddress'),
+ private_ip=properties.get('PrivateIpAddress'),
)
return reservation.instances[0]
@@ -407,6 +432,9 @@ class Instance(BotoInstance, TaggedEC2Resource):
def prep_nics(self, nic_spec, subnet_id=None, private_ip=None, associate_public_ip=None):
self.nics = {}
+ if not private_ip:
+ private_ip = random_private_ip()
+
# Primary NIC defaults
primary_nic = {'SubnetId': subnet_id,
'PrivateIpAddress': private_ip,
@@ -434,7 +462,10 @@ class Instance(BotoInstance, TaggedEC2Resource):
if device_index == 0 and primary_nic:
nic.update(primary_nic)
- subnet = self.ec2_backend.get_subnet(nic['SubnetId'])
+ if 'SubnetId' in nic:
+ subnet = self.ec2_backend.get_subnet(nic['SubnetId'])
+ else:
+ subnet = None
group_id = nic.get('SecurityGroupId')
group_ids = [group_id] if group_id else []
@@ -468,13 +499,13 @@ class Instance(BotoInstance, TaggedEC2Resource):
if attribute_name == 'AvailabilityZone':
return self.placement
elif attribute_name == 'PrivateDnsName':
- return self.private_dns_name
+ return self.private_dns
elif attribute_name == 'PublicDnsName':
- return self.public_dns_name
+ return self.public_dns
elif attribute_name == 'PrivateIp':
- return self.private_ip_address
+ return self.private_ip
elif attribute_name == 'PublicIp':
- return self.ip_address
+ return self.public_ip
raise UnformattedGetAttTemplateException()
@@ -1016,8 +1047,9 @@ class SecurityRule(object):
return self.unique_representation == other.unique_representation
-class SecurityGroup(object):
- def __init__(self, group_id, name, description, vpc_id=None):
+class SecurityGroup(TaggedEC2Resource):
+ def __init__(self, ec2_backend, group_id, name, description, vpc_id=None):
+ self.ec2_backend = ec2_backend
self.id = group_id
self.name = name
self.description = description
@@ -1116,7 +1148,7 @@ class SecurityGroupBackend(object):
existing_group = self.get_security_group_from_name(name, vpc_id)
if existing_group:
raise InvalidSecurityGroupDuplicateError(name)
- group = SecurityGroup(group_id, name, description, vpc_id=vpc_id)
+ group = SecurityGroup(self, group_id, name, description, vpc_id=vpc_id)
self.groups[vpc_id][group_id] = group
return group
@@ -2031,10 +2063,12 @@ class VPCGatewayAttachment(object):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
- return ec2_backend.create_vpc_gateway_attachment(
+ attachment = ec2_backend.create_vpc_gateway_attachment(
gateway_id=properties['InternetGatewayId'],
vpc_id=properties['VpcId'],
)
+ ec2_backend.attach_internet_gateway(properties['InternetGatewayId'], properties['VpcId'])
+ return attachment
@property
def physical_resource_id(self):
diff --git a/moto/ec2/responses/elastic_network_interfaces.py b/moto/ec2/responses/elastic_network_interfaces.py
index a7e270a93..10c5df36d 100644
--- a/moto/ec2/responses/elastic_network_interfaces.py
+++ b/moto/ec2/responses/elastic_network_interfaces.py
@@ -25,6 +25,11 @@ class ElasticNetworkInterfaces(BaseResponse):
def describe_network_interfaces(self):
# Partially implemented. Supports only network-interface-id and group-id filters
filters = filters_from_querystring(self.querystring)
+ eni_ids = self._get_multi_param('NetworkInterfaceId.')
+ if 'network-interface-id' not in filters and eni_ids:
+ # Network interfaces can be filtered by passing the 'network-interface-id'
+ # filter or by passing the NetworkInterfaceId parameter
+ filters['network-interface-id'] = eni_ids
enis = self.ec2_backend.describe_network_interfaces(filters)
template = self.response_template(DESCRIBE_NETWORK_INTERFACES_RESPONSE)
return template.render(enis=enis)
diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py
index 62909e2ea..f7c03206d 100644
--- a/moto/ec2/responses/instances.py
+++ b/moto/ec2/responses/instances.py
@@ -198,8 +198,8 @@ EC2_RUN_INSTANCES = """{{ instance.nics[0].subnet.id }}
{{ instance.nics[0].subnet.vpc_id }}
- {{ instance.nics[0].private_ip_address }}
+ {{ instance.private_ip }}
{% if instance.nics[0].public_ip %}
{{ instance.nics[0].public_ip }}
{% endif %}
diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py
index 9da61d90a..38fadb883 100644
--- a/moto/ec2/responses/security_groups.py
+++ b/moto/ec2/responses/security_groups.py
@@ -133,6 +133,16 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = """
@@ -163,6 +190,85 @@ ADD_JOB_FLOW_STEPS_TEMPLATE = """
+
+ {% for cluster in clusters %}
+
+ {{ cluster.id }}
+ {{ cluster.name }}
+ {{ cluster.normalized_instance_hours }}
+
+ {{ cluster.state }}
+
+
+
+
+
+
+
+ {% endfor %}
+
+
+
+
+ 2690d7eb-ed86-11dd-9877-6fad448a8418
+
+
+"""
+
+DESCRIBE_CLUSTER_TEMPLATE = """
+
+
+ {{ cluster.id }}
+
+ {% for tag_key, tag_value in cluster.tags.items() %}
+
+ {{ tag_key }}
+ {{ tag_value }}
+
+ {% endfor %}
+
+
+ {{ cluster.availability_zone }}
+ {{ cluster.subnet_id }}
+ {{ cluster.ec2_key_name }}
+
+ {{ cluster.running_ami_version }}
+ {{ cluster.visible_to_all_users }}
+
+
+ Terminated by user request
+ USER_REQUEST
+
+ {{ cluster.state }}
+
+ 2014-01-24T01:21:21Z
+ 2014-01-24T01:25:26Z
+ 2014-01-24T02:19:46Z
+
+
+ {{ cluster.auto_terminate }}
+ {{ cluster.name }}
+ {{ cluster.requested_ami_version }}
+
+ {% for application in cluster.applications %}
+
+ {{ application.name }}
+ {{ application.version }}
+
+ {% endfor %}
+
+ {{ cluster.termination_protection }}
+ ec2-184-0-0-1.us-west-1.compute.amazonaws.com
+ {{ cluster.normalized_instance_hours }}
+ {{ cluster.service_role }}
+
+
+
+ aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
+
+"""
+
ADD_INSTANCE_GROUPS_TEMPLATE = """
{% for instance_group in instance_groups %}{{ instance_group.id }}{% if loop.index != loop.length %},{% endif %}{% endfor %}
"""
@@ -182,3 +288,20 @@ SET_VISIBLE_TO_ALL_USERS_TEMPLATE = """
+
+
+ 2690d7eb-ed86-11dd-9877-6fad448a8419
+
+
+"""
+
+REMOVE_TAGS_TEMPLATE = """
+
+
+ 2690d7eb-ed86-11dd-9877-6fad448a8419
+
+
+"""
diff --git a/moto/emr/utils.py b/moto/emr/utils.py
index e09c1e123..b4262c177 100644
--- a/moto/emr/utils.py
+++ b/moto/emr/utils.py
@@ -14,3 +14,19 @@ def random_instance_group_id(size=13):
chars = list(range(10)) + list(string.ascii_uppercase)
job_tag = ''.join(six.text_type(random.choice(chars)) for x in range(size))
return 'i-{0}'.format(job_tag)
+
+
+def tags_from_query_string(querystring_dict):
+ prefix = 'Tags'
+ suffix = 'Key'
+ response_values = {}
+ for key, value in querystring_dict.items():
+ if key.startswith(prefix) and key.endswith(suffix):
+ tag_index = key.replace(prefix + ".", "").replace("." + suffix, "")
+ tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0]
+ tag_value_key = "Tags.{0}.Value".format(tag_index)
+ if tag_value_key in querystring_dict:
+ response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
+ else:
+ response_values[tag_key] = None
+ return response_values
diff --git a/moto/iam/models.py b/moto/iam/models.py
index ebb25b66d..5468e0805 100644
--- a/moto/iam/models.py
+++ b/moto/iam/models.py
@@ -4,7 +4,7 @@ from boto.exception import BotoServerError
from moto.core import BaseBackend
from .utils import random_access_key, random_alphanumeric, random_resource_id
from datetime import datetime
-
+import base64
class Role(object):
@@ -135,7 +135,7 @@ class User(object):
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
-
+ self.arn = 'arn:aws:iam::123456789012:user/{0}'.format(name)
self.policies = {}
self.access_keys = []
self.password = None
@@ -184,6 +184,45 @@ class User(object):
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
+ def to_csv(self):
+ date_format = '%Y-%m-%dT%H:%M:%S+00:00'
+ date_created = datetime.strptime(self.created, '%Y-%m-%d-%H-%M-%S')
+ # aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A
+ if not self.password:
+ password_enabled = 'false'
+ password_last_used = 'not_supported'
+ else:
+ password_enabled = 'true'
+ password_last_used = 'no_information'
+
+ if len(self.access_keys) == 0:
+ access_key_1_active = 'false'
+ access_key_1_last_rotated = 'N/A'
+ access_key_2_active = 'false'
+ access_key_2_last_rotated = 'N/A'
+ elif len(self.access_keys) == 1:
+ access_key_1_active = 'true'
+ access_key_1_last_rotated = date_created.strftime(date_format)
+ access_key_2_active = 'false'
+ access_key_2_last_rotated = 'N/A'
+ else:
+ access_key_1_active = 'true'
+ access_key_1_last_rotated = date_created.strftime(date_format)
+ access_key_2_active = 'true'
+ access_key_2_last_rotated = date_created.strftime(date_format)
+
+ return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A'.format(self.name,
+ self.arn,
+ date_created.strftime(date_format),
+ password_enabled,
+ password_last_used,
+ date_created.strftime(date_format),
+ access_key_1_active,
+ access_key_1_last_rotated,
+ access_key_2_active,
+ access_key_2_last_rotated
+ )
+
class IAMBackend(BaseBackend):
@@ -193,6 +232,7 @@ class IAMBackend(BaseBackend):
self.certificates = {}
self.groups = {}
self.users = {}
+ self.credential_report = None
super(IAMBackend, self).__init__()
def create_role(self, role_name, assume_role_policy_document, path):
@@ -394,5 +434,18 @@ class IAMBackend(BaseBackend):
except KeyError:
raise BotoServerError(404, 'Not Found')
+ def report_generated(self):
+ return self.credential_report
+
+ def generate_report(self):
+ self.credential_report = True
+
+ def get_credential_report(self):
+ if not self.credential_report:
+ raise BotoServerError(410, 'ReportNotPresent')
+ report = 'user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n'
+ for user in self.users:
+ report += self.users[user].to_csv() + '\n'
+ return base64.b64encode(report.encode('ascii')).decode('ascii')
iam_backend = IAMBackend()
diff --git a/moto/iam/responses.py b/moto/iam/responses.py
index e95494ce4..78030f288 100644
--- a/moto/iam/responses.py
+++ b/moto/iam/responses.py
@@ -219,6 +219,18 @@ class IamResponse(BaseResponse):
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeleteUser')
+ def generate_credential_report(self):
+ if iam_backend.report_generated():
+ template = self.response_template(CREDENTIAL_REPORT_GENERATED)
+ else:
+ template = self.response_template(CREDENTIAL_REPORT_GENERATING)
+ iam_backend.generate_report()
+ return template.render()
+
+ def get_credential_report(self):
+ report = iam_backend.get_credential_report()
+ template = self.response_template(CREDENTIAL_REPORT)
+ return template.render(report=report)
GENERIC_EMPTY_TEMPLATE = """<{{ name }}Response>
@@ -559,3 +571,34 @@ LIST_ACCESS_KEYS_TEMPLATE = """
7a62c49f-347e-4fc4-9331-6e8eEXAMPLE
"""
+
+CREDENTIAL_REPORT_GENERATING = """
+
+
+ STARTED
+ No report exists. Starting a new report generation task
+
+
+ fa788a82-aa8a-11e4-a278-1786c418872b
+
+"""
+
+CREDENTIAL_REPORT_GENERATED = """
+
+ COMPLETE
+
+
+ fa788a82-aa8a-11e4-a278-1786c418872b
+
+"""
+
+CREDENTIAL_REPORT = """
+
+ {{ report }}
+ 2015-02-02T20:02:02Z
+ text/csv
+
+
+ fa788a82-aa8a-11e4-a278-1786c418872b
+
+"""
\ No newline at end of file
diff --git a/moto/rds/models.py b/moto/rds/models.py
index f0ac1e789..80a55434d 100644
--- a/moto/rds/models.py
+++ b/moto/rds/models.py
@@ -199,6 +199,12 @@ class Database(object):
{{ database.publicly_accessible }}
{{ database.auto_minor_version_upgrade }}
{{ database.allocated_storage }}
+ {% if database.iops %}
+ {{ database.iops }}
+ io1
+ {% else %}
+ {{ database.storage_type }}
+ {% endif %}
{{ database.db_instance_class }}
{{ database.master_username }}
diff --git a/moto/rds/urls.py b/moto/rds/urls.py
index e2e5b86ce..0a7530c97 100644
--- a/moto/rds/urls.py
+++ b/moto/rds/urls.py
@@ -2,7 +2,7 @@ from __future__ import unicode_literals
from .responses import RDSResponse
url_bases = [
- "https?://rds.(.+).amazonaws.com",
+ "https?://rds(\..+)?\.amazonaws\.com",
]
url_paths = {
diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py
index 52d8faced..17da1236d 100644
--- a/moto/s3/exceptions.py
+++ b/moto/s3/exceptions.py
@@ -1,9 +1,73 @@
from __future__ import unicode_literals
+from moto.core.exceptions import RESTError
-class BucketAlreadyExists(Exception):
+ERROR_WITH_BUCKET_NAME = """{% extends 'error' %}
+{% block extra %}{{ bucket }}{% endblock %}
+"""
+
+
+class S3ClientError(RESTError):
pass
-class MissingBucket(Exception):
- pass
+class BucketError(S3ClientError):
+ def __init__(self, *args, **kwargs):
+ kwargs.setdefault('template', 'bucket_error')
+ self.templates['bucket_error'] = ERROR_WITH_BUCKET_NAME
+ super(BucketError, self).__init__(*args, **kwargs)
+
+
+class BucketAlreadyExists(BucketError):
+ code = 409
+
+ def __init__(self, *args, **kwargs):
+ super(BucketAlreadyExists, self).__init__(
+ "BucketAlreadyExists",
+ ("The requested bucket name is not available. The bucket "
+ "namespace is shared by all users of the system. Please "
+ "select a different name and try again"),
+ *args, **kwargs)
+
+
+class MissingBucket(BucketError):
+ code = 404
+
+ def __init__(self, *args, **kwargs):
+ super(MissingBucket, self).__init__(
+ "NoSuchBucket",
+ "The specified bucket does not exist",
+ *args, **kwargs)
+
+
+class InvalidPartOrder(S3ClientError):
+ code = 400
+
+ def __init__(self, *args, **kwargs):
+ super(InvalidPartOrder, self).__init__(
+ "InvalidPartOrder",
+ ("The list of parts was not in ascending order. The parts "
+ "list must be specified in order by part number."),
+ *args, **kwargs)
+
+
+class InvalidPart(S3ClientError):
+ code = 400
+
+ def __init__(self, *args, **kwargs):
+ super(InvalidPart, self).__init__(
+ "InvalidPart",
+ ("One or more of the specified parts could not be found. "
+ "The part might not have been uploaded, or the specified "
+ "entity tag might not have matched the part's entity tag."),
+ *args, **kwargs)
+
+
+class EntityTooSmall(S3ClientError):
+ code = 400
+
+ def __init__(self, *args, **kwargs):
+ super(EntityTooSmall, self).__init__(
+ "EntityTooSmall",
+ "Your proposed upload is smaller than the minimum allowed object size.",
+ *args, **kwargs)
diff --git a/moto/s3/models.py b/moto/s3/models.py
index 2a78a8003..fd41125ed 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -8,9 +8,10 @@ import itertools
import codecs
import six
+from bisect import insort
from moto.core import BaseBackend
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
-from .exceptions import BucketAlreadyExists, MissingBucket
+from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall
from .utils import clean_key_name, _VersionedKeyStore
UPLOAD_ID_BYTES = 43
@@ -118,25 +119,32 @@ class FakeMultipart(object):
self.key_name = key_name
self.metadata = metadata
self.parts = {}
+ self.partlist = [] # ordered list of part ID's
rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
self.id = rand_b64.decode('utf-8').replace('=', '').replace('+', '')
- def complete(self):
+ def complete(self, body):
decode_hex = codecs.getdecoder("hex_codec")
total = bytearray()
md5s = bytearray()
- last_part_name = len(self.list_parts())
- for part in self.list_parts():
- if part.name != last_part_name and len(part.value) < UPLOAD_PART_MIN_SIZE:
- return None, None
+ last = None
+ count = 0
+ for pn, etag in body:
+ part = self.parts.get(pn)
+ if part is None or part.etag != etag:
+ raise InvalidPart()
+ if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE:
+ raise EntityTooSmall()
part_etag = part.etag.replace('"', '')
md5s.extend(decode_hex(part_etag)[0])
total.extend(part.value)
+ last = part
+ count += 1
etag = hashlib.md5()
etag.update(bytes(md5s))
- return total, "{0}-{1}".format(etag.hexdigest(), last_part_name)
+ return total, "{0}-{1}".format(etag.hexdigest(), count)
def set_part(self, part_id, value):
if part_id < 1:
@@ -144,18 +152,12 @@ class FakeMultipart(object):
key = FakeKey(part_id, value)
self.parts[part_id] = key
+ insort(self.partlist, part_id)
return key
def list_parts(self):
- parts = []
-
- for part_id, index in enumerate(sorted(self.parts.keys()), start=1):
- # Make sure part ids are continuous
- if part_id != index:
- return
- parts.append(self.parts[part_id])
-
- return parts
+ for part_id in self.partlist:
+ yield self.parts[part_id]
class FakeBucket(object):
@@ -191,7 +193,7 @@ class S3Backend(BaseBackend):
def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets:
- raise BucketAlreadyExists()
+ raise BucketAlreadyExists(bucket=bucket_name)
new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
self.buckets[bucket_name] = new_bucket
return new_bucket
@@ -203,7 +205,7 @@ class S3Backend(BaseBackend):
try:
return self.buckets[bucket_name]
except KeyError:
- raise MissingBucket()
+ raise MissingBucket(bucket=bucket_name)
def delete_bucket(self, bucket_name):
bucket = self.get_bucket(bucket_name)
@@ -279,10 +281,10 @@ class S3Backend(BaseBackend):
return new_multipart
- def complete_multipart(self, bucket_name, multipart_id):
+ def complete_multipart(self, bucket_name, multipart_id, body):
bucket = self.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
- value, etag = multipart.complete()
+ value, etag = multipart.complete(body)
if value is None:
return
del bucket.multiparts[multipart_id]
@@ -297,7 +299,7 @@ class S3Backend(BaseBackend):
def list_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name)
- return bucket.multiparts[multipart_id].list_parts()
+ return list(bucket.multiparts[multipart_id].list_parts())
def get_all_multiparts(self, bucket_name):
bucket = self.get_bucket(bucket_name)
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index 750bf68ba..143ad0214 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -7,7 +7,7 @@ from six.moves.urllib.parse import parse_qs, urlparse
from moto.core.responses import _TemplateEnvironmentMixin
-from .exceptions import BucketAlreadyExists, MissingBucket
+from .exceptions import BucketAlreadyExists, S3ClientError, InvalidPartOrder
from .models import s3_backend
from .utils import bucket_name_from_url, metadata_from_headers
from xml.dom import minidom
@@ -35,8 +35,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
def bucket_response(self, request, full_url, headers):
try:
response = self._bucket_response(request, full_url, headers)
- except MissingBucket:
- return 404, headers, ""
+ except S3ClientError as s3error:
+ response = s3error.code, headers, s3error.description
if isinstance(response, six.string_types):
return 200, headers, response.encode("utf-8")
@@ -72,12 +72,8 @@ class ResponseObject(_TemplateEnvironmentMixin):
raise NotImplementedError("Method {0} has not been impelemented in the S3 backend yet".format(method))
def _bucket_response_head(self, bucket_name, headers):
- try:
- self.backend.get_bucket(bucket_name)
- except MissingBucket:
- return 404, headers, ""
- else:
- return 200, headers, ""
+ self.backend.get_bucket(bucket_name)
+ return 200, headers, ""
def _bucket_response_get(self, bucket_name, querystring, headers):
if 'uploads' in querystring:
@@ -127,11 +123,7 @@ class ResponseObject(_TemplateEnvironmentMixin):
is_truncated='false',
)
- try:
- bucket = self.backend.get_bucket(bucket_name)
- except MissingBucket:
- return 404, headers, ""
-
+ bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get('prefix', [None])[0]
delimiter = querystring.get('delimiter', [None])[0]
result_keys, result_folders = self.backend.prefix_query(bucket, prefix, delimiter)
@@ -161,17 +153,12 @@ class ResponseObject(_TemplateEnvironmentMixin):
# us-east-1 has different behavior
new_bucket = self.backend.get_bucket(bucket_name)
else:
- return 409, headers, ""
+ raise
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, headers, template.render(bucket=new_bucket)
def _bucket_response_delete(self, bucket_name, headers):
- try:
- removed_bucket = self.backend.delete_bucket(bucket_name)
- except MissingBucket:
- # Non-existant bucket
- template = self.response_template(S3_DELETE_NON_EXISTING_BUCKET)
- return 404, headers, template.render(bucket_name=bucket_name)
+ removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
@@ -228,17 +215,43 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 200, headers, template.render(deleted=deleted_names, delete_errors=error_names)
+ def _handle_range_header(self, request, headers, response_content):
+ length = len(response_content)
+ last = length - 1
+ _, rspec = request.headers.get('range').split('=')
+ if ',' in rspec:
+ raise NotImplementedError(
+ "Multiple range specifiers not supported")
+ toint = lambda i: int(i) if i else None
+ begin, end = map(toint, rspec.split('-'))
+ if begin is not None: # byte range
+ end = last if end is None else end
+ elif end is not None: # suffix byte range
+ begin = length - end
+ end = last
+ else:
+ return 400, headers, ""
+ if begin < 0 or end > length or begin > min(end, last):
+ return 416, headers, ""
+ headers['content-range'] = "bytes {0}-{1}/{2}".format(
+ begin, end, length)
+ return 206, headers, response_content[begin:end + 1]
+
def key_response(self, request, full_url, headers):
try:
response = self._key_response(request, full_url, headers)
- except MissingBucket:
- return 404, headers, ""
+ except S3ClientError as s3error:
+ response = s3error.code, headers, s3error.description
if isinstance(response, six.string_types):
- return 200, headers, response
+ status_code = 200
+ response_content = response
else:
status_code, headers, response_content = response
- return status_code, headers, response_content
+
+ if status_code == 200 and 'range' in request.headers:
+ return self._handle_range_header(request, headers, response_content)
+ return status_code, headers, response_content
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
@@ -364,6 +377,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
return 204, headers, template.render(bucket=removed_key)
+ def _complete_multipart_body(self, body):
+ ps = minidom.parseString(body).getElementsByTagName('Part')
+ prev = 0
+ for p in ps:
+ pn = int(p.getElementsByTagName('PartNumber')[0].firstChild.wholeText)
+ if pn <= prev:
+ raise InvalidPartOrder()
+ yield (pn, p.getElementsByTagName('ETag')[0].firstChild.wholeText)
+
def _key_response_post(self, request, body, parsed_url, bucket_name, query, key_name, headers):
if body == b'' and parsed_url.query == 'uploads':
metadata = metadata_from_headers(request.headers)
@@ -378,18 +400,15 @@ class ResponseObject(_TemplateEnvironmentMixin):
return 200, headers, response
if 'uploadId' in query:
+ body = self._complete_multipart_body(body)
upload_id = query['uploadId'][0]
- key = self.backend.complete_multipart(bucket_name, upload_id)
-
- if key is not None:
- template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
- return template.render(
- bucket_name=bucket_name,
- key_name=key.name,
- etag=key.etag,
- )
- template = self.response_template(S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR)
- return 400, headers, template.render()
+ key = self.backend.complete_multipart(bucket_name, upload_id, body)
+ template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
+ return template.render(
+ bucket_name=bucket_name,
+ key_name=key.name,
+ etag=key.etag,
+ )
elif parsed_url.query == 'restore':
es = minidom.parseString(body).getElementsByTagName('Days')
days = es[0].childNodes[0].wholeText
@@ -461,14 +480,6 @@ S3_DELETE_BUCKET_SUCCESS = """
-NoSuchBucket
-The specified bucket does not exist
-{{ bucket_name }}
-asdfasdfsadf
-asfasdfsfsafasdf
-"""
-
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """
BucketNotEmpty
The bucket you tried to delete is not empty
@@ -609,14 +620,6 @@ S3_MULTIPART_COMPLETE_RESPONSE = """
"""
-S3_MULTIPART_COMPLETE_TOO_SMALL_ERROR = """
-
- EntityTooSmall
- Your proposed upload is smaller than the minimum allowed object size.
- asdfasdfsdafds
- sdfgdsfgdsfgdfsdsfgdfs
-"""
-
S3_ALL_MULTIPARTS = """
{{ bucket_name }}
diff --git a/setup.py b/setup.py
index f894c5ddd..8ba17c57b 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,6 @@ from setuptools import setup, find_packages
install_requires = [
"Jinja2",
"boto",
- "dicttoxml",
"flask",
"httpretty>=0.6.1",
"requests",
@@ -22,7 +21,7 @@ if sys.version_info < (2, 7):
setup(
name='moto',
- version='0.3.9',
+ version='0.4.0',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py
index c1f356bca..031ed464a 100644
--- a/tests/test_cloudformation/test_cloudformation_stack_integration.py
+++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py
@@ -936,3 +936,51 @@ def test_sns_topic():
topic_name_output.value.should.equal("my_topics")
topic_arn_output = [x for x in stack.outputs if x.key == 'topic_arn'][0]
topic_arn_output.value.should.equal(topic_arn)
+
+
+
+@mock_cloudformation
+def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc():
+ template = {
+ "AWSTemplateFormatVersion": "2010-09-09",
+ "Resources": {
+ "internetgateway": {
+ "Type": "AWS::EC2::InternetGateway"
+ },
+ "testvpc": {
+ "Type": "AWS::EC2::VPC",
+ "Properties": {
+ "CidrBlock": "10.0.0.0/16",
+ "EnableDnsHostnames": "true",
+ "EnableDnsSupport": "true",
+ "InstanceTenancy": "default"
+ },
+ },
+ "vpcgatewayattachment": {
+ "Type": "AWS::EC2::VPCGatewayAttachment",
+ "Properties": {
+ "InternetGatewayId": {
+ "Ref": "internetgateway"
+ },
+ "VpcId": {
+ "Ref": "testvpc"
+ }
+ },
+ },
+ }
+ }
+
+ template_json = json.dumps(template)
+ cf_conn = boto.cloudformation.connect_to_region("us-west-1")
+ cf_conn.create_stack(
+ "test_stack",
+ template_body=template_json,
+ )
+
+ vpc_conn = boto.vpc.connect_to_region("us-west-1")
+ vpc = vpc_conn.get_all_vpcs()[0]
+ igws = vpc_conn.get_all_internet_gateways(
+ filters={'attachment.vpc-id': vpc.id}
+ )
+
+ igws.should.have.length_of(1)
diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py
index f60c6c5aa..40c17aa40 100644
--- a/tests/test_ec2/test_elastic_network_interfaces.py
+++ b/tests/test_ec2/test_elastic_network_interfaces.py
@@ -76,14 +76,14 @@ def test_elastic_network_interfaces_with_groups():
subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
- conn.create_network_interface(subnet.id, groups=[security_group1.id,security_group2.id])
+ conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id])
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(1)
eni = all_enis[0]
eni.groups.should.have.length_of(2)
- set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
+ set([group.id for group in eni.groups]).should.equal(set([security_group1.id, security_group2.id]))
@requires_boto_gte("2.12.0")
@@ -122,25 +122,30 @@ def test_elastic_network_interfaces_filtering():
security_group1 = conn.create_security_group('test security group #1', 'this is a test security group')
security_group2 = conn.create_security_group('test security group #2', 'this is a test security group')
- eni1 = conn.create_network_interface(subnet.id, groups=[security_group1.id,security_group2.id])
+ eni1 = conn.create_network_interface(subnet.id, groups=[security_group1.id, security_group2.id])
eni2 = conn.create_network_interface(subnet.id, groups=[security_group1.id])
eni3 = conn.create_network_interface(subnet.id)
all_enis = conn.get_all_network_interfaces()
all_enis.should.have.length_of(3)
+ # Filter by NetworkInterfaceId
+ enis_by_id = conn.get_all_network_interfaces([eni1.id])
+ enis_by_id.should.have.length_of(1)
+ set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))
+
# Filter by ENI ID
- enis_by_id = conn.get_all_network_interfaces(filters={'network-interface-id':eni1.id})
+ enis_by_id = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id})
enis_by_id.should.have.length_of(1)
set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id]))
# Filter by Security Group
enis_by_group = conn.get_all_network_interfaces(filters={'group-id':security_group1.id})
enis_by_group.should.have.length_of(2)
- set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id,eni2.id]))
+ set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id]))
# Filter by ENI ID and Security Group
- enis_by_group = conn.get_all_network_interfaces(filters={'network-interface-id':eni1.id, 'group-id':security_group1.id})
+ enis_by_group = conn.get_all_network_interfaces(filters={'network-interface-id': eni1.id, 'group-id': security_group1.id})
enis_by_group.should.have.length_of(1)
set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id]))
@@ -157,7 +162,7 @@ def test_elastic_network_interfaces_cloudformation():
conn.create_stack(
"test_stack",
template_body=template_json,
- )
+ )
ec2_conn = boto.ec2.connect_to_region("us-west-1")
eni = ec2_conn.get_all_network_interfaces()[0]
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 95f047d5c..e16bbb126 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -491,23 +491,23 @@ def test_instance_with_nic_attach_detach():
eni = conn.create_network_interface(subnet.id, groups=[security_group2.id])
# Check initial instance and ENI data
- instance.interfaces.should.have.length_of(0)
+ instance.interfaces.should.have.length_of(1)
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
# Attach
- conn.attach_network_interface(eni.id, instance.id, device_index=0)
+ conn.attach_network_interface(eni.id, instance.id, device_index=1)
# Check attached instance and ENI data
instance.update()
- instance.interfaces.should.have.length_of(1)
- instance_eni = instance.interfaces[0]
+ instance.interfaces.should.have.length_of(2)
+ instance_eni = instance.interfaces[1]
instance_eni.id.should.equal(eni.id)
instance_eni.groups.should.have.length_of(2)
set([group.id for group in instance_eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
- eni = conn.get_all_network_interfaces(eni.id)[0]
+ eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0]
eni.groups.should.have.length_of(2)
set([group.id for group in eni.groups]).should.equal(set([security_group1.id,security_group2.id]))
@@ -516,9 +516,9 @@ def test_instance_with_nic_attach_detach():
# Check detached instance and ENI data
instance.update()
- instance.interfaces.should.have.length_of(0)
+ instance.interfaces.should.have.length_of(1)
- eni = conn.get_all_network_interfaces(eni.id)[0]
+ eni = conn.get_all_network_interfaces(filters={'network-interface-id': eni.id})[0]
eni.groups.should.have.length_of(1)
set([group.id for group in eni.groups]).should.equal(set([security_group2.id]))
@@ -530,6 +530,18 @@ def test_instance_with_nic_attach_detach():
cm.exception.request_id.should_not.be.none
+@mock_ec2
+def test_ec2_classic_has_public_ip_address():
+ conn = boto.connect_ec2('the_key', 'the_secret')
+ reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name")
+ instance = reservation.instances[0]
+ instance.ip_address.should_not.equal(None)
+ instance.public_dns_name.should.contain(instance.ip_address)
+
+ instance.private_ip_address.should_not.equal(None)
+ instance.private_dns_name.should.contain(instance.private_ip_address)
+
+
@mock_ec2
def test_run_instance_with_keypair():
conn = boto.connect_ec2('the_key', 'the_secret')
diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py
index b33b42439..861e492b8 100644
--- a/tests/test_ec2/test_security_groups.py
+++ b/tests/test_ec2/test_security_groups.py
@@ -1,6 +1,6 @@
from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises
+import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import boto
@@ -201,7 +201,7 @@ def test_authorize_group_in_vpc():
def test_get_all_security_groups():
conn = boto.connect_ec2()
sg1 = conn.create_security_group(name='test1', description='test1', vpc_id='vpc-mjm05d27')
- sg2 = conn.create_security_group(name='test2', description='test2')
+ conn.create_security_group(name='test2', description='test2')
resp = conn.get_all_security_groups(groupnames=['test1'])
resp.should.have.length_of(1)
@@ -232,3 +232,19 @@ def test_authorize_bad_cidr_throws_invalid_parameter_value():
cm.exception.code.should.equal('InvalidParameterValue')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2
+def test_security_group_tagging():
+ conn = boto.connect_vpc()
+ vpc = conn.create_vpc("10.0.0.0/16")
+ sg = conn.create_security_group("test-sg", "Test SG", vpc.id)
+ sg.add_tag("Test", "Tag")
+
+ tag = conn.get_all_tags()[0]
+ tag.name.should.equal("Test")
+ tag.value.should.equal("Tag")
+
+ group = conn.get_all_security_groups("test-sg")[0]
+ group.tags.should.have.length_of(1)
+ group.tags["Test"].should.equal("Tag")
diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py
index c8d5fca3b..9157a82a5 100644
--- a/tests/test_emr/test_emr.py
+++ b/tests/test_emr/test_emr.py
@@ -123,6 +123,31 @@ def test_terminate_job_flow():
flow.state.should.equal('TERMINATED')
+@mock_emr
+def test_describe_job_flows():
+ conn = boto.connect_emr()
+ job1_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[]
+ )
+ job2_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[]
+ )
+
+ jobs = conn.describe_jobflows()
+ jobs.should.have.length_of(2)
+
+ jobs = conn.describe_jobflows(jobflow_ids=[job2_id])
+ jobs.should.have.length_of(1)
+ jobs[0].jobflowid.should.equal(job2_id)
+
+ first_job = conn.describe_jobflow(job1_id)
+ first_job.jobflowid.should.equal(job1_id)
+
+
@mock_emr
def test_add_steps_to_flow():
conn = boto.connect_emr()
@@ -291,3 +316,61 @@ def test_set_visible_to_all_users():
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('False')
+
+
+@mock_emr
+def test_list_clusters():
+ conn = boto.connect_emr()
+ conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[],
+ )
+
+ summary = conn.list_clusters()
+ clusters = summary.clusters
+ clusters.should.have.length_of(1)
+ cluster = clusters[0]
+ cluster.name.should.equal("My jobflow")
+ cluster.normalizedinstancehours.should.equal('0')
+ cluster.status.state.should.equal("RUNNING")
+
+
+@mock_emr
+def test_describe_cluster():
+ conn = boto.connect_emr()
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[],
+ )
+
+ cluster = conn.describe_cluster(job_id)
+ cluster.name.should.equal("My jobflow")
+ cluster.normalizedinstancehours.should.equal('0')
+ cluster.status.state.should.equal("RUNNING")
+
+
+@mock_emr
+def test_cluster_tagging():
+ conn = boto.connect_emr()
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[],
+ )
+ cluster_id = job_id
+ conn.add_tags(cluster_id, {"tag1": "val1", "tag2": "val2"})
+
+ cluster = conn.describe_cluster(cluster_id)
+ cluster.tags.should.have.length_of(2)
+ tags = dict((tag.key, tag.value) for tag in cluster.tags)
+ tags['tag1'].should.equal('val1')
+ tags['tag2'].should.equal('val2')
+
+ # Remove a tag
+ conn.remove_tags(cluster_id, ["tag1"])
+ cluster = conn.describe_cluster(cluster_id)
+ cluster.tags.should.have.length_of(1)
+ tags = dict((tag.key, tag.value) for tag in cluster.tags)
+ tags['tag2'].should.equal('val2')
diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py
index 11863d83d..454703fec 100644
--- a/tests/test_iam/test_iam.py
+++ b/tests/test_iam/test_iam.py
@@ -1,10 +1,11 @@
from __future__ import unicode_literals
import boto
import sure # noqa
+import re
from nose.tools import assert_raises, assert_equals, assert_not_equals
from boto.exception import BotoServerError
-
+import base64
from moto import mock_iam
@@ -200,3 +201,24 @@ def test_delete_user():
conn.delete_user('my-user')
conn.create_user('my-user')
conn.delete_user('my-user')
+
+@mock_iam()
+def test_generate_credential_report():
+ conn = boto.connect_iam()
+ result = conn.generate_credential_report()
+ result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('STARTED')
+ result = conn.generate_credential_report()
+ result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('COMPLETE')
+
+@mock_iam()
+def test_get_credential_report():
+ conn = boto.connect_iam()
+ conn.create_user('my-user')
+ with assert_raises(BotoServerError):
+ conn.get_credential_report()
+ result = conn.generate_credential_report()
+ while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE':
+ result = conn.generate_credential_report()
+ result = conn.get_credential_report()
+ report = base64.b64decode(result['get_credential_report_response']['get_credential_report_result']['content'].encode('ascii')).decode('ascii')
+ report.should.match(r'.*my-user.*')
\ No newline at end of file
diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py
index 80644ef3e..df8ede179 100644
--- a/tests/test_rds/test_rds.py
+++ b/tests/test_rds/test_rds.py
@@ -237,3 +237,36 @@ def test_create_database_replica():
primary = conn.get_all_dbinstances("db-master-1")[0]
list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
+
+
+@disable_on_py3()
+@mock_rds
+def test_connecting_to_us_east_1():
+ # boto does not use us-east-1 in the URL for RDS,
+ # and that broke moto in the past:
+ # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
+ conn = boto.rds.connect_to_region("us-east-1")
+
+ database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
+ security_groups=["my_sg"])
+
+ database.status.should.equal('available')
+ database.id.should.equal("db-master-1")
+ database.allocated_storage.should.equal(10)
+ database.instance_class.should.equal("db.m1.small")
+ database.master_username.should.equal("root")
+ database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
+ database.security_groups[0].name.should.equal('my_sg')
+
+
+@disable_on_py3()
+@mock_rds
+def test_create_database_with_iops():
+ conn = boto.rds.connect_to_region("us-west-2")
+
+ database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
+
+ database.status.should.equal('available')
+ database.iops.should.equal(6000)
+ # boto>2.36.0 may change the following property name to `storage_type`
+ database.StorageType.should.equal('io1')
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 1643eaaff..2a14650e3 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -19,6 +19,25 @@ import sure # noqa
from moto import mock_s3
+REDUCED_PART_SIZE = 256
+
+
+def reduced_min_part_size(f):
+ """ speed up tests by temporarily making the multipart minimum part size
+ small
+ """
+ import moto.s3.models as s3model
+ orig_size = s3model.UPLOAD_PART_MIN_SIZE
+
+ def wrapped(*args, **kwargs):
+ try:
+ s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
+ return f(*args, **kwargs)
+ finally:
+ s3model.UPLOAD_PART_MIN_SIZE = orig_size
+ return wrapped
+
+
class MyModel(object):
def __init__(self, name, value):
self.name = name
@@ -72,12 +91,13 @@ def test_multipart_upload_too_small():
@mock_s3
+@reduced_min_part_size
def test_multipart_upload():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
- part1 = b'0' * 5242880
+ part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# last part, can be less than 5 MB
part2 = b'1'
@@ -88,6 +108,24 @@ def test_multipart_upload():
@mock_s3
+@reduced_min_part_size
+def test_multipart_upload_out_of_order():
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket("foobar")
+
+ multipart = bucket.initiate_multipart_upload("the-key")
+ # last part, can be less than 5 MB
+ part2 = b'1'
+ multipart.upload_part_from_file(BytesIO(part2), 4)
+ part1 = b'0' * REDUCED_PART_SIZE
+ multipart.upload_part_from_file(BytesIO(part1), 2)
+ multipart.complete_upload()
+ # we should get both parts as the key contents
+ bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3
+@reduced_min_part_size
def test_multipart_upload_with_headers():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
@@ -102,6 +140,7 @@ def test_multipart_upload_with_headers():
@mock_s3
+@reduced_min_part_size
def test_multipart_upload_with_copy_key():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
@@ -110,7 +149,7 @@ def test_multipart_upload_with_copy_key():
key.set_contents_from_string("key_value")
multipart = bucket.initiate_multipart_upload("the-key")
- part1 = b'0' * 5242880
+ part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.copy_part_from_key("foobar", "original-key", 2)
multipart.complete_upload()
@@ -118,12 +157,13 @@ def test_multipart_upload_with_copy_key():
@mock_s3
+@reduced_min_part_size
def test_multipart_upload_cancel():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
- part1 = b'0' * 5242880
+ part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.cancel_upload()
# TODO we really need some sort of assertion here, but we don't currently
@@ -131,13 +171,14 @@ def test_multipart_upload_cancel():
@mock_s3
+@reduced_min_part_size
def test_multipart_etag():
# Create Bucket so that test can run
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket('mybucket')
multipart = bucket.initiate_multipart_upload("the-key")
- part1 = b'0' * 5242880
+ part1 = b'0' * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# last part, can be less than 5 MB
part2 = b'1'
@@ -148,6 +189,26 @@ def test_multipart_etag():
'"140f92a6df9f9e415f74a1463bcee9bb-2"')
+@mock_s3
+@reduced_min_part_size
+def test_multipart_invalid_order():
+ # Create Bucket so that test can run
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket('mybucket')
+
+ multipart = bucket.initiate_multipart_upload("the-key")
+ part1 = b'0' * 5242880
+ etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+ # last part, can be less than 5 MB
+ part2 = b'1'
+ etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+ xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+ xml = xml.format(2, etag2) + xml.format(1, etag1)
+ xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+ bucket.complete_multipart_upload.when.called_with(
+ multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
+
+
@mock_s3
def test_list_multiparts():
# Create Bucket so that test can run
@@ -692,3 +753,21 @@ def test_bucket_location():
conn = boto.s3.connect_to_region("us-west-2")
bucket = conn.create_bucket('mybucket')
bucket.get_location().should.equal("us-west-2")
+
+
+@mock_s3
+def test_ranged_get():
+ conn = boto.connect_s3()
+ bucket = conn.create_bucket('mybucket')
+ key = Key(bucket)
+ key.key = 'bigkey'
+ rep = b"0123456789"
+ key.set_contents_from_string(rep * 10)
+ key.get_contents_as_string(headers={'Range': 'bytes=0-'}).should.equal(rep * 10)
+ key.get_contents_as_string(headers={'Range': 'bytes=0-99'}).should.equal(rep * 10)
+ key.get_contents_as_string(headers={'Range': 'bytes=0-0'}).should.equal(b'0')
+ key.get_contents_as_string(headers={'Range': 'bytes=99-99'}).should.equal(b'9')
+ key.get_contents_as_string(headers={'Range': 'bytes=50-54'}).should.equal(rep[:5])
+ key.get_contents_as_string(headers={'Range': 'bytes=50-'}).should.equal(rep * 5)
+ key.get_contents_as_string(headers={'Range': 'bytes=-60'}).should.equal(rep * 6)
+ key.size.should.equal(100)