diff --git a/.travis.yml b/.travis.yml
index 0b82672cd..b6026cbe9 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,10 +1,20 @@
language: python
python:
- 2.7
+env:
+ matrix:
+ #- BOTO_VERSION=2.13.3
+ - BOTO_VERSION=2.12.0
+ - BOTO_VERSION=2.11.0
+ - BOTO_VERSION=2.10.0
+ - BOTO_VERSION=2.9.9
+ - BOTO_VERSION=2.8
+ - BOTO_VERSION=2.7
install:
+ - pip install boto==$BOTO_VERSION
- pip install .
- pip install -r requirements.txt
script:
- make test
after_success:
- - coveralls
\ No newline at end of file
+ - coveralls
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 000000000..3a615ea97
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,8 @@
+## Moto Contributors
+
+Moto is written by Steve Pulec with contributions from:
+
+* [Zach Smith](https://github.com/zmsmith)
+* [Dilshod Tadjibaev](https://github.com/antimora)
+* [Dan Berglund](https://github.com/cheif)
+* [Lincoln de Sousa](https://github.com/clarete)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..90a85e714
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2012 Steve Pulec
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 0420a9ea3..7521a6d88 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,10 @@
SHELL := /bin/bash
init:
- python setup.py develop
- pip install -r requirements.txt
+ @python setup.py develop
+ @pip install -r requirements.txt
test:
rm -f .coverage
- nosetests --with-coverage ./tests/
+ @nosetests -sv --with-coverage ./tests/
+
diff --git a/README.md b/README.md
index 7fbd40cdf..4af14f623 100644
--- a/README.md
+++ b/README.md
@@ -37,10 +37,13 @@ from mymodule import MyModel
@mock_s3
def test_my_model_save():
+ conn = boto.connect_s3()
+ # We need to create the bucket since this is all in Moto's 'virtual' AWS account
+ conn.create_bucket('mybucket')
+
model_instance = MyModel('steve', 'is awesome')
model_instance.save()
- conn = boto.connect_s3()
assert conn.get_bucket('mybucket').get_key('steve') == 'is awesome'
```
@@ -49,24 +52,30 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock
It gets even better! Moto isn't just S3. Here's the status of the other AWS services implemented.
```gherkin
-|---------------------------------------------------------------------------|
-| Service Name | Decorator | Development Status |
-|---------------------------------------------------------------------------|
-| DynamoDB | @mock_dynamodb | core endpoints done |
-|---------------------------------------------------------------------------|
-| EC2 | @mock_ec2 | core endpoints done |
-| - AMI | | core endpoints done |
-| - EBS | | core endpoints done |
-| - Instances | | all endpoints done |
-| - Security Groups | | core endpoints done |
-| - Tags | | all endpoints done |
-|---------------------------------------------------------------------------|
-| S3 | @mock_s3 | core endpoints done |
-|---------------------------------------------------------------------------|
-| SES | @mock_ses | core endpoints done |
-|---------------------------------------------------------------------------|
-| SQS | @mock_sqs | core endpoints done |
-|---------------------------------------------------------------------------|
+|------------------------------------------------------------------------------|
+| Service Name | Decorator | Development Status |
+|------------------------------------------------------------------------------|
+| Autoscaling | @mock_autoscaling| core endpoints done |
+|------------------------------------------------------------------------------|
+| DynamoDB | @mock_dynamodb | core endpoints done |
+|------------------------------------------------------------------------------|
+| EC2 | @mock_ec2 | core endpoints done |
+| - AMI | | core endpoints done |
+| - EBS | | core endpoints done |
+| - Instances | | all endpoints done |
+| - Security Groups | | core endpoints done |
+| - Tags | | all endpoints done |
+|------------------------------------------------------------------------------|
+| ELB | @mock_elb | core endpoints done |
+|------------------------------------------------------------------------------|
+| S3 | @mock_s3 | core endpoints done |
+|------------------------------------------------------------------------------|
+| SES | @mock_ses | core endpoints done |
+|------------------------------------------------------------------------------|
+| SQS | @mock_sqs | core endpoints done |
+|------------------------------------------------------------------------------|
+| STS | @mock_sts | core endpoints done |
+|------------------------------------------------------------------------------|
```
### Another Example
@@ -154,6 +163,14 @@ $ moto_server ec2
* Running on http://127.0.0.1:5000/
```
+You can also pass the port as the second argument:
+
+```console
+$ moto_server ec2 3000
+ * Running on http://127.0.0.1:3000/
+```
+
+
Then go to [localhost](http://localhost:5000/?Action=DescribeInstances) to see a list of running instances (it will be empty since you haven't added any yet).
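+
+Since `moto_server` exposes the same HTTP API as AWS, you can also point a
+regular boto connection at it instead of the real endpoint. A minimal sketch
+(this assumes boto's standard `host`/`port`/`is_secure` connection keyword
+arguments; adjust for your boto version):
+
+```python
+import boto
+
+# Hypothetical local setup: talk to the mock server instead of AWS.
+conn = boto.connect_ec2('the_key', 'the_secret',
+                        host='localhost', port=5000, is_secure=False)
+```
+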
## Install
@@ -162,9 +179,6 @@ Then go to [localhost](http://localhost:5000/?Action=DescribeInstances) to see a
$ pip install moto
```
-This library has been tested on boto v2.5+.
-
-
## Thanks
A huge thanks to [Gabriel Falcão](https://github.com/gabrielfalcao) and his [HTTPretty](https://github.com/gabrielfalcao/HTTPretty) library. Moto would not exist without it.
diff --git a/moto/__init__.py b/moto/__init__.py
index 0548f9653..57e8eef38 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -1,8 +1,12 @@
import logging
logging.getLogger('boto').setLevel(logging.CRITICAL)
+from .autoscaling import mock_autoscaling
from .dynamodb import mock_dynamodb
from .ec2 import mock_ec2
+from .elb import mock_elb
+from .emr import mock_emr
from .s3 import mock_s3
from .ses import mock_ses
from .sqs import mock_sqs
+from .sts import mock_sts
diff --git a/moto/autoscaling/__init__.py b/moto/autoscaling/__init__.py
new file mode 100644
index 000000000..2c25ca388
--- /dev/null
+++ b/moto/autoscaling/__init__.py
@@ -0,0 +1,2 @@
+from .models import autoscaling_backend
+mock_autoscaling = autoscaling_backend.decorator
diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py
new file mode 100644
index 000000000..a367ba297
--- /dev/null
+++ b/moto/autoscaling/models.py
@@ -0,0 +1,252 @@
+from moto.core import BaseBackend
+from moto.ec2 import ec2_backend
+
+# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown
+DEFAULT_COOLDOWN = 300
+
+
+class FakeScalingPolicy(object):
+ def __init__(self, name, adjustment_type, as_name, scaling_adjustment,
+ cooldown):
+ self.name = name
+ self.adjustment_type = adjustment_type
+ self.as_name = as_name
+ self.scaling_adjustment = scaling_adjustment
+ if cooldown is not None:
+ self.cooldown = cooldown
+ else:
+ self.cooldown = DEFAULT_COOLDOWN
+
+ def execute(self):
+ if self.adjustment_type == 'ExactCapacity':
+ autoscaling_backend.set_desired_capacity(self.as_name, self.scaling_adjustment)
+ elif self.adjustment_type == 'ChangeInCapacity':
+ autoscaling_backend.change_capacity(self.as_name, self.scaling_adjustment)
+ elif self.adjustment_type == 'PercentChangeInCapacity':
+ autoscaling_backend.change_capacity_percent(self.as_name, self.scaling_adjustment)
+
+
+class FakeLaunchConfiguration(object):
+ def __init__(self, name, image_id, key_name, security_groups, user_data,
+ instance_type, instance_monitoring, instance_profile_name,
+ spot_price, ebs_optimized):
+ self.name = name
+ self.image_id = image_id
+ self.key_name = key_name
+ self.security_groups = security_groups
+ self.user_data = user_data
+ self.instance_type = instance_type
+ self.instance_monitoring = instance_monitoring
+ self.instance_profile_name = instance_profile_name
+ self.spot_price = spot_price
+ self.ebs_optimized = ebs_optimized
+
+ @property
+ def instance_monitoring_enabled(self):
+ if self.instance_monitoring:
+ return 'true'
+ return 'false'
+
+
+class FakeAutoScalingGroup(object):
+ def __init__(self, name, availability_zones, desired_capacity, max_size,
+ min_size, launch_config_name, vpc_zone_identifier,
+ default_cooldown, health_check_period, health_check_type,
+ load_balancers, placement_group, termination_policies):
+ self.name = name
+ self.availability_zones = availability_zones
+ self.max_size = max_size
+ self.min_size = min_size
+
+ self.launch_config = autoscaling_backend.launch_configurations[launch_config_name]
+ self.launch_config_name = launch_config_name
+ self.vpc_zone_identifier = vpc_zone_identifier
+
+ self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN
+ self.health_check_period = health_check_period
+ self.health_check_type = health_check_type if health_check_type else "EC2"
+ self.load_balancers = load_balancers
+ self.placement_group = placement_group
+ self.termination_policies = termination_policies
+
+ self.instances = []
+ self.set_desired_capacity(desired_capacity)
+
+ def update(self, availability_zones, desired_capacity, max_size, min_size,
+ launch_config_name, vpc_zone_identifier, default_cooldown,
+ health_check_period, health_check_type, load_balancers,
+ placement_group, termination_policies):
+ self.availability_zones = availability_zones
+ self.max_size = max_size
+ self.min_size = min_size
+
+ self.launch_config = autoscaling_backend.launch_configurations[launch_config_name]
+ self.launch_config_name = launch_config_name
+ self.vpc_zone_identifier = vpc_zone_identifier
+
+ self.set_desired_capacity(desired_capacity)
+
+ def set_desired_capacity(self, new_capacity):
+ if new_capacity is None:
+ self.desired_capacity = self.min_size
+ else:
+ self.desired_capacity = new_capacity
+
+ curr_instance_count = len(self.instances)
+
+ if self.desired_capacity == curr_instance_count:
+ return
+
+ if self.desired_capacity > curr_instance_count:
+ # Need more instances
+ count_needed = self.desired_capacity - curr_instance_count
+ reservation = ec2_backend.add_instances(
+ self.launch_config.image_id,
+ count_needed,
+ self.launch_config.user_data
+ )
+ for instance in reservation.instances:
+ instance.autoscaling_group = self
+ self.instances.extend(reservation.instances)
+ else:
+ # Need to remove some instances
+ count_to_remove = curr_instance_count - self.desired_capacity
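+            # Remove instances from the front of the list, i.e. the earliest launched.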
+ instances_to_remove = self.instances[:count_to_remove]
+ instance_ids_to_remove = [instance.id for instance in instances_to_remove]
+ ec2_backend.terminate_instances(instance_ids_to_remove)
+ self.instances = self.instances[count_to_remove:]
+
+
+class AutoScalingBackend(BaseBackend):
+
+ def __init__(self):
+ self.autoscaling_groups = {}
+ self.launch_configurations = {}
+ self.policies = {}
+
+ def create_launch_configuration(self, name, image_id, key_name,
+ security_groups, user_data, instance_type,
+ instance_monitoring, instance_profile_name,
+ spot_price, ebs_optimized):
+ launch_configuration = FakeLaunchConfiguration(
+ name=name,
+ image_id=image_id,
+ key_name=key_name,
+ security_groups=security_groups,
+ user_data=user_data,
+ instance_type=instance_type,
+ instance_monitoring=instance_monitoring,
+ instance_profile_name=instance_profile_name,
+ spot_price=spot_price,
+ ebs_optimized=ebs_optimized,
+ )
+ self.launch_configurations[name] = launch_configuration
+ return launch_configuration
+
+ def describe_launch_configurations(self, names):
+ configurations = self.launch_configurations.values()
+ if names:
+ return [configuration for configuration in configurations if configuration.name in names]
+ else:
+ return configurations
+
+ def delete_launch_configuration(self, launch_configuration_name):
+ self.launch_configurations.pop(launch_configuration_name, None)
+
+ def create_autoscaling_group(self, name, availability_zones,
+ desired_capacity, max_size, min_size,
+ launch_config_name, vpc_zone_identifier,
+ default_cooldown, health_check_period,
+ health_check_type, load_balancers,
+ placement_group, termination_policies):
+ group = FakeAutoScalingGroup(
+ name=name,
+ availability_zones=availability_zones,
+ desired_capacity=desired_capacity,
+ max_size=max_size,
+ min_size=min_size,
+ launch_config_name=launch_config_name,
+ vpc_zone_identifier=vpc_zone_identifier,
+ default_cooldown=default_cooldown,
+ health_check_period=health_check_period,
+ health_check_type=health_check_type,
+ load_balancers=load_balancers,
+ placement_group=placement_group,
+ termination_policies=termination_policies,
+ )
+ self.autoscaling_groups[name] = group
+ return group
+
+ def update_autoscaling_group(self, name, availability_zones,
+ desired_capacity, max_size, min_size,
+ launch_config_name, vpc_zone_identifier,
+ default_cooldown, health_check_period,
+ health_check_type, load_balancers,
+ placement_group, termination_policies):
+ group = self.autoscaling_groups[name]
+ group.update(availability_zones, desired_capacity, max_size,
+ min_size, launch_config_name, vpc_zone_identifier,
+ default_cooldown, health_check_period, health_check_type,
+ load_balancers, placement_group, termination_policies)
+ return group
+
+ def describe_autoscaling_groups(self, names):
+ groups = self.autoscaling_groups.values()
+ if names:
+ return [group for group in groups if group.name in names]
+ else:
+ return groups
+
+ def delete_autoscaling_group(self, group_name):
+ self.autoscaling_groups.pop(group_name, None)
+
+ def describe_autoscaling_instances(self):
+ instances = []
+ for group in self.autoscaling_groups.values():
+ instances.extend(group.instances)
+ return instances
+
+ def set_desired_capacity(self, group_name, desired_capacity):
+ group = self.autoscaling_groups[group_name]
+ group.set_desired_capacity(desired_capacity)
+
+ def change_capacity(self, group_name, scaling_adjustment):
+ group = self.autoscaling_groups[group_name]
+ desired_capacity = group.desired_capacity + scaling_adjustment
+ self.set_desired_capacity(group_name, desired_capacity)
+
+ def change_capacity_percent(self, group_name, scaling_adjustment):
+ """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html
+ If PercentChangeInCapacity returns a value between 0 and 1,
+ Auto Scaling will round it off to 1. If the PercentChangeInCapacity
+ returns a value greater than 1, Auto Scaling will round it off to the
+ lower value. For example, if PercentChangeInCapacity returns 12.5,
+ then Auto Scaling will round it off to 12."""
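+        # Worked examples of the rounding rule in the docstring:
+        #   desired_capacity=10, adjustment=12.5 -> 10 * 1.125 = 11.25 -> 11
+        #   desired_capacity=1,  adjustment=50   -> 1.5, which falls between
+        #   1 and 2, so it rounds up to 2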
+ group = self.autoscaling_groups[group_name]
+ percent_change = 1 + (scaling_adjustment / 100.0)
+ desired_capacity = group.desired_capacity * percent_change
+ if group.desired_capacity < desired_capacity < group.desired_capacity + 1:
+ desired_capacity = group.desired_capacity + 1
+ else:
+ desired_capacity = int(desired_capacity)
+ self.set_desired_capacity(group_name, desired_capacity)
+
+ def create_autoscaling_policy(self, name, adjustment_type, as_name,
+ scaling_adjustment, cooldown):
+ policy = FakeScalingPolicy(name, adjustment_type, as_name,
+ scaling_adjustment, cooldown)
+
+ self.policies[name] = policy
+ return policy
+
+ def describe_policies(self):
+ return self.policies.values()
+
+ def delete_policy(self, group_name):
+ self.policies.pop(group_name, None)
+
+ def execute_policy(self, group_name):
+ policy = self.policies[group_name]
+ policy.execute()
+
+autoscaling_backend = AutoScalingBackend()
diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py
new file mode 100644
index 000000000..55cdf9b6e
--- /dev/null
+++ b/moto/autoscaling/responses.py
@@ -0,0 +1,351 @@
+from jinja2 import Template
+
+from moto.core.responses import BaseResponse
+from .models import autoscaling_backend
+
+
+class AutoScalingResponse(BaseResponse):
+
+ def _get_param(self, param_name):
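+        # parse_qs values are lists (e.g. {'MaxSize': ['2']}), so unwrap the first element.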
+ return self.querystring.get(param_name, [None])[0]
+
+ def _get_int_param(self, param_name):
+ value = self._get_param(param_name)
+ if value is not None:
+ return int(value)
+
+ def _get_multi_param(self, param_prefix):
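+        # Gather every value whose key shares the prefix, e.g.
+        # SecurityGroups.member.1, SecurityGroups.member.2, ...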
+ return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)]
+
+ def create_launch_configuration(self):
+ instance_monitoring_string = self._get_param('InstanceMonitoring.Enabled')
+ if instance_monitoring_string == 'true':
+ instance_monitoring = True
+ else:
+ instance_monitoring = False
+ autoscaling_backend.create_launch_configuration(
+ name=self._get_param('LaunchConfigurationName'),
+ image_id=self._get_param('ImageId'),
+ key_name=self._get_param('KeyName'),
+ security_groups=self._get_multi_param('SecurityGroups.member.'),
+ user_data=self._get_param('UserData'),
+ instance_type=self._get_param('InstanceType'),
+ instance_monitoring=instance_monitoring,
+ instance_profile_name=self._get_param('IamInstanceProfile'),
+ spot_price=self._get_param('SpotPrice'),
+ ebs_optimized=self._get_param('EbsOptimized'),
+ )
+ template = Template(CREATE_LAUNCH_CONFIGURATION_TEMPLATE)
+ return template.render()
+
+ def describe_launch_configurations(self):
+ names = self._get_multi_param('LaunchConfigurationNames')
+ launch_configurations = autoscaling_backend.describe_launch_configurations(names)
+ template = Template(DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE)
+ return template.render(launch_configurations=launch_configurations)
+
+ def delete_launch_configuration(self):
+ launch_configurations_name = self.querystring.get('LaunchConfigurationName')[0]
+ autoscaling_backend.delete_launch_configuration(launch_configurations_name)
+ template = Template(DELETE_LAUNCH_CONFIGURATION_TEMPLATE)
+ return template.render()
+
+ def create_auto_scaling_group(self):
+ autoscaling_backend.create_autoscaling_group(
+ name=self._get_param('AutoScalingGroupName'),
+ availability_zones=self._get_multi_param('AvailabilityZones.member'),
+ desired_capacity=self._get_int_param('DesiredCapacity'),
+ max_size=self._get_int_param('MaxSize'),
+ min_size=self._get_int_param('MinSize'),
+ launch_config_name=self._get_param('LaunchConfigurationName'),
+ vpc_zone_identifier=self._get_param('VPCZoneIdentifier'),
+ default_cooldown=self._get_int_param('DefaultCooldown'),
+ health_check_period=self._get_int_param('HealthCheckGracePeriod'),
+ health_check_type=self._get_param('HealthCheckType'),
+ load_balancers=self._get_multi_param('LoadBalancerNames.member'),
+ placement_group=self._get_param('PlacementGroup'),
+ termination_policies=self._get_multi_param('TerminationPolicies.member'),
+ )
+ template = Template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
+ return template.render()
+
+ def describe_auto_scaling_groups(self):
+ names = self._get_multi_param("AutoScalingGroupNames")
+ groups = autoscaling_backend.describe_autoscaling_groups(names)
+ template = Template(DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE)
+ return template.render(groups=groups)
+
+ def update_auto_scaling_group(self):
+ autoscaling_backend.update_autoscaling_group(
+ name=self._get_param('AutoScalingGroupName'),
+ availability_zones=self._get_multi_param('AvailabilityZones.member'),
+ desired_capacity=self._get_int_param('DesiredCapacity'),
+ max_size=self._get_int_param('MaxSize'),
+ min_size=self._get_int_param('MinSize'),
+ launch_config_name=self._get_param('LaunchConfigurationName'),
+ vpc_zone_identifier=self._get_param('VPCZoneIdentifier'),
+ default_cooldown=self._get_int_param('DefaultCooldown'),
+ health_check_period=self._get_int_param('HealthCheckGracePeriod'),
+ health_check_type=self._get_param('HealthCheckType'),
+ load_balancers=self._get_multi_param('LoadBalancerNames.member'),
+ placement_group=self._get_param('PlacementGroup'),
+ termination_policies=self._get_multi_param('TerminationPolicies.member'),
+ )
+ template = Template(UPDATE_AUTOSCALING_GROUP_TEMPLATE)
+ return template.render()
+
+ def delete_auto_scaling_group(self):
+ group_name = self._get_param('AutoScalingGroupName')
+ autoscaling_backend.delete_autoscaling_group(group_name)
+ template = Template(DELETE_AUTOSCALING_GROUP_TEMPLATE)
+ return template.render()
+
+ def set_desired_capacity(self):
+ group_name = self._get_param('AutoScalingGroupName')
+ desired_capacity = self._get_int_param('DesiredCapacity')
+ autoscaling_backend.set_desired_capacity(group_name, desired_capacity)
+ template = Template(SET_DESIRED_CAPACITY_TEMPLATE)
+ return template.render()
+
+ def describe_auto_scaling_instances(self):
+ instances = autoscaling_backend.describe_autoscaling_instances()
+ template = Template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE)
+ return template.render(instances=instances)
+
+ def put_scaling_policy(self):
+ policy = autoscaling_backend.create_autoscaling_policy(
+ name=self._get_param('PolicyName'),
+ adjustment_type=self._get_param('AdjustmentType'),
+ as_name=self._get_param('AutoScalingGroupName'),
+ scaling_adjustment=self._get_int_param('ScalingAdjustment'),
+ cooldown=self._get_int_param('Cooldown'),
+ )
+ template = Template(CREATE_SCALING_POLICY_TEMPLATE)
+ return template.render(policy=policy)
+
+ def describe_policies(self):
+ policies = autoscaling_backend.describe_policies()
+ template = Template(DESCRIBE_SCALING_POLICIES_TEMPLATE)
+ return template.render(policies=policies)
+
+ def delete_policy(self):
+ group_name = self._get_param('PolicyName')
+ autoscaling_backend.delete_policy(group_name)
+ template = Template(DELETE_POLICY_TEMPLATE)
+ return template.render()
+
+ def execute_policy(self):
+ group_name = self._get_param('PolicyName')
+ autoscaling_backend.execute_policy(group_name)
+ template = Template(EXECUTE_POLICY_TEMPLATE)
+ return template.render()
+
+
+CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<ResponseMetadata>
+   <RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
+</ResponseMetadata>
+</CreateLaunchConfigurationResponse>"""
+
+DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """<DescribeLaunchConfigurationsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <DescribeLaunchConfigurationsResult>
+    <LaunchConfigurations>
+      {% for launch_configuration in launch_configurations %}
+      <member>
+        <SecurityGroups>
+          {% for security_group in launch_configuration.security_groups %}
+          <member>{{ security_group }}</member>
+          {% endfor %}
+        </SecurityGroups>
+        <CreatedTime>2013-01-21T23:04:42.200Z</CreatedTime>
+        <KernelId/>
+        {% if launch_configuration.instance_profile_name %}
+        <IamInstanceProfile>{{ launch_configuration.instance_profile_name }}</IamInstanceProfile>
+        {% endif %}
+        <LaunchConfigurationName>{{ launch_configuration.name }}</LaunchConfigurationName>
+        {% if launch_configuration.user_data %}
+        <UserData>{{ launch_configuration.user_data }}</UserData>
+        {% else %}
+        <UserData/>
+        {% endif %}
+        <InstanceType>m1.small</InstanceType>
+        <LaunchConfigurationARN>arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:
+        9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc</LaunchConfigurationARN>
+        <BlockDeviceMappings/>
+        <ImageId>{{ launch_configuration.image_id }}</ImageId>
+        {% if launch_configuration.key_name %}
+        <KeyName>{{ launch_configuration.key_name }}</KeyName>
+        {% else %}
+        <KeyName/>
+        {% endif %}
+        <RamdiskId/>
+        <EbsOptimized>{{ launch_configuration.ebs_optimized }}</EbsOptimized>
+        <InstanceMonitoring>
+          <Enabled>{{ launch_configuration.instance_monitoring_enabled }}</Enabled>
+        </InstanceMonitoring>
+        {% if launch_configuration.spot_price %}
+        <SpotPrice>{{ launch_configuration.spot_price }}</SpotPrice>
+        {% endif %}
+      </member>
+      {% endfor %}
+    </LaunchConfigurations>
+  </DescribeLaunchConfigurationsResult>
+  <ResponseMetadata>
+    <RequestId>d05a22f8-b690-11e2-bf8e-2113fEXAMPLE</RequestId>
+  </ResponseMetadata>
+</DescribeLaunchConfigurationsResponse>"""
+
+DELETE_LAUNCH_CONFIGURATION_TEMPLATE = """<DeleteLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ResponseMetadata>
+    <RequestId>7347261f-97df-11e2-8756-35eEXAMPLE</RequestId>
+  </ResponseMetadata>
+</DeleteLaunchConfigurationResponse>"""
+
+CREATE_AUTOSCALING_GROUP_TEMPLATE = """<CreateAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<ResponseMetadata>
+<RequestId>8d798a29-f083-11e1-bdfb-cb223EXAMPLE</RequestId>
+</ResponseMetadata>
+</CreateAutoScalingGroupResponse>"""
+
+DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<DescribeAutoScalingGroupsResult>
+  <AutoScalingGroups>
+    {% for group in groups %}
+    <member>
+      <Tags/>
+      <SuspendedProcesses/>
+      <AutoScalingGroupName>{{ group.name }}</AutoScalingGroupName>
+      <HealthCheckType>{{ group.health_check_type }}</HealthCheckType>
+      <CreatedTime>2013-05-06T17:47:15.107Z</CreatedTime>
+      <EnabledMetrics/>
+      <LaunchConfigurationName>{{ group.launch_config_name }}</LaunchConfigurationName>
+      <Instances/>
+      <DesiredCapacity>{{ group.desired_capacity }}</DesiredCapacity>
+      <AvailabilityZones>
+        {% for availability_zone in group.availability_zones %}
+        <member>{{ availability_zone }}</member>
+        {% endfor %}
+      </AvailabilityZones>
+      {% if group.load_balancers %}
+      <LoadBalancerNames>
+        {% for load_balancer in group.load_balancers %}
+        <member>{{ load_balancer }}</member>
+        {% endfor %}
+      </LoadBalancerNames>
+      {% else %}
+      <LoadBalancerNames/>
+      {% endif %}
+      <MinSize>{{ group.min_size }}</MinSize>
+      {% if group.vpc_zone_identifier %}
+      <VPCZoneIdentifier>{{ group.vpc_zone_identifier }}</VPCZoneIdentifier>
+      {% else %}
+      <VPCZoneIdentifier/>
+      {% endif %}
+      <HealthCheckGracePeriod>{{ group.health_check_period }}</HealthCheckGracePeriod>
+      <DefaultCooldown>{{ group.default_cooldown }}</DefaultCooldown>
+      <AutoScalingGroupARN>arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb
+      :autoScalingGroupName/my-test-asg-lbs</AutoScalingGroupARN>
+      {% if group.termination_policies %}
+      <TerminationPolicies>
+        {% for policy in group.termination_policies %}
+        <member>{{ policy }}</member>
+        {% endfor %}
+      </TerminationPolicies>
+      {% else %}
+      <TerminationPolicies/>
+      {% endif %}
+      <MaxSize>{{ group.max_size }}</MaxSize>
+      {% if group.placement_group %}
+      <PlacementGroup>{{ group.placement_group }}</PlacementGroup>
+      {% endif %}
+    </member>
+    {% endfor %}
+  </AutoScalingGroups>
+</DescribeAutoScalingGroupsResult>
+<ResponseMetadata>
+  <RequestId>0f02a07d-b677-11e2-9eb0-dd50EXAMPLE</RequestId>
+</ResponseMetadata>
+</DescribeAutoScalingGroupsResponse>"""
+
+UPDATE_AUTOSCALING_GROUP_TEMPLATE = """<UpdateAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ResponseMetadata>
+    <RequestId>adafead0-ab8a-11e2-ba13-ab0ccEXAMPLE</RequestId>
+  </ResponseMetadata>
+</UpdateAutoScalingGroupResponse>"""
+
+DELETE_AUTOSCALING_GROUP_TEMPLATE = """<DeleteAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ResponseMetadata>
+    <RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
+  </ResponseMetadata>
+</DeleteAutoScalingGroupResponse>"""
+
+DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """<DescribeAutoScalingInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <DescribeAutoScalingInstancesResult>
+    <AutoScalingInstances>
+      {% for instance in instances %}
+      <member>
+        <HealthStatus>HEALTHY</HealthStatus>
+        <AutoScalingGroupName>{{ instance.autoscaling_group.name }}</AutoScalingGroupName>
+        <AvailabilityZone>us-east-1e</AvailabilityZone>
+        <InstanceId>{{ instance.id }}</InstanceId>
+        <LaunchConfigurationName>{{ instance.autoscaling_group.launch_config_name }}</LaunchConfigurationName>
+        <LifecycleState>InService</LifecycleState>
+      </member>
+      {% endfor %}
+    </AutoScalingInstances>
+  </DescribeAutoScalingInstancesResult>
+  <ResponseMetadata>
+    <RequestId>df992dc3-b72f-11e2-81e1-750aa6EXAMPLE</RequestId>
+  </ResponseMetadata>
+</DescribeAutoScalingInstancesResponse>"""
+
+CREATE_SCALING_POLICY_TEMPLATE = """<PutScalingPolicyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <PutScalingPolicyResult>
+    <PolicyARN>arn:aws:autoscaling:us-east-1:803981987763:scalingPolicy:b0dcf5e8
+-02e6-4e31-9719-0675d0dc31ae:autoScalingGroupName/my-test-asg:policyName/my-scal
+eout-policy</PolicyARN>
+  </PutScalingPolicyResult>
+  <ResponseMetadata>
+    <RequestId>3cfc6fef-c08b-11e2-a697-2922EXAMPLE</RequestId>
+  </ResponseMetadata>
+</PutScalingPolicyResponse>"""
+
+DESCRIBE_SCALING_POLICIES_TEMPLATE = """<DescribePoliciesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <DescribePoliciesResult>
+    <ScalingPolicies>
+      {% for policy in policies %}
+      <member>
+        <PolicyARN>arn:aws:autoscaling:us-east-1:803981987763:scalingPolicy:c322
+761b-3172-4d56-9a21-0ed9d6161d67:autoScalingGroupName/my-test-asg:policyName/MyScaleDownPolicy</PolicyARN>
+        <AdjustmentType>{{ policy.adjustment_type }}</AdjustmentType>
+        <ScalingAdjustment>{{ policy.scaling_adjustment }}</ScalingAdjustment>
+        <PolicyName>{{ policy.name }}</PolicyName>
+        <AutoScalingGroupName>{{ policy.as_name }}</AutoScalingGroupName>
+        <Cooldown>{{ policy.cooldown }}</Cooldown>
+        <Alarms/>
+      </member>
+      {% endfor %}
+    </ScalingPolicies>
+  </DescribePoliciesResult>
+  <ResponseMetadata>
+    <RequestId>ec3bffad-b739-11e2-b38d-15fbEXAMPLE</RequestId>
+  </ResponseMetadata>
+</DescribePoliciesResponse>"""
+
+SET_DESIRED_CAPACITY_TEMPLATE = """<SetDesiredCapacityResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ResponseMetadata>
+    <RequestId>9fb7e2db-6998-11e2-a985-57c82EXAMPLE</RequestId>
+  </ResponseMetadata>
+</SetDesiredCapacityResponse>"""
+
+EXECUTE_POLICY_TEMPLATE = """<ExecutePolicyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ResponseMetadata>
+    <RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
+  </ResponseMetadata>
+</ExecutePolicyResponse>"""
+
+DELETE_POLICY_TEMPLATE = """<DeletePolicyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+  <ResponseMetadata>
+    <RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
+  </ResponseMetadata>
+</DeletePolicyResponse>"""
diff --git a/moto/autoscaling/urls.py b/moto/autoscaling/urls.py
new file mode 100644
index 000000000..affa69c96
--- /dev/null
+++ b/moto/autoscaling/urls.py
@@ -0,0 +1,9 @@
+from .responses import AutoScalingResponse
+
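+# The capture group matches any region, e.g. autoscaling.us-east-1.amazonaws.com.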
+url_bases = [
+ "https?://autoscaling.(.+).amazonaws.com",
+]
+
+url_paths = {
+ '{0}/$': AutoScalingResponse().dispatch,
+}
diff --git a/moto/backends.py b/moto/backends.py
new file mode 100644
index 000000000..6f375a8f1
--- /dev/null
+++ b/moto/backends.py
@@ -0,0 +1,21 @@
+from moto.autoscaling import autoscaling_backend
+from moto.dynamodb import dynamodb_backend
+from moto.ec2 import ec2_backend
+from moto.elb import elb_backend
+from moto.emr import emr_backend
+from moto.s3 import s3_backend
+from moto.ses import ses_backend
+from moto.sqs import sqs_backend
+from moto.sts import sts_backend
+
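+# Map each service name to its singleton backend instance.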
+BACKENDS = {
+ 'autoscaling': autoscaling_backend,
+ 'dynamodb': dynamodb_backend,
+ 'ec2': ec2_backend,
+ 'elb': elb_backend,
+ 'emr': emr_backend,
+ 's3': s3_backend,
+ 'ses': ses_backend,
+ 'sqs': sqs_backend,
+ 'sts': sts_backend,
+}
diff --git a/moto/core/models.py b/moto/core/models.py
index e98c1eed3..f3e6ad701 100644
--- a/moto/core/models.py
+++ b/moto/core/models.py
@@ -1,7 +1,7 @@
import functools
import re
-from moto.packages.httpretty import HTTPretty
+from httpretty import HTTPretty
from .responses import metadata_response
from .utils import convert_regex_to_flask_path
@@ -47,6 +47,7 @@ class MockAWS(object):
result = func(*args, **kwargs)
return result
functools.update_wrapper(wrapper, func)
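+            # Mirror functools' __wrapped__ convention so the undecorated
+            # function stays reachable.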
+ wrapper.__wrapped__ = func
return wrapper
diff --git a/moto/core/responses.py b/moto/core/responses.py
index d74bcd2e4..7e896e961 100644
--- a/moto/core/responses.py
+++ b/moto/core/responses.py
@@ -1,50 +1,80 @@
import datetime
import json
-from urlparse import parse_qs
+from urlparse import parse_qs, urlparse
-from moto.core.utils import headers_to_dict, camelcase_to_underscores, method_names_from_class
+from moto.core.utils import camelcase_to_underscores, method_names_from_class
class BaseResponse(object):
- def dispatch(self, uri, method, body, headers):
- if body:
- querystring = parse_qs(body)
+
+ def dispatch(self, request, full_url, headers):
+ if hasattr(request, 'body'):
+ # Boto
+ self.body = request.body
else:
- querystring = headers_to_dict(headers)
+ # Flask server
+ self.body = request.data
- self.path = uri.path
+ querystring = parse_qs(urlparse(full_url).query)
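+    # Fall back from the URL query string to the form-encoded body, then to the raw headers.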
+ if not querystring:
+ querystring = parse_qs(self.body)
+ if not querystring:
+ querystring = headers
+
+ self.uri = full_url
+ self.path = urlparse(full_url).path
self.querystring = querystring
+ self.method = request.method
- action = querystring.get('Action', [""])[0]
+ self.headers = dict(request.headers)
+ self.response_headers = headers
+ return self.call_action()
+
+ def call_action(self):
+ headers = self.response_headers
+ action = self.querystring.get('Action', [""])[0]
action = camelcase_to_underscores(action)
-
method_names = method_names_from_class(self.__class__)
if action in method_names:
method = getattr(self, action)
- return method()
+ response = method()
+ if isinstance(response, basestring):
+ return 200, headers, response
+ else:
+ body, new_headers = response
+ status = new_headers.pop('status', 200)
+ headers.update(new_headers)
+ return status, headers, body
raise NotImplementedError("The {} action has not been implemented".format(action))
-def metadata_response(uri, method, body, headers):
+def metadata_response(request, full_url, headers):
"""
Mock response for localhost metadata
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
"""
-
+ parsed_url = urlparse(full_url)
tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
- path = uri.path.lstrip("/latest/meta-data/")
+ credentials = dict(
+ AccessKeyId="test-key",
+ SecretAccessKey="test-secret-key",
+ Token="test-session-token",
+ Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ")
+ )
+
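+    # NOTE: str.lstrip strips a *set* of characters rather than a literal
+    # prefix; it works here because none of the remaining metadata paths
+    # begin with a character from that set.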
+ path = parsed_url.path.lstrip("/latest/meta-data/")
if path == '':
- return "iam/"
- elif path == 'iam/':
- return 'security-credentials/'
+ result = 'iam'
+ elif path == 'iam':
+ result = json.dumps({
+ 'security-credentials': {
+ 'default-role': credentials
+ }
+ })
elif path == 'iam/security-credentials/':
- return 'default-role'
+ result = 'default-role'
elif path == 'iam/security-credentials/default-role':
- return json.dumps(dict(
- AccessKeyId="test-key",
- SecretAccessKey="test-secret-key",
- Token="test-session-token",
- Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ")
- ))
+ result = json.dumps(credentials)
+ return 200, headers, result
diff --git a/moto/core/utils.py b/moto/core/utils.py
index 8532698cb..53418edbf 100644
--- a/moto/core/utils.py
+++ b/moto/core/utils.py
@@ -1,37 +1,10 @@
-from collections import namedtuple
import inspect
import random
import re
-from urlparse import parse_qs
from flask import request
-def headers_to_dict(headers):
- if isinstance(headers, dict):
- # If already dict, return
- return headers
-
- result = {}
- for index, header in enumerate(headers.split("\r\n")):
- if not header:
- continue
- if index:
- # Parsing headers
- key, value = header.split(":", 1)
- result[key.strip()] = value.strip()
- else:
- # Parsing method and path
- path_and_querystring = header.split(" /")[1]
- if '?' in path_and_querystring:
- querystring = path_and_querystring.split("?")[1]
- else:
- querystring = path_and_querystring
- queryset_dict = parse_qs(querystring)
- result.update(queryset_dict)
- return result
-
-
def camelcase_to_underscores(argument):
''' Converts a camelcase param like theNewAttribute to the equivalent
python underscore variable like the_new_attribute'''
@@ -91,23 +64,17 @@ class convert_flask_to_httpretty_response(object):
return "{}.{}".format(outer, self.callback.__name__)
def __call__(self, args=None, **kwargs):
- hostname = request.host_url
- method = request.method
- path = request.path
- query = request.query_string
-
- # Mimic the HTTPretty URIInfo class
- URI = namedtuple('URI', 'hostname method path query')
- uri = URI(hostname, method, path, query)
-
- body = request.data or query
headers = dict(request.headers)
- result = self.callback(uri, method, body, headers)
- if isinstance(result, basestring):
- # result is just the response
- return result
- else:
- # result is a responce, headers tuple
- response, headers = result
- status = headers.pop('status', None)
- return response, status, headers
+ result = self.callback(request, request.url, headers)
+ # result is a status, headers, response tuple
+ status, headers, response = result
+ return response, status, headers
+
+
+def iso_8601_datetime(datetime):
+ return datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
+def rfc_1123_datetime(datetime):
+ RFC1123 = '%a, %d %b %Y %H:%M:%S GMT'
+ return datetime.strftime(RFC1123)
diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py
index 84330e279..66612caa8 100644
--- a/moto/dynamodb/models.py
+++ b/moto/dynamodb/models.py
@@ -101,6 +101,10 @@ class Table(object):
self.created_at = datetime.datetime.now()
self.items = defaultdict(dict)
+ @property
+ def has_range_key(self):
+ return self.range_key_attr is not None
+
@property
def describe(self):
results = {
@@ -122,7 +126,7 @@ class Table(object):
"TableSizeBytes": 0,
}
}
- if self.range_key_attr:
+ if self.has_range_key:
results["Table"]["KeySchema"]["RangeKeyElement"] = {
"AttributeName": self.range_key_attr,
"AttributeType": self.range_key_type
@@ -132,7 +136,7 @@ class Table(object):
def __len__(self):
count = 0
for key, value in self.items.iteritems():
- if self.range_key_attr:
+ if self.has_range_key:
count += len(value)
else:
count += 1
@@ -143,7 +147,7 @@ class Table(object):
def put_item(self, item_attrs):
hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
- if self.range_key_attr:
+ if self.has_range_key:
range_value = DynamoType(item_attrs.get(self.range_key_attr))
else:
range_value = None
@@ -157,6 +161,8 @@ class Table(object):
return item
def get_item(self, hash_key, range_key):
+ if self.has_range_key and not range_key:
+ raise ValueError("Table has a range key, but no range key was passed into get_item")
try:
if range_key:
return self.items[hash_key][range_key]
diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py
index a75443a11..b2cc29e8c 100644
--- a/moto/dynamodb/responses.py
+++ b/moto/dynamodb/responses.py
@@ -1,6 +1,7 @@
import json
-from moto.core.utils import headers_to_dict, camelcase_to_underscores
+from moto.core.responses import BaseResponse
+from moto.core.utils import camelcase_to_underscores
from .models import dynamodb_backend, dynamo_json_dump
@@ -27,17 +28,11 @@ GET_SESSION_TOKEN_RESULT = """
"""
-def sts_handler(uri, method, body, headers):
+def sts_handler():
return GET_SESSION_TOKEN_RESULT
-class DynamoHandler(object):
-
- def __init__(self, uri, method, body, headers):
- self.uri = uri
- self.method = method
- self.body = body
- self.headers = headers
+class DynamoHandler(BaseResponse):
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
@@ -45,22 +40,35 @@ class DynamoHandler(object):
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
- match = headers.get('X-Amz-Target')
+ # Headers are case-insensitive. Probably a better way to do this.
+ match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split(".")[1]
def error(self, type_, status=400):
- return dynamo_json_dump({'__type': type_}), dict(status=400)
+ return status, self.response_headers, dynamo_json_dump({'__type': type_})
- def dispatch(self):
+ def call_action(self):
+ if 'GetSessionToken' in self.body:
+ return 200, self.response_headers, sts_handler()
+
+ self.body = json.loads(self.body or '{}')
endpoint = self.get_endpoint_name(self.headers)
if endpoint:
endpoint = camelcase_to_underscores(endpoint)
- return getattr(self, endpoint)(self.uri, self.method, self.body, self.headers)
- else:
- return "", dict(status=404)
+            response = getattr(self, endpoint)()
+            if isinstance(response, basestring):
+                return 200, self.response_headers, response
+            else:
+                status_code, new_headers, response_content = response
+                self.response_headers.update(new_headers)
+                return status_code, self.response_headers, response_content
+        else:
+            return 404, self.response_headers, ""
+
-    def list_tables(self, uri, method, body, headers):
+    def list_tables(self):
+ body = self.body
limit = body.get('Limit')
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
@@ -77,7 +85,8 @@ class DynamoHandler(object):
response["LastEvaluatedTableName"] = tables[-1]
return dynamo_json_dump(response)
- def create_table(self, uri, method, body, headers):
+ def create_table(self):
+ body = self.body
name = body['TableName']
key_schema = body['KeySchema']
@@ -104,8 +113,8 @@ class DynamoHandler(object):
)
return dynamo_json_dump(table.describe)
- def delete_table(self, uri, method, body, headers):
- name = body['TableName']
+ def delete_table(self):
+ name = self.body['TableName']
table = dynamodb_backend.delete_table(name)
if table:
return dynamo_json_dump(table.describe)
@@ -113,16 +122,16 @@ class DynamoHandler(object):
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
- def update_table(self, uri, method, body, headers):
- name = body['TableName']
- throughput = body["ProvisionedThroughput"]
+ def update_table(self):
+ name = self.body['TableName']
+ throughput = self.body["ProvisionedThroughput"]
new_read_units = throughput["ReadCapacityUnits"]
new_write_units = throughput["WriteCapacityUnits"]
table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units)
return dynamo_json_dump(table.describe)
- def describe_table(self, uri, method, body, headers):
- name = body['TableName']
+ def describe_table(self):
+ name = self.body['TableName']
try:
table = dynamodb_backend.tables[name]
except KeyError:
@@ -130,9 +139,9 @@ class DynamoHandler(object):
return self.error(er)
return dynamo_json_dump(table.describe)
- def put_item(self, uri, method, body, headers):
- name = body['TableName']
- item = body['Item']
+ def put_item(self):
+ name = self.body['TableName']
+ item = self.body['Item']
result = dynamodb_backend.put_item(name, item)
if result:
item_dict = result.to_json()
@@ -142,8 +151,8 @@ class DynamoHandler(object):
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
- def batch_write_item(self, uri, method, body, headers):
- table_batches = body['RequestItems']
+ def batch_write_item(self):
+ table_batches = self.body['RequestItems']
for table_name, table_requests in table_batches.iteritems():
for table_request in table_requests:
@@ -173,23 +182,28 @@ class DynamoHandler(object):
return dynamo_json_dump(response)
- def get_item(self, uri, method, body, headers):
- name = body['TableName']
- key = body['Key']
+ def get_item(self):
+ name = self.body['TableName']
+ key = self.body['Key']
hash_key = key['HashKeyElement']
range_key = key.get('RangeKeyElement')
- attrs_to_get = body.get('AttributesToGet')
- item = dynamodb_backend.get_item(name, hash_key, range_key)
+ attrs_to_get = self.body.get('AttributesToGet')
+ try:
+ item = dynamodb_backend.get_item(name, hash_key, range_key)
+ except ValueError:
+ er = 'com.amazon.coral.validate#ValidationException'
+ return self.error(er, status=400)
if item:
item_dict = item.describe_attrs(attrs_to_get)
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
+ # Item not found
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
- return self.error(er)
+ return self.error(er, status=404)
- def batch_get_item(self, uri, method, body, headers):
- table_batches = body['RequestItems']
+ def batch_get_item(self):
+ table_batches = self.body['RequestItems']
results = {
"Responses": {
@@ -211,10 +225,10 @@ class DynamoHandler(object):
results["Responses"][table_name] = {"Items": items, "ConsumedCapacityUnits": 1}
return dynamo_json_dump(results)
- def query(self, uri, method, body, headers):
- name = body['TableName']
- hash_key = body['HashKeyValue']
- range_condition = body.get('RangeKeyCondition')
+ def query(self):
+ name = self.body['TableName']
+ hash_key = self.body['HashKeyValue']
+ range_condition = self.body.get('RangeKeyCondition')
if range_condition:
range_comparison = range_condition['ComparisonOperator']
range_values = range_condition['AttributeValueList']
@@ -242,11 +256,11 @@ class DynamoHandler(object):
# }
return dynamo_json_dump(result)
- def scan(self, uri, method, body, headers):
- name = body['TableName']
+ def scan(self):
+ name = self.body['TableName']
filters = {}
- scan_filters = body.get('ScanFilter', {})
+ scan_filters = self.body.get('ScanFilter', {})
for attribute_name, scan_filter in scan_filters.iteritems():
# Keys are attribute names. Values are tuples of (comparison, comparison_value)
comparison_operator = scan_filter["ComparisonOperator"]
@@ -274,12 +288,12 @@ class DynamoHandler(object):
# }
return dynamo_json_dump(result)
- def delete_item(self, uri, method, body, headers):
- name = body['TableName']
- key = body['Key']
+ def delete_item(self):
+ name = self.body['TableName']
+ key = self.body['Key']
hash_key = key['HashKeyElement']
range_key = key.get('RangeKeyElement')
- return_values = body.get('ReturnValues', '')
+ return_values = self.body.get('ReturnValues', '')
item = dynamodb_backend.delete_item(name, hash_key, range_key)
if item:
if return_values == 'ALL_OLD':
@@ -291,10 +305,3 @@ class DynamoHandler(object):
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
-
-
-def handler(uri, method, body, headers):
- if 'GetSessionToken' in body:
- return sts_handler(uri, method, body, headers)
- body = json.loads(body or '{}')
- return DynamoHandler(uri, method, body, headers_to_dict(headers)).dispatch()
diff --git a/moto/dynamodb/urls.py b/moto/dynamodb/urls.py
index 1132b7815..6ed5e00d5 100644
--- a/moto/dynamodb/urls.py
+++ b/moto/dynamodb/urls.py
@@ -1,10 +1,10 @@
-from .responses import handler
+from .responses import DynamoHandler
url_bases = [
- "https?://dynamodb.us-east-1.amazonaws.com",
+ "https?://dynamodb.(.+).amazonaws.com",
"https?://sts.amazonaws.com",
]
url_paths = {
- "{0}/": handler,
+ "{0}/": DynamoHandler().dispatch,
}
diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py
new file mode 100644
index 000000000..07122d2b2
--- /dev/null
+++ b/moto/ec2/exceptions.py
@@ -0,0 +1,4 @@
+class InvalidIdError(RuntimeError):
+ def __init__(self, instance_id):
+ super(InvalidIdError, self).__init__()
+ self.instance_id = instance_id
diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index c254e1716..2150f2567 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -1,25 +1,56 @@
+import copy
from collections import defaultdict
from boto.ec2.instance import Instance as BotoInstance, Reservation
from moto.core import BaseBackend
+from .exceptions import InvalidIdError
from .utils import (
random_ami_id,
random_instance_id,
random_reservation_id,
random_security_group_id,
random_snapshot_id,
+ random_spot_request_id,
random_subnet_id,
random_volume_id,
random_vpc_id,
)
+class InstanceState(object):
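+    # State codes follow the EC2 API: 0 pending, 16 running, 32 shutting-down,
+    # 48 terminated, 64 stopping, 80 stopped.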
+ def __init__(self, name='pending', code=0):
+ self.name = name
+ self.code = code
+
+
class Instance(BotoInstance):
- def __init__(self):
- self._state_name = None
- self._state_code = None
+ def __init__(self, image_id, user_data):
super(Instance, self).__init__()
+ self.id = random_instance_id()
+ self.image_id = image_id
+ self._state = InstanceState()
+ self.user_data = user_data
+
+ def start(self):
+ self._state.name = "pending"
+ self._state.code = 0
+
+ def stop(self):
+ self._state.name = "stopping"
+ self._state.code = 64
+
+ def terminate(self):
+ self._state.name = "shutting-down"
+ self._state.code = 32
+
+ def reboot(self):
+ self._state.name = "pending"
+ self._state.code = 0
+
+ def get_tags(self):
+ tags = ec2_backend.describe_tags(self.id)
+ return tags
class InstanceBackend(object):
@@ -33,15 +64,14 @@ class InstanceBackend(object):
if instance.id == instance_id:
return instance
- def add_instances(self, image_id, count):
+ def add_instances(self, image_id, count, user_data):
new_reservation = Reservation()
new_reservation.id = random_reservation_id()
for index in range(count):
- new_instance = Instance()
- new_instance.id = random_instance_id()
- new_instance.image_id = image_id
- new_instance._state_name = "pending"
- new_instance._state_code = 0
+ new_instance = Instance(
+ image_id,
+ user_data,
+ )
new_reservation.instances.append(new_instance)
self.reservations[new_reservation.id] = new_reservation
return new_reservation
@@ -50,8 +80,7 @@ class InstanceBackend(object):
started_instances = []
for instance in self.all_instances():
if instance.id in instance_ids:
- instance._state_name = "pending"
- instance._state_code = 0
+ instance.start()
started_instances.append(instance)
return started_instances
@@ -60,8 +89,7 @@ class InstanceBackend(object):
stopped_instances = []
for instance in self.all_instances():
if instance.id in instance_ids:
- instance._state_name = "stopping"
- instance._state_code = 64
+ instance.stop()
stopped_instances.append(instance)
return stopped_instances
@@ -70,8 +98,7 @@ class InstanceBackend(object):
terminated_instances = []
for instance in self.all_instances():
if instance.id in instance_ids:
- instance._state_name = "shutting-down"
- instance._state_code = 32
+ instance.terminate()
terminated_instances.append(instance)
return terminated_instances
@@ -80,9 +107,7 @@ class InstanceBackend(object):
rebooted_instances = []
for instance in self.all_instances():
if instance.id in instance_ids:
- # TODO double check instances go to pending when reboot
- instance._state_name = "pending"
- instance._state_code = 0
+ instance.reboot()
rebooted_instances.append(instance)
return rebooted_instances
@@ -104,8 +129,32 @@ class InstanceBackend(object):
instances.append(instance)
return instances
- def all_reservations(self):
- return self.reservations.values()
+ def get_reservations_by_instance_ids(self, instance_ids):
+ """ Go through all of the reservations and filter to only return those
+ associated with the given instance_ids.
+ """
+ reservations = []
+ for reservation in self.all_reservations(make_copy=True):
+ reservation_instance_ids = [instance.id for instance in reservation.instances]
+ matching_reservation = any(instance_id in reservation_instance_ids for instance_id in instance_ids)
+ if matching_reservation:
+                # We need to make a copy of the reservation because we have to
+                # modify its instances to limit them to those requested
+ reservation.instances = [instance for instance in reservation.instances if instance.id in instance_ids]
+ reservations.append(reservation)
+ found_instance_ids = [instance.id for reservation in reservations for instance in reservation.instances]
+ if len(found_instance_ids) != len(instance_ids):
+ invalid_id = list(set(instance_ids).difference(set(found_instance_ids)))[0]
+ raise InvalidIdError(invalid_id)
+ return reservations
+
+ def all_reservations(self, make_copy=False):
+ if make_copy:
+ # Return copies so that other functions can modify them without changing
+ # the originals
+ return [copy.deepcopy(reservation) for reservation in self.reservations.values()]
+ else:
+ return [reservation for reservation in self.reservations.values()]
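# Editor's sketch, not part of the patch: on a fresh ec2_backend, filtering
# reservations by instance id returns deep copies, so trimming .instances
# does not mutate the stored reservation; unknown ids raise InvalidIdError
# (moto.ec2.exceptions), which describe_instances turns into a 400 response.
from moto.ec2.models import ec2_backend

reservation = ec2_backend.add_instances('ami-12345678', 2, user_data='')
first_id = reservation.instances[0].id
filtered = ec2_backend.get_reservations_by_instance_ids([first_id])
assert len(filtered[0].instances) == 1
assert len(ec2_backend.all_reservations()[0].instances) == 2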
class TagBackend(object):
@@ -121,18 +170,21 @@ class TagBackend(object):
def delete_tag(self, resource_id, key):
return self.tags[resource_id].pop(key)
- def describe_tags(self):
+ def describe_tags(self, filter_resource_ids=None):
results = []
for resource_id, tags in self.tags.iteritems():
ami = 'ami' in resource_id
for key, value in tags.iteritems():
- result = {
- 'resource_id': resource_id,
- 'key': key,
- 'value': value,
- 'resource_type': 'image' if ami else 'instance',
- }
- results.append(result)
+ if not filter_resource_ids or resource_id in filter_resource_ids:
+ # If we're not filtering, or we are filtering and this
+ # resource id is in the filter list, add this tag
+ result = {
+ 'resource_id': resource_id,
+ 'key': key,
+ 'value': value,
+ 'resource_type': 'image' if ami else 'instance',
+ }
+ results.append(result)
return results
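# Editor's sketch, not part of the patch: the optional filter_resource_ids
# argument keeps describe_tags backwards compatible while letting
# Instance.get_tags scope results to one resource. create_tag's
# (resource_id, key, value) signature is inferred from the TagResponse diff.
from moto.ec2.models import ec2_backend

ec2_backend.create_tag('i-11111111', 'Name', 'web')
ec2_backend.create_tag('ami-22222222', 'Name', 'base-image')
tags = ec2_backend.describe_tags(filter_resource_ids=['i-11111111'])
assert [tag['resource_id'] for tag in tags] == ['i-11111111']
assert tags[0]['resource_type'] == 'instance'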
@@ -255,11 +307,12 @@ class SecurityGroupBackend(object):
self.groups = {}
super(SecurityGroupBackend, self).__init__()
- def create_security_group(self, name, description):
+ def create_security_group(self, name, description, force=False):
group_id = random_security_group_id()
- existing_group = self.get_security_group_from_name(name)
- if existing_group:
- return None
+ if not force:
+ existing_group = self.get_security_group_from_name(name)
+ if existing_group:
+ return None
group = SecurityGroup(group_id, name, description)
self.groups[group_id] = group
return group
@@ -282,6 +335,11 @@ class SecurityGroupBackend(object):
if group.name == name:
return group
+ if name == 'default':
+ # If the request is for the default group and it does not exist, create it
+ default_group = ec2_backend.create_security_group("default", "The default security group", force=True)
+ return default_group
+
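# Editor's sketch, not part of the patch: the "default" security group is now
# created lazily on first lookup, mirroring EC2's implicit default group;
# force=True skips the duplicate-name check that create_security_group
# normally applies.
from moto.ec2.models import ec2_backend

default = ec2_backend.get_security_group_from_name("default")
assert default.name == "default"
# A second lookup finds the group created above rather than making another one.
assert ec2_backend.get_security_group_from_name("default").id == default.id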
def authorize_security_group_ingress(self, group_name, ip_protocol, from_port, to_port, ip_ranges=None, source_group_names=None):
group = self.get_security_group_from_name(group_name)
source_groups = []
@@ -445,9 +503,77 @@ class SubnetBackend(object):
return self.subnets.pop(subnet_id, None)
+class SpotInstanceRequest(object):
+ def __init__(self, spot_request_id, price, image_id, type, valid_from,
+ valid_until, launch_group, availability_zone_group, key_name,
+ security_groups, user_data, instance_type, placement, kernel_id,
+ ramdisk_id, monitoring_enabled, subnet_id):
+ self.id = spot_request_id
+ self.state = "open"
+ self.price = price
+ self.image_id = image_id
+ self.type = type
+ self.valid_from = valid_from
+ self.valid_until = valid_until
+ self.launch_group = launch_group
+ self.availability_zone_group = availability_zone_group
+ self.key_name = key_name
+ self.user_data = user_data
+ self.instance_type = instance_type
+ self.placement = placement
+ self.kernel_id = kernel_id
+ self.ramdisk_id = ramdisk_id
+ self.monitoring_enabled = monitoring_enabled
+ self.subnet_id = subnet_id
+
+ self.security_groups = []
+ if security_groups:
+ for group_name in security_groups:
+ group = ec2_backend.get_security_group_from_name(group_name)
+ if group:
+ self.security_groups.append(group)
+ else:
+ # If no security groups were given, add the default group
+ default_group = ec2_backend.get_security_group_from_name("default")
+ self.security_groups.append(default_group)
+
+
+class SpotRequestBackend(object):
+ def __init__(self):
+ self.spot_instance_requests = {}
+ super(SpotRequestBackend, self).__init__()
+
+ def request_spot_instances(self, price, image_id, count, type, valid_from,
+ valid_until, launch_group, availability_zone_group,
+ key_name, security_groups, user_data,
+ instance_type, placement, kernel_id, ramdisk_id,
+ monitoring_enabled, subnet_id):
+ requests = []
+ for index in range(count):
+ spot_request_id = random_spot_request_id()
+ request = SpotInstanceRequest(
+ spot_request_id, price, image_id, type, valid_from, valid_until,
+ launch_group, availability_zone_group, key_name, security_groups,
+ user_data, instance_type, placement, kernel_id, ramdisk_id,
+ monitoring_enabled, subnet_id
+ )
+ self.spot_instance_requests[spot_request_id] = request
+ requests.append(request)
+ return requests
+
+ def describe_spot_instance_requests(self):
+ return self.spot_instance_requests.values()
+
+ def cancel_spot_instance_requests(self, request_ids):
+ requests = []
+ for request_id in request_ids:
+ requests.append(self.spot_instance_requests.pop(request_id))
+ return requests
+
+
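# Editor's sketch, not part of the patch: the spot-request lifecycle on a fresh
# backend. All launch-specification arguments mirror the querystring parameters
# parsed in responses/spot_instances.py; passing security_groups=None attaches
# the lazily created "default" group.
from moto.ec2.models import ec2_backend

requests = ec2_backend.request_spot_instances(
    price='0.05', image_id='ami-12345678', count=2, type='one-time',
    valid_from=None, valid_until=None, launch_group=None,
    availability_zone_group=None, key_name=None, security_groups=None,
    user_data=None, instance_type='m1.small', placement=None,
    kernel_id=None, ramdisk_id=None, monitoring_enabled=None, subnet_id=None,
)
assert all(request.state == "open" for request in requests)
ec2_backend.cancel_spot_instance_requests([requests[0].id])
assert len(ec2_backend.describe_spot_instance_requests()) == 1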
class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend,
- VPCBackend, SubnetBackend):
+ VPCBackend, SubnetBackend, SpotRequestBackend):
pass
diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py
index 0a50797ee..690419438 100644
--- a/moto/ec2/responses/__init__.py
+++ b/moto/ec2/responses/__init__.py
@@ -1,6 +1,4 @@
-from urlparse import parse_qs
-
-from moto.core.utils import camelcase_to_underscores, method_names_from_class
+from moto.core.responses import BaseResponse
from .amazon_dev_pay import AmazonDevPay
from .amis import AmisResponse
@@ -32,53 +30,35 @@ from .vpn_connections import VPNConnections
from .windows import Windows
-class EC2Response(object):
-
- sub_responses = [
- AmazonDevPay,
- AmisResponse,
- AvailabilityZonesAndRegions,
- CustomerGateways,
- DHCPOptions,
- ElasticBlockStore,
- ElasticIPAddresses,
- ElasticNetworkInterfaces,
- General,
- InstanceResponse,
- InternetGateways,
- IPAddresses,
- KeyPairs,
- Monitoring,
- NetworkACLs,
- PlacementGroups,
- ReservedInstances,
- RouteTables,
- SecurityGroups,
- SpotInstances,
- Subnets,
- TagResponse,
- VirtualPrivateGateways,
- VMExport,
- VMImport,
- VPCs,
- VPNConnections,
- Windows,
- ]
-
- def dispatch(self, uri, method, body, headers):
- if body:
- querystring = parse_qs(body)
- else:
- querystring = parse_qs(headers)
-
- action = querystring.get('Action', [None])[0]
- if action:
- action = camelcase_to_underscores(action)
-
- for sub_response in self.sub_responses:
- method_names = method_names_from_class(sub_response)
- if action in method_names:
- response = sub_response(querystring)
- method = getattr(response, action)
- return method()
- raise NotImplementedError("The {} action has not been implemented".format(action))
+class EC2Response(
+ BaseResponse,
+ AmazonDevPay,
+ AmisResponse,
+ AvailabilityZonesAndRegions,
+ CustomerGateways,
+ DHCPOptions,
+ ElasticBlockStore,
+ ElasticIPAddresses,
+ ElasticNetworkInterfaces,
+ General,
+ InstanceResponse,
+ InternetGateways,
+ IPAddresses,
+ KeyPairs,
+ Monitoring,
+ NetworkACLs,
+ PlacementGroups,
+ ReservedInstances,
+ RouteTables,
+ SecurityGroups,
+ SpotInstances,
+ Subnets,
+ TagResponse,
+ VirtualPrivateGateways,
+ VMExport,
+ VMImport,
+ VPCs,
+ VPNConnections,
+ Windows,
+):
+ pass
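# Editor's note, not part of the patch: the per-service dispatch loop deleted
# above is replaced by multiple inheritance. BaseResponse parses the Action
# parameter and looks the handler up on self, so Python's MRO searches every
# mixed-in response class. A minimal, self-contained sketch of the idea:
class BaseDispatch(object):
    def dispatch_action(self, action_name):
        # e.g. "DescribeInstances" -> "describe_instances"
        method_name = ''.join(
            '_' + char.lower() if char.isupper() else char
            for char in action_name
        ).lstrip('_')
        return getattr(self, method_name)()

class FakeInstances(object):
    def describe_instances(self):
        return 'instances!'

class FakeResponse(BaseDispatch, FakeInstances):
    pass

assert FakeResponse().dispatch_action('DescribeInstances') == 'instances!'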
diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py
index afce0bbb5..feddc89f1 100644
--- a/moto/ec2/responses/amis.py
+++ b/moto/ec2/responses/amis.py
@@ -5,14 +5,11 @@ from moto.ec2.utils import instance_ids_from_querystring
class AmisResponse(object):
- def __init__(self, querystring):
- self.querystring = querystring
- self.instance_ids = instance_ids_from_querystring(querystring)
-
def create_image(self):
name = self.querystring.get('Name')[0]
description = self.querystring.get('Description')[0]
- instance_id = self.instance_ids[0]
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instance_id = instance_ids[0]
image = ec2_backend.create_image(instance_id, name, description)
if not image:
return "There is not instance with id {}".format(instance_id), dict(status=404)
diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py
index 4faeda764..f216a644f 100644
--- a/moto/ec2/responses/availability_zones_and_regions.py
+++ b/moto/ec2/responses/availability_zones_and_regions.py
@@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend
class AvailabilityZonesAndRegions(object):
- def __init__(self, querystring):
- self.querystring = querystring
-
def describe_availability_zones(self):
zones = ec2_backend.describe_availability_zones()
template = Template(DESCRIBE_ZONES_RESPONSE)
diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py
index bdea18188..d81c61c9d 100644
--- a/moto/ec2/responses/elastic_block_store.py
+++ b/moto/ec2/responses/elastic_block_store.py
@@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend
class ElasticBlockStore(object):
- def __init__(self, querystring):
- self.querystring = querystring
-
def attach_volume(self):
volume_id = self.querystring.get('VolumeId')[0]
instance_id = self.querystring.get('InstanceId')[0]
diff --git a/moto/ec2/responses/general.py b/moto/ec2/responses/general.py
index ad133a30c..5353bb99a 100644
--- a/moto/ec2/responses/general.py
+++ b/moto/ec2/responses/general.py
@@ -5,11 +5,8 @@ from moto.ec2.utils import instance_ids_from_querystring
class General(object):
- def __init__(self, querystring):
- self.querystring = querystring
- self.instance_ids = instance_ids_from_querystring(querystring)
-
def get_console_output(self):
+ self.instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = self.instance_ids[0]
instance = ec2_backend.get_instance(instance_id)
if instance:
diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py
index 7c7c9d725..68be9dafd 100644
--- a/moto/ec2/responses/instances.py
+++ b/moto/ec2/responses/instances.py
@@ -2,42 +2,57 @@ from jinja2 import Template
from moto.core.utils import camelcase_to_underscores
from moto.ec2.models import ec2_backend
-from moto.ec2.utils import instance_ids_from_querystring
+from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, filter_reservations
+from moto.ec2.exceptions import InvalidIdError
class InstanceResponse(object):
- def __init__(self, querystring):
- self.querystring = querystring
- self.instance_ids = instance_ids_from_querystring(querystring)
-
def describe_instances(self):
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ if instance_ids:
+ try:
+ reservations = ec2_backend.get_reservations_by_instance_ids(instance_ids)
+ except InvalidIdError as exc:
+ template = Template(EC2_INVALID_INSTANCE_ID)
+ return template.render(instance_id=exc.instance_id), dict(status=400)
+ else:
+ reservations = ec2_backend.all_reservations(make_copy=True)
+
+ filter_dict = filters_from_querystring(self.querystring)
+ reservations = filter_reservations(reservations, filter_dict)
+
template = Template(EC2_DESCRIBE_INSTANCES)
- return template.render(reservations=ec2_backend.all_reservations())
+ return template.render(reservations=reservations)
def run_instances(self):
min_count = int(self.querystring.get('MinCount', ['1'])[0])
image_id = self.querystring.get('ImageId')[0]
- new_reservation = ec2_backend.add_instances(image_id, min_count)
+ user_data = self.querystring.get('UserData')
+ new_reservation = ec2_backend.add_instances(image_id, min_count, user_data)
template = Template(EC2_RUN_INSTANCES)
return template.render(reservation=new_reservation)
def terminate_instances(self):
- instances = ec2_backend.terminate_instances(self.instance_ids)
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instances = ec2_backend.terminate_instances(instance_ids)
template = Template(EC2_TERMINATE_INSTANCES)
return template.render(instances=instances)
def reboot_instances(self):
- instances = ec2_backend.reboot_instances(self.instance_ids)
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instances = ec2_backend.reboot_instances(instance_ids)
template = Template(EC2_REBOOT_INSTANCES)
return template.render(instances=instances)
def stop_instances(self):
- instances = ec2_backend.stop_instances(self.instance_ids)
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instances = ec2_backend.stop_instances(instance_ids)
template = Template(EC2_STOP_INSTANCES)
return template.render(instances=instances)
def start_instances(self):
- instances = ec2_backend.start_instances(self.instance_ids)
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instances = ec2_backend.start_instances(instance_ids)
template = Template(EC2_START_INSTANCES)
return template.render(instances=instances)
@@ -45,7 +60,8 @@ class InstanceResponse(object):
# TODO this and modify below should raise IncorrectInstanceState if instance not in stopped state
attribute = self.querystring.get("Attribute")[0]
key = camelcase_to_underscores(attribute)
- instance_id = self.instance_ids[0]
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instance_id = instance_ids[0]
instance, value = ec2_backend.describe_instance_attribute(instance_id, key)
template = Template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE)
return template.render(instance=instance, attribute=attribute, value=value)
@@ -57,7 +73,8 @@ class InstanceResponse(object):
value = self.querystring.get(key)[0]
normalized_attribute = camelcase_to_underscores(key.split(".")[0])
- instance_id = self.instance_ids[0]
+ instance_ids = instance_ids_from_querystring(self.querystring)
+ instance_id = instance_ids[0]
ec2_backend.modify_instance_attribute(instance_id, normalized_attribute, value)
return EC2_MODIFY_INSTANCE_ATTRIBUTE
@@ -78,8 +95,8 @@ EC2_RUN_INSTANCES = """
hvm
ABCDE1234567890123
-
+ <tagSet>
+   {% for tag in instance.get_tags() %}
+   <item>
+     <resourceId>{{ tag.resource_id }}</resourceId>
+     <resourceType>{{ tag.resource_type }}</resourceType>
+     <key>{{ tag.key }}</key>
+     <value>{{ tag.value }}</value>
+   </item>
+   {% endfor %}
+ </tagSet>
xen
@@ -190,8 +216,8 @@ EC2_TERMINATE_INSTANCES = """
running
- {{ instance._state_code }}
- {{ instance._state_name }}
+ {{ instance._state.code }}
+ {{ instance._state.name }}
{% endfor %}
@@ -210,8 +236,8 @@ EC2_STOP_INSTANCES = """
running
- {{ instance._state_code }}
- {{ instance._state_name }}
+ {{ instance._state.code }}
+ {{ instance._state.name }}
{% endfor %}
@@ -230,8 +256,8 @@ EC2_START_INSTANCES = """
running
- {{ instance._state_code }}
- {{ instance._state_name }}
+ {{ instance._state.code }}
+ {{ instance._state.name }}
{% endfor %}
@@ -255,3 +281,10 @@ EC2_MODIFY_INSTANCE_ATTRIBUTE = """
+EC2_INVALID_INSTANCE_ID = """<Response>
+  <Errors>
+    <Error>
+      <Code>InvalidInstanceID.NotFound</Code>
+      <Message>The instance ID '{{ instance_id }}' does not exist</Message>
+    </Error>
+  </Errors>
+  <RequestID>39070fe4-6f6d-4565-aecd-7850607e4555</RequestID>
+</Response>"""
diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py
index 2768494a8..1b40e182f 100644
--- a/moto/ec2/responses/security_groups.py
+++ b/moto/ec2/responses/security_groups.py
@@ -1,7 +1,6 @@
from jinja2 import Template
from moto.ec2.models import ec2_backend
-from moto.ec2.utils import resource_ids_from_querystring
def process_rules_from_querystring(querystring):
@@ -22,9 +21,6 @@ def process_rules_from_querystring(querystring):
class SecurityGroups(object):
- def __init__(self, querystring):
- self.querystring = querystring
-
def authorize_security_group_egress(self):
raise NotImplementedError('SecurityGroups.authorize_security_group_egress is not yet implemented')
diff --git a/moto/ec2/responses/spot_instances.py b/moto/ec2/responses/spot_instances.py
index 0ace72dfd..759914989 100644
--- a/moto/ec2/responses/spot_instances.py
+++ b/moto/ec2/responses/spot_instances.py
@@ -1,12 +1,25 @@
from jinja2 import Template
from moto.ec2.models import ec2_backend
-from moto.ec2.utils import resource_ids_from_querystring
class SpotInstances(object):
+ def _get_param(self, param_name):
+ return self.querystring.get(param_name, [None])[0]
+
+ def _get_int_param(self, param_name):
+ value = self._get_param(param_name)
+ if value is not None:
+ return int(value)
+
+ def _get_multi_param(self, param_prefix):
+ return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)]
+
def cancel_spot_instance_requests(self):
- raise NotImplementedError('SpotInstances.cancel_spot_instance_requests is not yet implemented')
+ request_ids = self._get_multi_param('SpotInstanceRequestId')
+ requests = ec2_backend.cancel_spot_instance_requests(request_ids)
+ template = Template(CANCEL_SPOT_INSTANCES_TEMPLATE)
+ return template.render(requests=requests)
def create_spot_datafeed_subscription(self):
raise NotImplementedError('SpotInstances.create_spot_datafeed_subscription is not yet implemented')
@@ -18,10 +31,186 @@ class SpotInstances(object):
raise NotImplementedError('SpotInstances.describe_spot_datafeed_subscription is not yet implemented')
def describe_spot_instance_requests(self):
- raise NotImplementedError('SpotInstances.describe_spot_instance_requests is not yet implemented')
+ requests = ec2_backend.describe_spot_instance_requests()
+ template = Template(DESCRIBE_SPOT_INSTANCES_TEMPLATE)
+ return template.render(requests=requests)
def describe_spot_price_history(self):
raise NotImplementedError('SpotInstances.describe_spot_price_history is not yet implemented')
def request_spot_instances(self):
- raise NotImplementedError('SpotInstances.request_spot_instances is not yet implemented')
+ price = self._get_param('SpotPrice')
+ image_id = self._get_param('LaunchSpecification.ImageId')
+ count = self._get_int_param('InstanceCount')
+ type = self._get_param('Type')
+ valid_from = self._get_param('ValidFrom')
+ valid_until = self._get_param('ValidUntil')
+ launch_group = self._get_param('LaunchGroup')
+ availability_zone_group = self._get_param('AvailabilityZoneGroup')
+ key_name = self._get_param('LaunchSpecification.KeyName')
+ security_groups = self._get_multi_param('LaunchSpecification.SecurityGroup.')
+ user_data = self._get_param('LaunchSpecification.UserData')
+ instance_type = self._get_param('LaunchSpecification.InstanceType')
+ placement = self._get_param('LaunchSpecification.Placement.AvailabilityZone')
+ kernel_id = self._get_param('LaunchSpecification.KernelId')
+ ramdisk_id = self._get_param('LaunchSpecification.RamdiskId')
+ monitoring_enabled = self._get_param('LaunchSpecification.Monitoring.Enabled')
+ subnet_id = self._get_param('LaunchSpecification.SubnetId')
+
+ requests = ec2_backend.request_spot_instances(
+ price=price,
+ image_id=image_id,
+ count=count,
+ type=type,
+ valid_from=valid_from,
+ valid_until=valid_until,
+ launch_group=launch_group,
+ availability_zone_group=availability_zone_group,
+ key_name=key_name,
+ security_groups=security_groups,
+ user_data=user_data,
+ instance_type=instance_type,
+ placement=placement,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
+ monitoring_enabled=monitoring_enabled,
+ subnet_id=subnet_id,
+ )
+
+ template = Template(REQUEST_SPOT_INSTANCES_TEMPLATE)
+ return template.render(requests=requests)
+
+
+REQUEST_SPOT_INSTANCES_TEMPLATE = """
+ 59dbff89-35bd-4eac-99ed-be587EXAMPLE
+
+ {% for request in requests %}
+ -
+ {{ request.price }}
+ {{ request.price }}
+ {{ request.type }}
+ {{ request.state }}
+
+
pending-evaluation
+ YYYY-MM-DDTHH:MM:SS.000Z
+ Your Spot request has been submitted for review, and is pending evaluation.
+
+ {{ request.availability_zone_group }}
+
+ {{ request.image_id }}
+ {{ request.key_name }}
+
+ {% for group in request.security_groups %}
+ -
+ {{ group.id }}
+ {{ group.name }}
+
+ {% endfor %}
+
+ {{ request.kernel_id }}
+ {{ request.ramdisk_id }}
+ {{ request.subnet_id }}
+ {{ request.instance_type }}
+
+
+ {{ request.monitoring_enabled }}
+
+ {{ request.ebs_optimized }}
+
+ {{ request.placement }}
+
+
+
+ {{ request.launch_group }}
+ YYYY-MM-DDTHH:MM:SS.000Z
+ {% if request.valid_from %}
+ {{ request.valid_from }}
+ {% endif %}
+ {% if request.valid_until %}
+ {{ request.valid_until }}
+ {% endif %}
+ Linux/UNIX
+
+ {% endfor %}
+
+"""
+
+DESCRIBE_SPOT_INSTANCES_TEMPLATE = """
+ 59dbff89-35bd-4eac-99ed-be587EXAMPLE
+
+ {% for request in requests %}
+ -
+ {{ request.id }}
+ {{ request.price }}
+ {{ request.type }}
+ {{ request.state }}
+
+
pending-evaluation
+ YYYY-MM-DDTHH:MM:SS.000Z
+ Your Spot request has been submitted for review, and is pending evaluation.
+
+ {% if request.availability_zone_group %}
+ {{ request.availability_zone_group }}
+ {% endif %}
+
+ {{ request.image_id }}
+ {% if request.key_name %}
+ {{ request.key_name }}
+ {% endif %}
+
+ {% for group in request.security_groups %}
+ -
+ {{ group.id }}
+ {{ group.name }}
+
+ {% endfor %}
+
+ {% if request.kernel_id %}
+ {{ request.kernel_id }}
+ {% endif %}
+ {% if request.ramdisk_id %}
+ {{ request.ramdisk_id }}
+ {% endif %}
+ {% if request.subnet_id %}
+ {{ request.subnet_id }}
+ {% endif %}
+ {{ request.instance_type }}
+
+
+ {{ request.monitoring_enabled }}
+
+ {{ request.ebs_optimized }}
+ {% if request.placement %}
+
+ {{ request.placement }}
+
+
+ {% endif %}
+
+ {% if request.launch_group %}
+ {{ request.launch_group }}
+ {% endif %}
+ YYYY-MM-DDTHH:MM:SS.000Z
+ {% if request.valid_from %}
+ {{ request.valid_from }}
+ {% endif %}
+ {% if request.valid_until %}
+ {{ request.valid_until }}
+ {% endif %}
+ Linux/UNIX
+
+ {% endfor %}
+
+"""
+
+CANCEL_SPOT_INSTANCES_TEMPLATE = """<CancelSpotInstanceRequestsResponse xmlns="http://ec2.amazonaws.com/doc/2013-06-15/">
+  <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
+  <spotInstanceRequestSet>
+    {% for request in requests %}
+    <item>
+      <spotInstanceRequestId>{{ request.id }}</spotInstanceRequestId>
+      <state>cancelled</state>
+    </item>
+    {% endfor %}
+  </spotInstanceRequestSet>
+</CancelSpotInstanceRequestsResponse>"""
diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py
index 97a5da287..761f492e5 100644
--- a/moto/ec2/responses/subnets.py
+++ b/moto/ec2/responses/subnets.py
@@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend
class Subnets(object):
- def __init__(self, querystring):
- self.querystring = querystring
-
def create_subnet(self):
vpc_id = self.querystring.get('VpcId')[0]
cidr_block = self.querystring.get('CidrBlock')[0]
diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py
index 18478e9a5..dd8dce8e8 100644
--- a/moto/ec2/responses/tags.py
+++ b/moto/ec2/responses/tags.py
@@ -5,17 +5,16 @@ from moto.ec2.utils import resource_ids_from_querystring
class TagResponse(object):
- def __init__(self, querystring):
- self.querystring = querystring
- self.resource_ids = resource_ids_from_querystring(querystring)
def create_tags(self):
- for resource_id, tag in self.resource_ids.iteritems():
+ resource_ids = resource_ids_from_querystring(self.querystring)
+ for resource_id, tag in resource_ids.iteritems():
ec2_backend.create_tag(resource_id, tag[0], tag[1])
return CREATE_RESPONSE
def delete_tags(self):
- for resource_id, tag in self.resource_ids.iteritems():
+ resource_ids = resource_ids_from_querystring(self.querystring)
+ for resource_id, tag in resource_ids.iteritems():
ec2_backend.delete_tag(resource_id, tag[0])
template = Template(DELETE_RESPONSE)
return template.render(reservations=ec2_backend.all_reservations())
diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py
index 857b9b2bb..c2b16f9cd 100644
--- a/moto/ec2/responses/vpcs.py
+++ b/moto/ec2/responses/vpcs.py
@@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend
class VPCs(object):
- def __init__(self, querystring):
- self.querystring = querystring
-
def create_vpc(self):
cidr_block = self.querystring.get('CidrBlock')[0]
vpc = ec2_backend.create_vpc(cidr_block)
diff --git a/moto/ec2/urls.py b/moto/ec2/urls.py
index e4f05aea7..65413369d 100644
--- a/moto/ec2/urls.py
+++ b/moto/ec2/urls.py
@@ -2,7 +2,7 @@ from .responses import EC2Response
url_bases = [
- "https?://ec2.us-east-1.amazonaws.com",
+ "https?://ec2.(.+).amazonaws.com",
]
url_paths = {
diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py
index 258927f05..a86ed64c5 100644
--- a/moto/ec2/utils.py
+++ b/moto/ec2/utils.py
@@ -1,4 +1,5 @@
import random
+import re
def random_id(prefix=''):
@@ -9,6 +10,10 @@ def random_id(prefix=''):
return '{}-{}'.format(prefix, instance_tag)
+def random_ami_id():
+ return random_id(prefix='ami')
+
+
def random_instance_id():
return random_id(prefix='i')
@@ -17,14 +22,22 @@ def random_reservation_id():
return random_id(prefix='r')
-def random_ami_id():
- return random_id(prefix='ami')
-
-
def random_security_group_id():
return random_id(prefix='sg')
+def random_snapshot_id():
+ return random_id(prefix='snap')
+
+
+def random_spot_request_id():
+ return random_id(prefix='sir')
+
+
+def random_subnet_id():
+ return random_id(prefix='subnet')
+
+
def random_volume_id():
return random_id(prefix='vol')
@@ -33,14 +46,6 @@ def random_vpc_id():
return random_id(prefix='vpc')
-def random_subnet_id():
- return random_id(prefix='subnet')
-
-
-def random_snapshot_id():
- return random_id(prefix='snap')
-
-
def instance_ids_from_querystring(querystring_dict):
instance_ids = []
for key, value in querystring_dict.iteritems():
@@ -53,7 +58,7 @@ def resource_ids_from_querystring(querystring_dict):
prefix = 'ResourceId'
response_values = {}
for key, value in querystring_dict.iteritems():
- if prefix in key:
+ if key.startswith(prefix):
resource_index = key.replace(prefix + ".", "")
tag_key = querystring_dict.get("Tag.{}.Key".format(resource_index))[0]
@@ -65,3 +70,45 @@ def resource_ids_from_querystring(querystring_dict):
response_values[value[0]] = (tag_key, tag_value)
return response_values
+
+
+def filters_from_querystring(querystring_dict):
+ response_values = {}
+ for key, value in querystring_dict.iteritems():
+ match = re.search(r"Filter\.(\d+)\.Name", key)
+ if match:
+ filter_index = match.groups()[0]
+ value_prefix = "Filter.{}.Value".format(filter_index)
+ filter_values = [filter_value[0] for filter_key, filter_value in querystring_dict.iteritems() if filter_key.startswith(value_prefix)]
+ response_values[value[0]] = filter_values
+ return response_values
+
+
+filter_dict_attribute_mapping = {
+ 'instance-state-name': 'state'
+}
+
+
+def passes_filter_dict(instance, filter_dict):
+ for filter_name, filter_values in filter_dict.iteritems():
+ if filter_name in filter_dict_attribute_mapping:
+ instance_attr = filter_dict_attribute_mapping[filter_name]
+ else:
+ raise NotImplementedError("Filter dicts have not been implemented in Moto for '%s' yet. Feel free to open an issue at https://github.com/spulec/moto/issues" % filter_name)
+ instance_value = getattr(instance, instance_attr)
+ if instance_value not in filter_values:
+ return False
+ return True
+
+
+def filter_reservations(reservations, filter_dict):
+ result = []
+ for reservation in reservations:
+ new_instances = []
+ for instance in reservation.instances:
+ if passes_filter_dict(instance, filter_dict):
+ new_instances.append(instance)
+ if new_instances:
+ reservation.instances = new_instances
+ result.append(reservation)
+ return result
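# Editor's sketch, not part of the patch: filters_from_querystring expects the
# parse_qs shape (every value is a list) and groups the Filter.N.Value.*
# entries under the Filter.N.Name value.
querystring = {
    'Filter.1.Name': ['instance-state-name'],
    'Filter.1.Value.1': ['running'],
    'Filter.1.Value.2': ['pending'],
}
filters = filters_from_querystring(querystring)
assert set(filters['instance-state-name']) == set(['running', 'pending'])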
diff --git a/moto/elb/__init__.py b/moto/elb/__init__.py
new file mode 100644
index 000000000..fcadac99e
--- /dev/null
+++ b/moto/elb/__init__.py
@@ -0,0 +1,2 @@
+from .models import elb_backend
+mock_elb = elb_backend.decorator
diff --git a/moto/elb/models.py b/moto/elb/models.py
new file mode 100644
index 000000000..aff7f082b
--- /dev/null
+++ b/moto/elb/models.py
@@ -0,0 +1,80 @@
+from moto.core import BaseBackend
+
+
+class FakeHealthCheck(object):
+ def __init__(self, timeout, healthy_threshold, unhealthy_threshold,
+ interval, target):
+ self.timeout = timeout
+ self.healthy_threshold = healthy_threshold
+ self.unhealthy_threshold = unhealthy_threshold
+ self.interval = interval
+ self.target = target
+
+
+class FakeListener(object):
+ def __init__(self, load_balancer_port, instance_port, protocol):
+ self.load_balancer_port = load_balancer_port
+ self.instance_port = instance_port
+ self.protocol = protocol.upper()
+
+
+class FakeLoadBalancer(object):
+ def __init__(self, name, zones, ports):
+ self.name = name
+ self.health_check = None
+ self.instance_ids = []
+ self.zones = zones
+ self.listeners = []
+ for protocol, lb_port, instance_port in ports:
+ listener = FakeListener(
+ protocol=protocol,
+ load_balancer_port=lb_port,
+ instance_port=instance_port,
+ )
+ self.listeners.append(listener)
+
+
+class ELBBackend(BaseBackend):
+
+ def __init__(self):
+ self.load_balancers = {}
+
+ def create_load_balancer(self, name, zones, ports):
+ new_load_balancer = FakeLoadBalancer(name=name, zones=zones, ports=ports)
+ self.load_balancers[name] = new_load_balancer
+ return new_load_balancer
+
+ def describe_load_balancers(self, names):
+ balancers = self.load_balancers.values()
+ if names:
+ return [balancer for balancer in balancers if balancer.name in names]
+ else:
+ return balancers
+
+ def delete_load_balancer(self, load_balancer_name):
+ self.load_balancers.pop(load_balancer_name, None)
+
+ def get_load_balancer(self, load_balancer_name):
+ return self.load_balancers.get(load_balancer_name)
+
+ def configure_health_check(self, load_balancer_name, timeout,
+ healthy_threshold, unhealthy_threshold, interval,
+ target):
+ check = FakeHealthCheck(timeout, healthy_threshold, unhealthy_threshold,
+ interval, target)
+ load_balancer = self.get_load_balancer(load_balancer_name)
+ load_balancer.health_check = check
+ return check
+
+ def register_instances(self, load_balancer_name, instance_ids):
+ load_balancer = self.get_load_balancer(load_balancer_name)
+ load_balancer.instance_ids.extend(instance_ids)
+ return load_balancer
+
+ def deregister_instances(self, load_balancer_name, instance_ids):
+ load_balancer = self.get_load_balancer(load_balancer_name)
+ new_instance_ids = [instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids]
+ load_balancer.instance_ids = new_instance_ids
+ return load_balancer
+
+elb_backend = ELBBackend()
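# Editor's sketch, not part of the patch: exercising the new ELB backend end
# to end with the (protocol, lb_port, instance_port) listener tuples that
# ELBResponse.create_load_balancer assembles from the querystring.
from moto.elb.models import elb_backend

balancer = elb_backend.create_load_balancer(
    name='my-lb',
    zones=['us-east-1a'],
    ports=[('http', '80', '8080')],
)
assert balancer.listeners[0].protocol == 'HTTP'

elb_backend.register_instances('my-lb', ['i-12345678', 'i-87654321'])
elb_backend.deregister_instances('my-lb', ['i-12345678'])
assert elb_backend.get_load_balancer('my-lb').instance_ids == ['i-87654321']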
diff --git a/moto/elb/responses.py b/moto/elb/responses.py
new file mode 100644
index 000000000..4fcf055df
--- /dev/null
+++ b/moto/elb/responses.py
@@ -0,0 +1,179 @@
+from jinja2 import Template
+
+from moto.core.responses import BaseResponse
+from .models import elb_backend
+
+
+class ELBResponse(BaseResponse):
+
+ def create_load_balancer(self):
+ """
+ u'Scheme': [u'internet-facing'],
+ """
+ load_balancer_name = self.querystring.get('LoadBalancerName')[0]
+ availability_zones = [value[0] for key, value in self.querystring.items() if "AvailabilityZones.member" in key]
+ ports = []
+ port_index = 1
+ while True:
+ try:
+ protocol = self.querystring['Listeners.member.{}.Protocol'.format(port_index)][0]
+ except KeyError:
+ break
+ lb_port = self.querystring['Listeners.member.{}.LoadBalancerPort'.format(port_index)][0]
+ instance_port = self.querystring['Listeners.member.{}.InstancePort'.format(port_index)][0]
+ ports.append([protocol, lb_port, instance_port])
+ port_index += 1
+ elb_backend.create_load_balancer(
+ name=load_balancer_name,
+ zones=availability_zones,
+ ports=ports,
+ )
+ template = Template(CREATE_LOAD_BALANCER_TEMPLATE)
+ return template.render()
+
+ def describe_load_balancers(self):
+ names = [value[0] for key, value in self.querystring.items() if "LoadBalancerNames.member" in key]
+ load_balancers = elb_backend.describe_load_balancers(names)
+ template = Template(DESCRIBE_LOAD_BALANCERS_TEMPLATE)
+ return template.render(load_balancers=load_balancers)
+
+ def delete_load_balancer(self):
+ load_balancer_name = self.querystring.get('LoadBalancerName')[0]
+ elb_backend.delete_load_balancer(load_balancer_name)
+ template = Template(DELETE_LOAD_BALANCER_TEMPLATE)
+ return template.render()
+
+ def configure_health_check(self):
+ check = elb_backend.configure_health_check(
+ load_balancer_name=self.querystring.get('LoadBalancerName')[0],
+ timeout=self.querystring.get('HealthCheck.Timeout')[0],
+ healthy_threshold=self.querystring.get('HealthCheck.HealthyThreshold')[0],
+ unhealthy_threshold=self.querystring.get('HealthCheck.UnhealthyThreshold')[0],
+ interval=self.querystring.get('HealthCheck.Interval')[0],
+ target=self.querystring.get('HealthCheck.Target')[0],
+ )
+ template = Template(CONFIGURE_HEALTH_CHECK_TEMPLATE)
+ return template.render(check=check)
+
+ def register_instances_with_load_balancer(self):
+ load_balancer_name = self.querystring.get('LoadBalancerName')[0]
+ instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key]
+ template = Template(REGISTER_INSTANCES_TEMPLATE)
+ load_balancer = elb_backend.register_instances(load_balancer_name, instance_ids)
+ return template.render(load_balancer=load_balancer)
+
+ def deregister_instances_from_load_balancer(self):
+ load_balancer_name = self.querystring.get('LoadBalancerName')[0]
+ instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key]
+ template = Template(DEREGISTER_INSTANCES_TEMPLATE)
+ load_balancer = elb_backend.deregister_instances(load_balancer_name, instance_ids)
+ return template.render(load_balancer=load_balancer)
+
+CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResult xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
+  <DNSName>tests.us-east-1.elb.amazonaws.com</DNSName>
+</CreateLoadBalancerResult>"""
+
+DELETE_LOAD_BALANCER_TEMPLATE = """<DeleteLoadBalancerResult xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
+</DeleteLoadBalancerResult>"""
+
+DESCRIBE_LOAD_BALANCERS_TEMPLATE = """
+
+
+ {% for load_balancer in load_balancers %}
+
+
+
+ {{ load_balancer.name }}
+ 2013-01-01T00:00:00.19000Z
+
+ {% if load_balancer.health_check %}
+ {{ load_balancer.health_check.interval }}
+ {{ load_balancer.health_check.target }}
+ {{ load_balancer.health_check.healthy_threshold }}
+ {{ load_balancer.health_check.timeout }}
+ {{ load_balancer.health_check.unhealthy_threshold }}
+ {% endif %}
+
+ vpc-56e10e3d
+
+ {% for listener in load_balancer.listeners %}
+
+
+ AWSConsolePolicy-1
+
+
+ {{ listener.protocol }}
+ {{ listener.load_balancer_port }}
+ {{ listener.protocol }}
+ {{ listener.instance_port }}
+
+
+ {% endfor %}
+
+
+ {% for instance_id in load_balancer.instance_ids %}
+
+ {{ instance_id }}
+
+ {% endfor %}
+
+
+
+
+
+
+ AWSConsolePolicy-1
+ 30
+
+
+
+
+ {% for zone in load_balancer.zones %}
+ {{ zone }}
+ {% endfor %}
+
+ tests.us-east-1.elb.amazonaws.com
+ Z3ZONEID
+ internet-facing
+ tests.us-east-1.elb.amazonaws.com
+
+
+
+
+ {% endfor %}
+
+
+
+ f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c
+
+"""
+
+CONFIGURE_HEALTH_CHECK_TEMPLATE = """<ConfigureHealthCheckResult xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
+  <HealthCheck>
+    <Interval>{{ check.interval }}</Interval>
+    <Target>{{ check.target }}</Target>
+    <HealthyThreshold>{{ check.healthy_threshold }}</HealthyThreshold>
+    <Timeout>{{ check.timeout }}</Timeout>
+    <UnhealthyThreshold>{{ check.unhealthy_threshold }}</UnhealthyThreshold>
+  </HealthCheck>
+</ConfigureHealthCheckResult>"""
+
+REGISTER_INSTANCES_TEMPLATE = """<RegisterInstancesWithLoadBalancerResult xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
+  <Instances>
+    {% for instance_id in load_balancer.instance_ids %}
+    <Instance>
+      <InstanceId>{{ instance_id }}</InstanceId>
+    </Instance>
+    {% endfor %}
+  </Instances>
+</RegisterInstancesWithLoadBalancerResult>"""
+
+DEREGISTER_INSTANCES_TEMPLATE = """<DeregisterInstancesFromLoadBalancerResult xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
+  <Instances>
+    {% for instance_id in load_balancer.instance_ids %}
+    <Instance>
+      <InstanceId>{{ instance_id }}</InstanceId>
+    </Instance>
+    {% endfor %}
+  </Instances>
+</DeregisterInstancesFromLoadBalancerResult>"""
diff --git a/moto/elb/urls.py b/moto/elb/urls.py
new file mode 100644
index 000000000..e41ed2921
--- /dev/null
+++ b/moto/elb/urls.py
@@ -0,0 +1,9 @@
+from .responses import ELBResponse
+
+url_bases = [
+ "https?://elasticloadbalancing.(.+).amazonaws.com",
+]
+
+url_paths = {
+ '{0}/$': ELBResponse().dispatch,
+}
diff --git a/moto/emr/__init__.py b/moto/emr/__init__.py
new file mode 100644
index 000000000..7d4de9a5f
--- /dev/null
+++ b/moto/emr/__init__.py
@@ -0,0 +1,2 @@
+from .models import emr_backend
+mock_emr = emr_backend.decorator
diff --git a/moto/emr/models.py b/moto/emr/models.py
new file mode 100644
index 000000000..2fc06ef62
--- /dev/null
+++ b/moto/emr/models.py
@@ -0,0 +1,184 @@
+from moto.core import BaseBackend
+
+from .utils import random_job_id, random_instance_group_id
+
+DEFAULT_JOB_FLOW_ROLE = 'EMRJobflowDefault'
+
+
+class FakeInstanceGroup(object):
+ def __init__(self, id, instance_count, instance_role, instance_type, market, name, bid_price=None):
+ self.id = id
+ self.num_instances = instance_count
+ self.role = instance_role
+ self.type = instance_type
+ self.market = market
+ self.name = name
+ self.bid_price = bid_price
+
+ def set_instance_count(self, instance_count):
+ self.num_instances = instance_count
+
+
+class FakeStep(object):
+ def __init__(self, state, **kwargs):
+ # 'Steps.member.1.HadoopJarStep.Jar': ['/home/hadoop/contrib/streaming/hadoop-streaming.jar'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.1': ['-mapper'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.2': ['s3n://elasticmapreduce/samples/wordcount/wordSplitter.py'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.3': ['-reducer'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.4': ['aggregate'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.5': ['-input'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.6': ['s3n://elasticmapreduce/samples/wordcount/input'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.7': ['-output'],
+ # 'Steps.member.1.HadoopJarStep.Args.member.8': ['s3n:///output/wordcount_output'],
+ # 'Steps.member.1.ActionOnFailure': ['TERMINATE_JOB_FLOW'],
+ # 'Steps.member.1.Name': ['My wordcount example']}
+
+ self.action_on_failure = kwargs['action_on_failure']
+ self.name = kwargs['name']
+ self.jar = kwargs['hadoop_jar_step._jar']
+ self.args = []
+ self.state = state
+
+ arg_index = 1
+ while True:
+ arg = kwargs.get('hadoop_jar_step._args.member.{}'.format(arg_index))
+ if arg:
+ self.args.append(arg)
+ arg_index += 1
+ else:
+ break
+
+
+class FakeJobFlow(object):
+ def __init__(self, job_id, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs):
+ self.id = job_id
+ self.name = name
+ self.log_uri = log_uri
+ self.role = job_flow_role or DEFAULT_JOB_FLOW_ROLE
+ self.state = "STARTING"
+ self.steps = []
+ self.add_steps(steps)
+
+ self.initial_instance_count = instance_attrs.get('instance_count', 0)
+ self.initial_master_instance_type = instance_attrs.get('master_instance_type')
+ self.initial_slave_instance_type = instance_attrs.get('slave_instance_type')
+
+ self.set_visibility(visible_to_all_users)
+ self.normalized_instance_hours = 0
+ self.ec2_key_name = instance_attrs.get('ec2_key_name')
+ self.availability_zone = instance_attrs.get('placement.availability_zone')
+ self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps')
+ self.termination_protected = instance_attrs.get('termination_protected')
+
+ self.instance_group_ids = []
+
+ def terminate(self):
+ self.state = 'TERMINATED'
+
+ def set_visibility(self, visibility):
+ if visibility == 'true':
+ self.visible_to_all_users = True
+ else:
+ self.visible_to_all_users = False
+
+ def add_steps(self, steps):
+ for index, step in enumerate(steps):
+ if self.steps:
+ # If we already have other steps, this one is pending
+ self.steps.append(FakeStep(state='PENDING', **step))
+ else:
+ self.steps.append(FakeStep(state='STARTING', **step))
+
+ def add_instance_group(self, instance_group_id):
+ self.instance_group_ids.append(instance_group_id)
+
+ @property
+ def instance_groups(self):
+ return emr_backend.get_instance_groups(self.instance_group_ids)
+
+ @property
+ def master_instance_type(self):
+ groups = self.instance_groups
+ if groups:
+ return groups[0].type
+ else:
+ return self.initial_master_instance_type
+
+ @property
+ def slave_instance_type(self):
+ groups = self.instance_groups
+ if groups:
+ return groups[0].type
+ else:
+ return self.initial_slave_instance_type
+
+ @property
+ def instance_count(self):
+ groups = self.instance_groups
+ if not groups:
+ # No groups, so return the initial instance count
+ return self.initial_instance_count
+ count = 0
+ for group in groups:
+ count += int(group.num_instances)
+ return count
+
+
+class ElasticMapReduceBackend(BaseBackend):
+
+ def __init__(self):
+ self.job_flows = {}
+ self.instance_groups = {}
+
+ def run_job_flow(self, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs):
+ job_id = random_job_id()
+ job_flow = FakeJobFlow(job_id, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs)
+ self.job_flows[job_id] = job_flow
+ return job_flow
+
+ def add_job_flow_steps(self, job_flow_id, steps):
+ job_flow = self.job_flows[job_flow_id]
+ job_flow.add_steps(steps)
+ return job_flow
+
+ def describe_job_flows(self):
+ return self.job_flows.values()
+
+ def terminate_job_flows(self, job_ids):
+ flows = [flow for flow in self.describe_job_flows() if flow.id in job_ids]
+ for flow in flows:
+ flow.terminate()
+ return flows
+
+ def get_instance_groups(self, instance_group_ids):
+ return [
+ group for group_id, group
+ in self.instance_groups.items()
+ if group_id in instance_group_ids
+ ]
+
+ def add_instance_groups(self, job_flow_id, instance_groups):
+ job_flow = self.job_flows[job_flow_id]
+ result_groups = []
+ for instance_group in instance_groups:
+ instance_group_id = random_instance_group_id()
+ group = FakeInstanceGroup(instance_group_id, **instance_group)
+ self.instance_groups[instance_group_id] = group
+ job_flow.add_instance_group(instance_group_id)
+ result_groups.append(group)
+ return result_groups
+
+ def modify_instance_groups(self, instance_groups):
+ result_groups = []
+ for instance_group in instance_groups:
+ group = self.instance_groups[instance_group['instance_group_id']]
+ group.set_instance_count(instance_group['instance_count'])
+ result_groups.append(group)
+ return result_groups
+
+ def set_visible_to_all_users(self, job_ids, visible_to_all_users):
+ for job_id in job_ids:
+ job = self.job_flows[job_id]
+ job.set_visibility(visible_to_all_users)
+
+
+emr_backend = ElasticMapReduceBackend()
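# Editor's sketch, not part of the patch: driving the new EMR backend directly.
# The steps and instance_attrs dicts use the underscored keys produced by
# _get_list_prefix/_get_dict_param in the response layer.
from moto.emr.models import emr_backend

job_flow = emr_backend.run_job_flow(
    name='my-jobflow',
    log_uri='s3://bucket/logs',
    job_flow_role=None,           # falls back to DEFAULT_JOB_FLOW_ROLE
    visible_to_all_users='true',
    steps=[],
    instance_attrs={'instance_count': 4, 'master_instance_type': 'm1.large'},
)
assert job_flow.state == 'STARTING'
assert job_flow.visible_to_all_users is True
assert job_flow.instance_count == 4   # no instance groups yet

emr_backend.terminate_job_flows([job_flow.id])
assert job_flow.state == 'TERMINATED'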
diff --git a/moto/emr/responses.py b/moto/emr/responses.py
new file mode 100644
index 000000000..89da0658f
--- /dev/null
+++ b/moto/emr/responses.py
@@ -0,0 +1,215 @@
+from jinja2 import Template
+
+from moto.core.responses import BaseResponse
+from moto.core.utils import camelcase_to_underscores
+from .models import emr_backend
+
+
+class ElasticMapReduceResponse(BaseResponse):
+
+ def _get_param(self, param_name):
+ return self.querystring.get(param_name, [None])[0]
+
+ def _get_multi_param(self, param_prefix):
+ return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)]
+
+ def _get_dict_param(self, param_prefix):
+ return {
+ camelcase_to_underscores(key.replace(param_prefix, "")): value[0]
+ for key, value
+ in self.querystring.items()
+ if key.startswith(param_prefix)
+ }
+
+ def _get_list_prefix(self, param_prefix):
+ results = []
+ param_index = 1
+ while True:
+ index_prefix = "{}.{}.".format(param_prefix, param_index)
+ new_items = {
+ camelcase_to_underscores(key.replace(index_prefix, "")): value[0]
+ for key, value in self.querystring.items()
+ if key.startswith(index_prefix)
+ }
+ if not new_items:
+ break
+ results.append(new_items)
+ param_index += 1
+ return results
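# Editor's note, not part of the patch: _get_list_prefix turns numbered
# querystring groups into a list of dicts with underscored keys, e.g. for
# param_prefix 'Steps.member':
#   {'Steps.member.1.Name': ['My wordcount example'],
#    'Steps.member.1.ActionOnFailure': ['TERMINATE_JOB_FLOW']}
# becomes:
#   [{'name': 'My wordcount example', 'action_on_failure': 'TERMINATE_JOB_FLOW'}]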
+
+ def add_job_flow_steps(self):
+ job_flow_id = self._get_param('JobFlowId')
+ steps = self._get_list_prefix('Steps.member')
+
+ job_flow = emr_backend.add_job_flow_steps(job_flow_id, steps)
+ template = Template(ADD_JOB_FLOW_STEPS_TEMPLATE)
+ return template.render(job_flow=job_flow)
+
+ def run_job_flow(self):
+ flow_name = self._get_param('Name')
+ log_uri = self._get_param('LogUri')
+ steps = self._get_list_prefix('Steps.member')
+ instance_attrs = self._get_dict_param('Instances.')
+ job_flow_role = self._get_param('JobFlowRole')
+ visible_to_all_users = self._get_param('VisibleToAllUsers')
+
+ job_flow = emr_backend.run_job_flow(
+ flow_name, log_uri, job_flow_role,
+ visible_to_all_users, steps, instance_attrs
+ )
+ template = Template(RUN_JOB_FLOW_TEMPLATE)
+ return template.render(job_flow=job_flow)
+
+ def describe_job_flows(self):
+ job_flows = emr_backend.describe_job_flows()
+ template = Template(DESCRIBE_JOB_FLOWS_TEMPLATE)
+ return template.render(job_flows=job_flows)
+
+ def terminate_job_flows(self):
+ job_ids = self._get_multi_param('JobFlowIds.member.')
+ job_flows = emr_backend.terminate_job_flows(job_ids)
+ template = Template(TERMINATE_JOB_FLOWS_TEMPLATE)
+ return template.render(job_flows=job_flows)
+
+ def add_instance_groups(self):
+ jobflow_id = self._get_param('JobFlowId')
+ instance_groups = self._get_list_prefix('InstanceGroups.member')
+ instance_groups = emr_backend.add_instance_groups(jobflow_id, instance_groups)
+ template = Template(ADD_INSTANCE_GROUPS_TEMPLATE)
+ return template.render(instance_groups=instance_groups)
+
+ def modify_instance_groups(self):
+ instance_groups = self._get_list_prefix('InstanceGroups.member')
+ instance_groups = emr_backend.modify_instance_groups(instance_groups)
+ template = Template(MODIFY_INSTANCE_GROUPS_TEMPLATE)
+ return template.render(instance_groups=instance_groups)
+
+ def set_visible_to_all_users(self):
+ visible_to_all_users = self._get_param('VisibleToAllUsers')
+ job_ids = self._get_multi_param('JobFlowIds.member')
+ emr_backend.set_visible_to_all_users(job_ids, visible_to_all_users)
+ template = Template(SET_VISIBLE_TO_ALL_USERS_TEMPLATE)
+ return template.render()
+
+
+RUN_JOB_FLOW_TEMPLATE = """<RunJobFlowResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <RunJobFlowResult>
+    <JobFlowId>{{ job_flow.id }}</JobFlowId>
+  </RunJobFlowResult>
+  <ResponseMetadata>
+    <RequestId>
+      8296d8b8-ed85-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</RunJobFlowResponse>"""
+
+DESCRIBE_JOB_FLOWS_TEMPLATE = """
+
+
+ {% for job_flow in job_flows %}
+
+
+ 2009-01-28T21:49:16Z
+ 2009-01-28T21:49:16Z
+ {{ job_flow.state }}
+
+ {{ job_flow.name }}
+ {{ job_flow.role }}
+ {{ job_flow.log_uri }}
+
+ {% for step in job_flow.steps %}
+
+
+ 2009-01-28T21:49:16Z
+ {{ step.state }}
+
+
+
+ {{ step.jar }}
+ MyMainClass
+
+ {% for arg in step.args %}
+ {{ arg }}
+ {% endfor %}
+
+
+
+ {{ step.name }}
+ CONTINUE
+
+
+ {% endfor %}
+
+ {{ job_flow.id }}
+
+
+ us-east-1a
+
+ {{ job_flow.slave_instance_type }}
+ {{ job_flow.master_instance_type }}
+ {{ job_flow.ec2_key_name }}
+ {{ job_flow.normalized_instance_hours }}
+ {{ job_flow.visible_to_all_users }}
+ {{ job_flow.instance_count }}
+ {{ job_flow.keep_job_flow_alive_when_no_steps }}
+ {{ job_flow.termination_protected }}
+
+ {% for instance_group in job_flow.instance_groups %}
+
+ {{ instance_group.id }}
+ {{ instance_group.role }}
+ {{ instance_group.num_instances }}
+ {{ instance_group.type }}
+ {{ instance_group.market }}
+ {{ instance_group.name }}
+ {{ instance_group.bid_price }}
+
+ {% endfor %}
+
+
+
+ {% endfor %}
+
+
+
+
+ 9cea3229-ed85-11dd-9877-6fad448a8419
+
+
+"""
+
+TERMINATE_JOB_FLOWS_TEMPLATE = """<TerminateJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <ResponseMetadata>
+    <RequestId>
+      2690d7eb-ed86-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</TerminateJobFlowsResponse>"""
+
+ADD_JOB_FLOW_STEPS_TEMPLATE = """<AddJobFlowStepsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <ResponseMetadata>
+    <RequestId>
+      df6f4f4a-ed85-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</AddJobFlowStepsResponse>"""
+
+ADD_INSTANCE_GROUPS_TEMPLATE = """<AddInstanceGroupsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <InstanceGroupIds>{% for instance_group in instance_groups %}{{ instance_group.id }}{% if loop.index != loop.length %},{% endif %}{% endfor %}</InstanceGroupIds>
+</AddInstanceGroupsResponse>"""
+
+MODIFY_INSTANCE_GROUPS_TEMPLATE = """<ModifyInstanceGroupsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <ResponseMetadata>
+    <RequestId>
+      2690d7eb-ed86-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</ModifyInstanceGroupsResponse>"""
+
+SET_VISIBLE_TO_ALL_USERS_TEMPLATE = """<SetVisibleToAllUsersResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+  <ResponseMetadata>
+    <RequestId>
+      2690d7eb-ed86-11dd-9877-6fad448a8419
+    </RequestId>
+  </ResponseMetadata>
+</SetVisibleToAllUsersResponse>"""
diff --git a/moto/emr/urls.py b/moto/emr/urls.py
new file mode 100644
index 000000000..8919362f7
--- /dev/null
+++ b/moto/emr/urls.py
@@ -0,0 +1,9 @@
+from .responses import ElasticMapReduceResponse
+
+url_bases = [
+ "https?://elasticmapreduce.(.+).amazonaws.com",
+]
+
+url_paths = {
+ '{0}/$': ElasticMapReduceResponse().dispatch,
+}
diff --git a/moto/emr/utils.py b/moto/emr/utils.py
new file mode 100644
index 000000000..4a0d6db0e
--- /dev/null
+++ b/moto/emr/utils.py
@@ -0,0 +1,14 @@
+import random
+import string
+
+
+def random_job_id(size=13):
+ chars = range(10) + list(string.uppercase)
+ job_tag = ''.join(unicode(random.choice(chars)) for x in range(size))
+ return 'j-{}'.format(job_tag)
+
+
+def random_instance_group_id(size=13):
+ chars = range(10) + list(string.uppercase)
+ job_tag = ''.join(unicode(random.choice(chars)) for x in range(size))
+ return 'i-{}'.format(job_tag)
diff --git a/moto/packages/httpretty.py b/moto/packages/httpretty.py
deleted file mode 100644
index ebd69e4ed..000000000
--- a/moto/packages/httpretty.py
+++ /dev/null
@@ -1,944 +0,0 @@
-# #!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) <2011-2013> Gabriel Falcão
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation
-# files (the "Software"), to deal in the Software without
-# restriction, including without limitation the rights to use,
-# copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following
-# conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-from __future__ import unicode_literals
-
-version = '0.5.12'
-
-import re
-import inspect
-import socket
-import functools
-import itertools
-import warnings
-import logging
-import sys
-import traceback
-import types
-
-PY3 = sys.version_info[0] == 3
-if PY3:
- text_type = str
- byte_type = bytes
- basestring = (str, bytes)
-
- import io
- StringIO = io.BytesIO
-
- class Py3kObject(object):
- def __repr__(self):
- return self.__str__()
-else:
- text_type = unicode
- byte_type = str
- import StringIO
- StringIO = StringIO.StringIO
-
-
-class Py3kObject(object):
- def __repr__(self):
- ret = self.__str__()
- if PY3:
- return ret
- else:
- ret.encode('utf-8')
-
-from datetime import datetime
-from datetime import timedelta
-try:
- from urllib.parse import urlsplit, urlunsplit, parse_qs, quote, quote_plus
-except ImportError:
- from urlparse import urlsplit, urlunsplit, parse_qs
- from urllib import quote, quote_plus
-
-try:
- from http.server import BaseHTTPRequestHandler
-except ImportError:
- from BaseHTTPServer import BaseHTTPRequestHandler
-
-old_socket = socket.socket
-old_create_connection = socket.create_connection
-old_gethostbyname = socket.gethostbyname
-old_gethostname = socket.gethostname
-old_getaddrinfo = socket.getaddrinfo
-old_socksocket = None
-old_ssl_wrap_socket = None
-old_sslwrap_simple = None
-old_sslsocket = None
-
-try:
- import socks
- old_socksocket = socks.socksocket
-except ImportError:
- socks = None
-
-try:
- import ssl
- old_ssl_wrap_socket = ssl.wrap_socket
- if not PY3:
- old_sslwrap_simple = ssl.sslwrap_simple
- old_sslsocket = ssl.SSLSocket
-except ImportError:
- ssl = None
-
-
-ClassTypes = (type,)
-if not PY3:
- ClassTypes = (type, types.ClassType)
-
-
-POTENTIAL_HTTP_PORTS = [80, 443]
-
-
-class HTTPrettyError(Exception):
- pass
-
-
-def utf8(s):
- if isinstance(s, text_type):
- s = s.encode('utf-8')
-
- return byte_type(s)
-
-
-def decode_utf8(s):
- if isinstance(s, byte_type):
- s = s.decode("utf-8")
-
- return text_type(s)
-
-
-def parse_requestline(s):
- """
- http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5
-
- >>> parse_requestline('GET / HTTP/1.0')
- ('GET', '/', '1.0')
- >>> parse_requestline('post /testurl htTP/1.1')
- ('POST', '/testurl', '1.1')
- >>> parse_requestline('Im not a RequestLine')
- Traceback (most recent call last):
- ...
- ValueError: Not a Request-Line
- """
- methods = b'|'.join(HTTPretty.METHODS)
- m = re.match(br'(' + methods + b')\s+(.*)\s+HTTP/(1.[0|1])', s, re.I)
- if m:
- return m.group(1).upper(), m.group(2), m.group(3)
- else:
- raise ValueError('Not a Request-Line')
-
-
-class HTTPrettyRequest(BaseHTTPRequestHandler, Py3kObject):
- def __init__(self, headers, body=''):
- self.body = utf8(body)
- self.raw_headers = utf8(headers)
- self.client_address = ['10.0.0.1']
- self.rfile = StringIO(b'\r\n\r\n'.join([headers.strip(), body]))
- self.wfile = StringIO()
- self.raw_requestline = self.rfile.readline()
- self.error_code = self.error_message = None
- self.parse_request()
- self.method = self.command
- self.querystring = parse_qs(self.path.split("?", 1)[-1])
-
- def __str__(self):
- return 'HTTPrettyRequest(headers={0}, body="{1}")'.format(
- self.headers,
- self.body,
- )
-
-
-class EmptyRequestHeaders(dict):
- pass
-
-
-class HTTPrettyRequestEmpty(object):
- body = ''
- headers = EmptyRequestHeaders()
-
-
-class FakeSockFile(StringIO):
- pass
-
-
-class FakeSSLSocket(object):
- def __init__(self, sock, *args, **kw):
- self._httpretty_sock = sock
-
- def __getattr__(self, attr):
- if attr == '_httpretty_sock':
- return super(FakeSSLSocket, self).__getattribute__(attr)
-
- return getattr(self._httpretty_sock, attr)
-
-
-class fakesock(object):
- class socket(object):
- _entry = None
- debuglevel = 0
- _sent_data = []
-
- def __init__(self, family, type, protocol=6):
- self.setsockopt(family, type, protocol)
- self.truesock = old_socket(family, type, protocol)
- self._closed = True
- self.fd = FakeSockFile()
- self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
- self._sock = self
- self.is_http = False
-
- def getpeercert(self, *a, **kw):
- now = datetime.now()
- shift = now + timedelta(days=30 * 12)
- return {
- 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
- 'subjectAltName': (
- ('DNS', '*%s' % self._host),
- ('DNS', self._host),
- ('DNS', '*'),
- ),
- 'subject': (
- (
- ('organizationName', u'*.%s' % self._host),
- ),
- (
- ('organizationalUnitName',
- u'Domain Control Validated'),
- ),
- (
- ('commonName', u'*.%s' % self._host),
- ),
- ),
- }
-
- def ssl(self, sock, *args, **kw):
- return sock
-
- def setsockopt(self, family, type, protocol):
- self.family = family
- self.protocol = protocol
- self.type = type
-
- def connect(self, address):
- self._address = (self._host, self._port) = address
- self._closed = False
- self.is_http = self._port in POTENTIAL_HTTP_PORTS
- if not self.is_http:
- self.truesock.connect(self._address)
-
- def close(self):
- if not self._closed:
- self.truesock.close()
- self._closed = True
-
- def makefile(self, mode='r', bufsize=-1):
- self._mode = mode
- self._bufsize = bufsize
-
- if self._entry:
- self._entry.fill_filekind(self.fd, self._request)
-
- return self.fd
-
- def _true_sendall(self, data, *args, **kw):
- if self.is_http:
- self.truesock.connect(self._address)
-
- self.truesock.sendall(data, *args, **kw)
-
- _d = True
- while _d:
- try:
- _d = self.truesock.recv(16)
- self.truesock.settimeout(0.0)
- self.fd.write(_d)
-
- except socket.error:
- break
-
- self.fd.seek(0)
-
- def sendall(self, data, *args, **kw):
-
- self._sent_data.append(data)
- hostnames = [getattr(i.info, 'hostname', None) for i in HTTPretty._entries.keys()]
- self.fd.seek(0)
- try:
- requestline, _ = data.split(b'\r\n', 1)
- method, path, version = parse_requestline(requestline)
- is_parsing_headers = True
- except ValueError:
- is_parsing_headers = False
-
- if not is_parsing_headers:
- if len(self._sent_data) > 1:
- headers, body = map(utf8, self._sent_data[-2:])
-
- method, path, version = parse_requestline(headers)
- split_url = urlsplit(path)
-
- info = URIInfo(hostname=self._host, port=self._port,
- path=split_url.path,
- query=split_url.query)
-
- # If we are sending more data to a dynamic response entry,
- # we need to call the method again.
- if self._entry and self._entry.dynamic_response:
- self._entry.body(info, method, body, headers)
-
- try:
- return HTTPretty.historify_request(headers, body, False)
-
- except Exception as e:
- logging.error(traceback.format_exc(e))
- return self._true_sendall(data, *args, **kw)
-
- # path might come with
- s = urlsplit(path)
- POTENTIAL_HTTP_PORTS.append(int(s.port or 80))
- headers, body = map(utf8, data.split(b'\r\n\r\n', 1))
-
- request = HTTPretty.historify_request(headers, body)
-
- info = URIInfo(hostname=self._host, port=self._port,
- path=s.path,
- query=s.query,
- last_request=request)
-
- entries = []
-
- for matcher, value in HTTPretty._entries.items():
- if matcher.matches(info):
- entries = value
- break
-
- if not entries:
- self._true_sendall(data)
- return
-
- self._entry = matcher.get_next_entry(method)
- self._request = (info, body, headers)
-
- def debug(*a, **kw):
- frame = inspect.stack()[0][0]
- lines = map(utf8, traceback.format_stack(frame))
-
- message = [
- "HTTPretty intercepted and unexpected socket method call.",
- ("Please open an issue at "
- "'https://github.com/gabrielfalcao/HTTPretty/issues'"),
- "And paste the following traceback:\n",
- "".join(decode_utf8(lines)),
- ]
- raise RuntimeError("\n".join(message))
-
- def settimeout(self, new_timeout):
- self.timeout = new_timeout
-
- sendto = send = recvfrom_into = recv_into = recvfrom = recv = debug
-
-
-def fake_wrap_socket(s, *args, **kw):
- return s
-
-
-def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
- s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
- s.settimeout(timeout)
- if source_address:
- s.bind(source_address)
- s.connect(address)
- return s
-
-
-def fake_gethostbyname(host):
- return host
-
-
-def fake_gethostname():
- return 'localhost'
-
-
-def fake_getaddrinfo(
- host, port, family=None, socktype=None, proto=None, flags=None):
- return [(2, 1, 6, '', (host, port))]
-
-
-STATUSES = {
- 100: "Continue",
- 101: "Switching Protocols",
- 102: "Processing",
- 200: "OK",
- 201: "Created",
- 202: "Accepted",
- 203: "Non-Authoritative Information",
- 204: "No Content",
- 205: "Reset Content",
- 206: "Partial Content",
- 207: "Multi-Status",
- 208: "Already Reported",
- 226: "IM Used",
- 300: "Multiple Choices",
- 301: "Moved Permanently",
- 302: "Found",
- 303: "See Other",
- 304: "Not Modified",
- 305: "Use Proxy",
- 306: "Switch Proxy",
- 307: "Temporary Redirect",
- 308: "Permanent Redirect",
- 400: "Bad Request",
- 401: "Unauthorized",
- 402: "Payment Required",
- 403: "Forbidden",
- 404: "Not Found",
- 405: "Method Not Allowed",
- 406: "Not Acceptable",
- 407: "Proxy Authentication Required",
- 408: "Request a Timeout",
- 409: "Conflict",
- 410: "Gone",
- 411: "Length Required",
- 412: "Precondition Failed",
- 413: "Request Entity Too Large",
- 414: "Request-URI Too Long",
- 415: "Unsupported Media Type",
- 416: "Requested Range Not Satisfiable",
- 417: "Expectation Failed",
- 418: "I'm a teapot",
- 420: "Enhance Your Calm",
- 422: "Unprocessable Entity",
- 423: "Locked",
- 424: "Failed Dependency",
- 424: "Method Failure",
- 425: "Unordered Collection",
- 426: "Upgrade Required",
- 428: "Precondition Required",
- 429: "Too Many Requests",
- 431: "Request Header Fields Too Large",
- 444: "No Response",
- 449: "Retry With",
- 450: "Blocked by Windows Parental Controls",
- 451: "Unavailable For Legal Reasons",
- 451: "Redirect",
- 494: "Request Header Too Large",
- 495: "Cert Error",
- 496: "No Cert",
- 497: "HTTP to HTTPS",
- 499: "Client Closed Request",
- 500: "Internal Server Error",
- 501: "Not Implemented",
- 502: "Bad Gateway",
- 503: "Service Unavailable",
- 504: "Gateway Timeout",
- 505: "HTTP Version Not Supported",
- 506: "Variant Also Negotiates",
- 507: "Insufficient Storage",
- 508: "Loop Detected",
- 509: "Bandwidth Limit Exceeded",
- 510: "Not Extended",
- 511: "Network Authentication Required",
- 598: "Network read timeout error",
- 599: "Network connect timeout error",
-}
-
-
-class Entry(Py3kObject):
- def __init__(self, method, uri, body,
- adding_headers=None,
- forcing_headers=None,
- status=200,
- streaming=False,
- **headers):
-
- self.method = method
- self.uri = uri
-
- if callable(body):
- self.dynamic_response = True
- else:
- self.dynamic_response = False
-
- self.body = body
- self.streaming = streaming
-
- if self.dynamic_response or self.streaming:
- self.body_length = 0
- else:
- self.body_length = len(self.body or '')
-
- self.adding_headers = adding_headers or {}
- self.forcing_headers = forcing_headers or {}
- self.status = int(status)
-
- for k, v in headers.items():
- name = "-".join(k.split("_")).capitalize()
- self.adding_headers[name] = v
-
- self.validate()
-
- def validate(self):
- content_length_keys = 'Content-Length', 'content-length'
- for key in content_length_keys:
- got = self.adding_headers.get(
- key, self.forcing_headers.get(key, None))
-
- if got is None:
- continue
-
- try:
- igot = int(got)
- except ValueError:
- warnings.warn(
- 'HTTPretty got to register the Content-Length header ' \
- 'with "%r" which is not a number' % got,
- )
-
- if igot > self.body_length:
- raise HTTPrettyError(
- 'HTTPretty got inconsistent parameters. The header ' \
- 'Content-Length you registered expects size "%d" but ' \
- 'the body you registered for that has actually length ' \
- '"%d".' % (
- igot, self.body_length,
- )
- )
-
- def __str__(self):
- return r'<Entry %s %s getting %d>' % (
- self.method, self.uri, self.status)
-
- def normalize_headers(self, headers):
- new = {}
- for k in headers:
- new_k = '-'.join([s.lower() for s in k.split('-')])
- new[new_k] = headers[k]
-
- return new
-
- def fill_filekind(self, fk, request):
- now = datetime.utcnow()
-
- headers = {
- 'status': self.status,
- 'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
- 'server': 'Python/HTTPretty',
- 'connection': 'close',
- }
-
- if self.forcing_headers:
- headers = self.forcing_headers
-
- if self.dynamic_response:
- req_info, req_body, req_headers = request
- response = self.body(req_info, self.method, req_body, req_headers)
- if isinstance(response, basestring):
- body = response
- else:
- body, new_headers = response
- headers.update(new_headers)
- else:
- body = self.body
-
- if self.adding_headers:
- headers.update(self.normalize_headers(self.adding_headers))
-
- headers = self.normalize_headers(headers)
-
- status = headers.get('status', self.status)
- string_list = [
- 'HTTP/1.1 %d %s' % (status, STATUSES[status]),
- ]
-
- if 'date' in headers:
- string_list.append('date: %s' % headers.pop('date'))
-
- if not self.forcing_headers:
- content_type = headers.pop('content-type',
- 'text/plain; charset=utf-8')
-
- body_length = self.body_length
- if self.dynamic_response:
- body_length = len(body)
- content_length = headers.pop('content-length', body_length)
-
- string_list.append('content-type: %s' % content_type)
- if not self.streaming:
- string_list.append('content-length: %s' % content_length)
-
- string_list.append('server: %s' % headers.pop('server'))
-
- for k, v in headers.items():
- string_list.append(
- '{0}: {1}'.format(k, v),
- )
-
- for item in string_list:
- fk.write(utf8(item) + b'\n')
-
- fk.write(b'\r\n')
-
- if self.streaming:
- self.body, body = itertools.tee(body)
- for chunk in body:
- fk.write(utf8(chunk))
- else:
- fk.write(utf8(body))
-
- fk.seek(0)
-
-
-def url_fix(s, charset='utf-8'):
- scheme, netloc, path, querystring, fragment = urlsplit(s)
- path = quote(path, b'/%')
- querystring = quote_plus(querystring, b':&=')
- return urlunsplit((scheme, netloc, path, querystring, fragment))
-
-
-class URIInfo(Py3kObject):
- def __init__(self,
- username='',
- password='',
- hostname='',
- port=80,
- path='/',
- query='',
- fragment='',
- scheme='',
- last_request=None):
-
- self.username = username or ''
- self.password = password or ''
- self.hostname = hostname or ''
-
- if port:
- port = int(port)
-
- elif scheme == 'https':
- port = 443
-
- self.port = port or 80
- self.path = path or ''
- self.query = query or ''
- self.scheme = scheme or (self.port is 80 and "http" or "https")
- self.fragment = fragment or ''
- self.last_request = last_request
-
- def __str__(self):
- attrs = (
- 'username',
- 'password',
- 'hostname',
- 'port',
- 'path',
- )
- fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
- return r'<httpretty.URIInfo(%s)>' % fmt
-
- def __hash__(self):
- return hash(text_type(self))
-
- def __eq__(self, other):
- self_tuple = (
- self.port,
- decode_utf8(self.hostname),
- url_fix(decode_utf8(self.path)),
- )
- other_tuple = (
- other.port,
- decode_utf8(other.hostname),
- url_fix(decode_utf8(other.path)),
- )
- return self_tuple == other_tuple
-
- def full_url(self):
- credentials = ""
- if self.password:
- credentials = "{0}:{1}@".format(
- self.username, self.password)
-
- result = "{scheme}://{credentials}{host}{path}".format(
- scheme=self.scheme,
- credentials=credentials,
- host=decode_utf8(self.hostname),
- path=decode_utf8(self.path)
- )
- return result
-
- @classmethod
- def from_uri(cls, uri, entry):
- result = urlsplit(uri)
- POTENTIAL_HTTP_PORTS.append(int(result.port or 80))
- return cls(result.username,
- result.password,
- result.hostname,
- result.port,
- result.path,
- result.query,
- result.fragment,
- result.scheme,
- entry)
-
-
-class URIMatcher(object):
- regex = None
- info = None
-
- def __init__(self, uri, entries):
- if type(uri).__name__ == 'SRE_Pattern':
- self.regex = uri
- else:
- self.info = URIInfo.from_uri(uri, entries)
-
- self.entries = entries
-
- #hash of current_entry pointers, per method.
- self.current_entries = {}
-
- def matches(self, info):
- if self.info:
- return self.info == info
- else:
- return self.regex.search(info.full_url())
-
- def __str__(self):
- wrap = 'URLMatcher({0})'
- if self.info:
- return wrap.format(text_type(self.info))
- else:
- return wrap.format(self.regex.pattern)
-
- def get_next_entry(self, method='GET'):
- """Cycle through available responses, but only once.
- Any subsequent requests will receive the last response"""
-
- if method not in self.current_entries:
- self.current_entries[method] = 0
-
- #restrict selection to entries that match the requested method
- entries_for_method = [e for e in self.entries if e.method == method]
-
- if self.current_entries[method] >= len(entries_for_method):
- self.current_entries[method] = -1
-
- if not self.entries or not entries_for_method:
- raise ValueError('I have no entries for method %s: %s'
- % (method, self))
-
- entry = entries_for_method[self.current_entries[method]]
- if self.current_entries[method] != -1:
- self.current_entries[method] += 1
- return entry
-
- def __hash__(self):
- return hash(text_type(self))
-
- def __eq__(self, other):
- return text_type(self) == text_type(other)
-
-
-class HTTPretty(Py3kObject):
- u"""The URI registration class"""
- _entries = {}
- latest_requests = []
- GET = b'GET'
- PUT = b'PUT'
- POST = b'POST'
- DELETE = b'DELETE'
- HEAD = b'HEAD'
- PATCH = b'PATCH'
- METHODS = (GET, PUT, POST, DELETE, HEAD, PATCH)
- last_request = HTTPrettyRequestEmpty()
- _is_enabled = False
-
- @classmethod
- def reset(cls):
- cls._entries.clear()
- cls.latest_requests = []
- cls.last_request = HTTPrettyRequestEmpty()
-
- @classmethod
- def historify_request(cls, headers, body='', append=True):
- request = HTTPrettyRequest(headers, body)
- cls.last_request = request
- if append:
- cls.latest_requests.append(request)
- else:
- cls.latest_requests[-1] = request
- return request
-
- @classmethod
- def register_uri(cls, method, uri, body='HTTPretty :)',
- adding_headers=None,
- forcing_headers=None,
- status=200,
- responses=None, **headers):
-
- if isinstance(responses, list) and len(responses) > 0:
- for response in responses:
- response.uri = uri
- response.method = method
- entries_for_this_uri = responses
- else:
- headers['body'] = body
- headers['adding_headers'] = adding_headers
- headers['forcing_headers'] = forcing_headers
- headers['status'] = status
-
- entries_for_this_uri = [
- cls.Response(method=method, uri=uri, **headers),
- ]
-
- matcher = URIMatcher(uri, entries_for_this_uri)
- if matcher in cls._entries:
- matcher.entries.extend(cls._entries[matcher])
- del cls._entries[matcher]
-
- cls._entries[matcher] = entries_for_this_uri
-
- def __str__(self):
- return u'<HTTPretty with %d URI entries>' % len(self._entries)
-
- @classmethod
- def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None,
- status=200, streaming=False, **headers):
-
- headers['body'] = body
- headers['adding_headers'] = adding_headers
- headers['forcing_headers'] = forcing_headers
- headers['status'] = int(status)
- headers['streaming'] = streaming
- return Entry(method, uri, **headers)
-
- @classmethod
- def disable(cls):
- cls._is_enabled = False
- socket.socket = old_socket
- socket.SocketType = old_socket
- socket._socketobject = old_socket
-
- socket.create_connection = old_create_connection
- socket.gethostname = old_gethostname
- socket.gethostbyname = old_gethostbyname
- socket.getaddrinfo = old_getaddrinfo
- socket.inet_aton = old_gethostbyname
-
- socket.__dict__['socket'] = old_socket
- socket.__dict__['_socketobject'] = old_socket
- socket.__dict__['SocketType'] = old_socket
-
- socket.__dict__['create_connection'] = old_create_connection
- socket.__dict__['gethostname'] = old_gethostname
- socket.__dict__['gethostbyname'] = old_gethostbyname
- socket.__dict__['getaddrinfo'] = old_getaddrinfo
- socket.__dict__['inet_aton'] = old_gethostbyname
-
- if socks:
- socks.socksocket = old_socksocket
- socks.__dict__['socksocket'] = old_socksocket
-
- if ssl:
- ssl.wrap_socket = old_ssl_wrap_socket
- ssl.SSLSocket = old_sslsocket
- ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
- ssl.__dict__['SSLSocket'] = old_sslsocket
-
- if not PY3:
- ssl.sslwrap_simple = old_sslwrap_simple
- ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple
-
- @classmethod
- def is_enabled(cls):
- return cls._is_enabled
-
- @classmethod
- def enable(cls):
- cls._is_enabled = True
- socket.socket = fakesock.socket
- socket._socketobject = fakesock.socket
- socket.SocketType = fakesock.socket
-
- socket.create_connection = create_fake_connection
- socket.gethostname = fake_gethostname
- socket.gethostbyname = fake_gethostbyname
- socket.getaddrinfo = fake_getaddrinfo
- socket.inet_aton = fake_gethostbyname
-
- socket.__dict__['socket'] = fakesock.socket
- socket.__dict__['_socketobject'] = fakesock.socket
- socket.__dict__['SocketType'] = fakesock.socket
-
- socket.__dict__['create_connection'] = create_fake_connection
- socket.__dict__['gethostname'] = fake_gethostname
- socket.__dict__['gethostbyname'] = fake_gethostbyname
- socket.__dict__['inet_aton'] = fake_gethostbyname
- socket.__dict__['getaddrinfo'] = fake_getaddrinfo
-
- if socks:
- socks.socksocket = fakesock.socket
- socks.__dict__['socksocket'] = fakesock.socket
-
- if ssl:
- ssl.wrap_socket = fake_wrap_socket
- ssl.SSLSocket = FakeSSLSocket
-
- ssl.__dict__['wrap_socket'] = fake_wrap_socket
- ssl.__dict__['SSLSocket'] = FakeSSLSocket
-
- if not PY3:
- ssl.sslwrap_simple = fake_wrap_socket
- ssl.__dict__['sslwrap_simple'] = fake_wrap_socket
-
-
-def httprettified(test):
- "A decorator tests that use HTTPretty"
- def decorate_class(klass):
- for attr in dir(klass):
- if not attr.startswith('test_'):
- continue
-
- attr_value = getattr(klass, attr)
- if not hasattr(attr_value, "__call__"):
- continue
-
- setattr(klass, attr, decorate_callable(attr_value))
- return klass
-
- def decorate_callable(test):
- @functools.wraps(test)
- def wrapper(*args, **kw):
- HTTPretty.reset()
- HTTPretty.enable()
- try:
- return test(*args, **kw)
- finally:
- HTTPretty.disable()
- return wrapper
-
- if isinstance(test, ClassTypes):
- return decorate_class(test)
- return decorate_callable(test)
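
    # Not part of the patch: a minimal sketch of the public API the vendored copy
    # above exposed, which the PyPI 'httpretty' package (pinned in setup.py below)
    # keeps providing. Uses only names defined in the deleted module.
    from httpretty import HTTPretty, httprettified
    import urllib2

    @httprettified  # resets, enables, and disables interception around the test
    def test_intercepts_get():
        HTTPretty.register_uri(HTTPretty.GET, "http://example.com/",
                               body="hello from HTTPretty")
        response = urllib2.urlopen("http://example.com/")
        assert response.read() == "hello from HTTPretty"
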
diff --git a/moto/s3/models.py b/moto/s3/models.py
index a2547ea47..67c48beb4 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -1,23 +1,54 @@
-# from boto.s3.bucket import Bucket
-# from boto.s3.key import Key
import os
import base64
-import md5
+import datetime
+import hashlib
from moto.core import BaseBackend
+from moto.core.utils import iso_8601_datetime, rfc_1123_datetime
+from .utils import clean_key_name
class FakeKey(object):
def __init__(self, name, value):
self.name = name
self.value = value
+ self.last_modified = datetime.datetime.now()
+ self._metadata = {}
+
+ def set_metadata(self, key, metadata):
+ self._metadata[key] = metadata
+
+ def append_to_value(self, value):
+ self.value += value
+ self.last_modified = datetime.datetime.now()
@property
def etag(self):
- value_md5 = md5.new()
+ value_md5 = hashlib.md5()
value_md5.update(self.value)
return '"{0}"'.format(value_md5.hexdigest())
+ @property
+ def last_modified_ISO8601(self):
+ return iso_8601_datetime(self.last_modified)
+
+ @property
+ def last_modified_RFC1123(self):
+ # Different datetime formats depending on how the key is obtained
+ # https://github.com/boto/boto/issues/466
+ return rfc_1123_datetime(self.last_modified)
+
+ @property
+ def metadata(self):
+ return self._metadata
+
+ @property
+ def response_dict(self):
+ return {
+ 'etag': self.etag,
+ 'last-modified': self.last_modified_RFC1123,
+ }
+
@property
def size(self):
return len(self.value)
@@ -87,13 +118,23 @@ class S3Backend(BaseBackend):
return None
def set_key(self, bucket_name, key_name, value):
+ key_name = clean_key_name(key_name)
+
bucket = self.buckets[bucket_name]
new_key = FakeKey(name=key_name, value=value)
bucket.keys[key_name] = new_key
return new_key
+ def append_to_key(self, bucket_name, key_name, value):
+ key_name = clean_key_name(key_name)
+
+ key = self.get_key(bucket_name, key_name)
+ key.append_to_value(value)
+ return key
+
def get_key(self, bucket_name, key_name):
+ key_name = clean_key_name(key_name)
bucket = self.get_bucket(bucket_name)
if bucket:
return bucket.keys.get(key_name)
@@ -120,21 +161,24 @@ class S3Backend(BaseBackend):
multipart = bucket.multiparts[multipart_id]
return multipart.set_part(part_id, value)
- def prefix_query(self, bucket, prefix):
+ def prefix_query(self, bucket, prefix, delimiter):
key_results = set()
folder_results = set()
if prefix:
for key_name, key in bucket.keys.iteritems():
if key_name.startswith(prefix):
- if '/' in key_name.lstrip(prefix):
- key_without_prefix = key_name.lstrip(prefix).split("/")[0]
- folder_results.add("{}{}".format(prefix, key_without_prefix))
+ key_without_prefix = key_name.replace(prefix, "", 1)
+ if delimiter and delimiter in key_without_prefix:
+ # If delimiter, we need to split out folder_results
+ key_without_delimiter = key_without_prefix.split(delimiter)[0]
+ folder_results.add("{}{}{}".format(prefix, key_without_delimiter, delimiter))
else:
key_results.add(key)
else:
for key_name, key in bucket.keys.iteritems():
- if '/' in key_name:
- folder_results.add(key_name.split("/")[0])
+ if delimiter and delimiter in key_name:
+ # If delimiter, we need to split out folder_results
+ folder_results.add(key_name.split(delimiter)[0])
else:
key_results.add(key)
@@ -144,10 +188,13 @@ class S3Backend(BaseBackend):
return key_results, folder_results
def delete_key(self, bucket_name, key_name):
+ key_name = clean_key_name(key_name)
bucket = self.buckets[bucket_name]
return bucket.keys.pop(key_name)
def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name):
+ src_key_name = clean_key_name(src_key_name)
+ dest_key_name = clean_key_name(dest_key_name)
src_bucket = self.buckets[src_bucket_name]
dest_bucket = self.buckets[dest_bucket_name]
dest_bucket.keys[dest_key_name] = src_bucket.keys[src_key_name]
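
    # Not part of the patch: a rough sketch of the new FakeKey behaviour through
    # the module-level backend instance that moto.s3.responses imports from this
    # file. Expected values are shown as comments.
    from moto.s3.models import s3_backend

    s3_backend.create_bucket('mybucket')
    key = s3_backend.set_key('mybucket', 'some%20key', 'the value')
    key.name                    # 'some key' -- clean_key_name unquotes it
    key.set_metadata('x-amz-meta-color', 'blue')
    s3_backend.append_to_key('mybucket', 'some key', ' and more')
    s3_backend.get_key('mybucket', 'some key').value  # 'the value and more'
    key.response_dict           # {'etag': '"..."', 'last-modified': '...'}
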
diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index fc1fdcec2..bc359d87d 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -1,10 +1,10 @@
-from urlparse import parse_qs
+from urlparse import parse_qs, urlparse
+import re
from jinja2 import Template
from .models import s3_backend
-from moto.core.utils import headers_to_dict
-from .utils import bucket_name_from_hostname
+from .utils import bucket_name_from_url
def all_buckets():
@@ -14,11 +14,22 @@ def all_buckets():
return template.render(buckets=all_buckets)
-def bucket_response(uri, method, body, headers):
- hostname = uri.hostname
- querystring = parse_qs(uri.query)
+def bucket_response(request, full_url, headers):
+ response = _bucket_response(request, full_url, headers)
+ if isinstance(response, basestring):
+ return 200, headers, response
- bucket_name = bucket_name_from_hostname(hostname)
+ else:
+ status_code, headers, response_content = response
+ return status_code, headers, response_content
+
+
+def _bucket_response(request, full_url, headers):
+ parsed_url = urlparse(full_url)
+ querystring = parse_qs(parsed_url.query)
+ method = request.method
+
+ bucket_name = bucket_name_from_url(full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return all_buckets()
@@ -27,16 +38,18 @@ def bucket_response(uri, method, body, headers):
bucket = s3_backend.get_bucket(bucket_name)
if bucket:
prefix = querystring.get('prefix', [None])[0]
- result_keys, result_folders = s3_backend.prefix_query(bucket, prefix)
+ delimiter = querystring.get('delimiter', [None])[0]
+ result_keys, result_folders = s3_backend.prefix_query(bucket, prefix, delimiter)
template = Template(S3_BUCKET_GET_RESPONSE)
return template.render(
bucket=bucket,
prefix=prefix,
+ delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders
)
else:
- return "", dict(status=404)
+ return 404, headers, ""
elif method == 'PUT':
new_bucket = s3_backend.create_bucket(bucket_name)
template = Template(S3_BUCKET_CREATE_RESPONSE)
@@ -46,34 +59,75 @@ def bucket_response(uri, method, body, headers):
if removed_bucket is None:
# Non-existent bucket
template = Template(S3_DELETE_NON_EXISTING_BUCKET)
- return template.render(bucket_name=bucket_name), dict(status=404)
+ return 404, headers, template.render(bucket_name=bucket_name)
elif removed_bucket:
# Bucket exists
template = Template(S3_DELETE_BUCKET_SUCCESS)
- return template.render(bucket=removed_bucket), dict(status=204)
+ return 204, headers, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = Template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
- return template.render(bucket=removed_bucket), dict(status=409)
+ return 409, headers, template.render(bucket=removed_bucket)
+ elif method == 'POST':
+ #POST to bucket-url should create file from form
+ if hasattr(request, 'form'):
+ #Not HTTPretty
+ form = request.form
+ else:
+ #HTTPretty, build new form object
+ form = {}
+ for kv in request.body.split('&'):
+ k, v = kv.split('=')
+ form[k] = v
+
+ key = form['key']
+ f = form['file']
+
+ new_key = s3_backend.set_key(bucket_name, key, f)
+
+ #Metadata
+ meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
+ for form_id in form:
+ result = meta_regex.match(form_id)
+ if result:
+ meta_key = result.group(0).lower()
+ metadata = form[form_id]
+ new_key.set_metadata(meta_key, metadata)
+ return 200, headers, ""
else:
raise NotImplementedError("Method {} has not been implemented in the S3 backend yet".format(method))
-def key_response(uri_info, method, body, headers):
+def key_response(request, full_url, headers):
+ response = _key_response(request, full_url, headers)
+ if isinstance(response, basestring):
+ return 200, headers, response
+ else:
+ status_code, headers, response_content = response
+ return status_code, headers, response_content
- key_name = uri_info.path.lstrip('/')
- hostname = uri_info.hostname
- headers = headers_to_dict(headers)
- query = parse_qs(uri_info.query)
- bucket_name = bucket_name_from_hostname(hostname)
+def _key_response(request, full_url, headers):
+ parsed_url = urlparse(full_url)
+ method = request.method
+
+ key_name = parsed_url.path.lstrip('/')
+ query = parse_qs(parsed_url.query)
+ bucket_name = bucket_name_from_url(full_url)
+ if hasattr(request, 'body'):
+ # Boto
+ body = request.body
+ else:
+ # Flask server
+ body = request.data
if method == 'GET':
key = s3_backend.get_key(bucket_name, key_name)
if key:
- return key.value
+ headers.update(key.metadata)
+ return 200, headers, key.value
else:
- return "", dict(status=404)
+ return 404, headers, ""
if method == 'PUT':
if 'uploadId' in query and 'partNumber' in query and body:
upload_id = query['uploadId'][0]
@@ -82,42 +136,52 @@ def key_response(uri_info, method, body, headers):
return '', dict(etag=key.etag)
- if 'x-amz-copy-source' in headers:
+ if 'x-amz-copy-source' in request.headers:
# Copy key
- src_bucket, src_key = headers.get("x-amz-copy-source").split("/")
+ src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/")
s3_backend.copy_key(src_bucket, src_key, bucket_name, key_name)
template = Template(S3_OBJECT_COPY_RESPONSE)
return template.render(key=src_key)
-
- if body is not None:
- key = s3_backend.get_key(bucket_name, key_name)
- if not key or body:
- # We want to write the key in once of two circumstances.
- # - The key does not currently exist.
- # - The key already exists, but body is a truthy value.
- # This allows us to write empty strings to keys for the first
- # write, but not subsequent. This is because HTTPretty sends
- # an empty string on connection close. This is a temporary fix
- # while HTTPretty gets fixed.
- new_key = s3_backend.set_key(bucket_name, key_name, body)
- template = Template(S3_OBJECT_RESPONSE)
- return template.render(key=new_key), dict(etag=new_key.etag)
- key = s3_backend.get_key(bucket_name, key_name)
- if key:
- return "", dict(etag=key.etag)
+ streaming_request = hasattr(request, 'streaming') and request.streaming
+ closing_connection = headers.get('connection') == 'close'
+ if closing_connection and streaming_request:
+ # Closing the connection of a streaming request. No more data
+ new_key = s3_backend.get_key(bucket_name, key_name)
+ elif streaming_request:
+ # Streaming request, more data
+ new_key = s3_backend.append_to_key(bucket_name, key_name, body)
+ else:
+ # Initial data
+ new_key = s3_backend.set_key(bucket_name, key_name, body)
+ request.streaming = True
+
+ #Metadata
+ meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE)
+ for header in request.headers:
+ if isinstance(header, basestring):
+ result = meta_regex.match(header)
+ if result:
+ meta_key = result.group(0).lower()
+ metadata = request.headers[header]
+ new_key.set_metadata(meta_key, metadata)
+ template = Template(S3_OBJECT_RESPONSE)
+ headers.update(new_key.response_dict)
+ return 200, headers, template.render(key=new_key)
elif method == 'HEAD':
key = s3_backend.get_key(bucket_name, key_name)
if key:
- return S3_OBJECT_RESPONSE, dict(etag=key.etag)
+ headers.update(key.metadata)
+ headers.update(key.response_dict)
+ return 200, headers, ""
else:
- return "", dict(status=404)
+ return 404, headers, ""
elif method == 'DELETE':
removed_key = s3_backend.delete_key(bucket_name, key_name)
template = Template(S3_DELETE_OBJECT_SUCCESS)
- return template.render(bucket=removed_key), dict(status=204)
+ return 204, headers, template.render(bucket=removed_key)
elif method == 'POST':
import pdb; pdb.set_trace()
- if body == '' and uri_info.query == 'uploads':
+ if body == '' and parsed_url.query == 'uploads':
multipart = s3_backend.initiate_multipart(bucket_name, key_name)
template = Template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
@@ -125,7 +189,7 @@ def key_response(uri_info, method, body, headers):
key_name=key_name,
multipart_id=multipart.id,
)
- return response, dict()
+ return 200, headers, response
if body == '' and 'uploadId' in query:
upload_id = query['uploadId'][0]
@@ -164,12 +228,12 @@ S3_BUCKET_GET_RESPONSE = """
<Name>{{ bucket.name }}</Name>
<Prefix>{{ prefix }}</Prefix>
<MaxKeys>1000</MaxKeys>
- <Delimiter>/</Delimiter>
+ <Delimiter>{{ delimiter }}</Delimiter>
<IsTruncated>false</IsTruncated>
{% for key in result_keys %}
<Key>{{ key.name }}</Key>
- <LastModified>2006-01-01T12:00:00.000Z</LastModified>
+ <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>STANDARD</StorageClass>
@@ -180,11 +244,13 @@ S3_BUCKET_GET_RESPONSE = """
<StorageClass>STANDARD</StorageClass>
{% endfor %}
- {% for folder in result_folders %}
- <CommonPrefixes>
- <Prefix>{{ folder }}</Prefix>
- </CommonPrefixes>
- {% endfor %}
+ {% if delimiter %}
+ {% for folder in result_folders %}
+ <CommonPrefixes>
+ <Prefix>{{ folder }}</Prefix>
+ </CommonPrefixes>
+ {% endfor %}
+ {% endif %}
"""
S3_BUCKET_CREATE_RESPONSE = """
@@ -226,14 +292,14 @@ S3_DELETE_OBJECT_SUCCESS = """
<ETag>{{ key.etag }}</ETag>
- <LastModified>2006-03-01T12:00:00.183Z</LastModified>
+ <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
"""
S3_OBJECT_COPY_RESPONSE = """
<ETag>{{ key.etag }}</ETag>
- <LastModified>2008-02-18T13:54:10.183Z</LastModified>
+ <LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
"""
diff --git a/moto/s3/urls.py b/moto/s3/urls.py
index 662a717b0..21370c15a 100644
--- a/moto/s3/urls.py
+++ b/moto/s3/urls.py
@@ -1,10 +1,10 @@
from .responses import bucket_response, key_response
url_bases = [
- "https?://(?P[a-zA-Z0-9\-_]*)\.?s3.amazonaws.com"
+ "https?://(?P[a-zA-Z0-9\-_.]*)\.?s3.amazonaws.com"
]
url_paths = {
'{0}/$': bucket_response,
- '{0}/(?P<key_name>[a-zA-Z0-9\-_]+)': key_response,
+ '{0}/(?P<key_name>[a-zA-Z0-9\-_.]+)': key_response,
}
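
    # Not part of the patch: the widened character classes now let bucket and key
    # names contain dots, e.g.:
    import re

    new = re.compile(r"https?://(?P<bucket_name>[a-zA-Z0-9\-_.]*)\.?s3.amazonaws.com")
    old = re.compile(r"https?://(?P<bucket_name>[a-zA-Z0-9\-_]*)\.?s3.amazonaws.com")
    assert new.match("https://my.bucket.s3.amazonaws.com")      # matches now
    assert not old.match("https://my.bucket.s3.amazonaws.com")  # old pattern did not
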
diff --git a/moto/s3/utils.py b/moto/s3/utils.py
index 8786585f5..19b0cfdf0 100644
--- a/moto/s3/utils.py
+++ b/moto/s3/utils.py
@@ -1,23 +1,27 @@
import re
+import urllib2
import urlparse
bucket_name_regex = re.compile("(.+).s3.amazonaws.com")
-def bucket_name_from_hostname(hostname):
- if 'amazonaws.com' in hostname:
- bucket_result = bucket_name_regex.search(hostname)
+def bucket_name_from_url(url):
+ domain = urlparse.urlparse(url).netloc
+
+ if domain.startswith('www.'):
+ domain = domain[4:]
+
+ if 'amazonaws.com' in domain:
+ bucket_result = bucket_name_regex.search(domain)
if bucket_result:
return bucket_result.groups()[0]
else:
- # In server mode. Use left-most part of subdomain for bucket name
- split_url = urlparse.urlparse(hostname)
-
- # If 'www' prefixed, strip it.
- clean_hostname = split_url.netloc.lstrip("www.")
-
- if '.' in clean_hostname:
- return clean_hostname.split(".")[0]
+ if '.' in domain:
+ return domain.split(".")[0]
else:
# No subdomain found.
return None
+
+
+def clean_key_name(key_name):
+ return urllib2.unquote(key_name)
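
    # Not part of the patch: behaviour sketch for the reworked helpers, with the
    # expected return values shown as comments.
    from moto.s3.utils import bucket_name_from_url, clean_key_name

    bucket_name_from_url('https://mybucket.s3.amazonaws.com/')  # 'mybucket'
    bucket_name_from_url('http://foobar.localhost:5000/')       # 'foobar' (server mode)
    bucket_name_from_url('http://localhost:5000/')              # None (no subdomain)
    clean_key_name('my%20key')                                  # 'my key'
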
diff --git a/moto/server.py b/moto/server.py
index e0d7c60fb..9ef135359 100644
--- a/moto/server.py
+++ b/moto/server.py
@@ -1,14 +1,10 @@
import sys
+import argparse
from flask import Flask
from werkzeug.routing import BaseConverter
-from moto.dynamodb import dynamodb_backend # flake8: noqa
-from moto.ec2 import ec2_backend # flake8: noqa
-from moto.s3 import s3_backend # flake8: noqa
-from moto.ses import ses_backend # flake8: noqa
-from moto.sqs import sqs_backend # flake8: noqa
-
+from moto.backends import BACKENDS
from moto.core.utils import convert_flask_to_httpretty_response
app = Flask(__name__)
@@ -23,23 +19,39 @@ class RegexConverter(BaseConverter):
def configure_urls(service):
- backend = globals()["{}_backend".format(service)]
+ backend = BACKENDS[service]
from werkzeug.routing import Map
+ # Reset view functions to reset the app
+ app.view_functions = {}
app.url_map = Map()
app.url_map.converters['regex'] = RegexConverter
for url_path, handler in backend.flask_paths.iteritems():
app.route(url_path, methods=HTTP_METHODS)(convert_flask_to_httpretty_response(handler))
-def main(args=sys.argv):
- if len(args) != 2:
- print("Usage: moto_server ")
- sys.exit(1)
- service_name = args[1]
- configure_urls(service_name)
+def main(argv=sys.argv[1:]):
+ available_services = BACKENDS.keys()
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ 'service', type=str,
+ choices=available_services,
+ help='Choose which mechanism you want to run')
+ parser.add_argument(
+ '-H', '--host', type=str,
+ help='Which host to bind',
+ default='0.0.0.0')
+ parser.add_argument(
+ '-p', '--port', type=int,
+ help='Port number to use for connection',
+ default=5000)
+
+ args = parser.parse_args(argv)
+
+ configure_urls(args.service)
app.testing = True
- app.run()
+ app.run(host=args.host, port=args.port)
if __name__ == '__main__':
main()
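
    # Not part of the patch: the argparse-based entry point can be driven
    # programmatically, which is how the updated tests below exercise it.
    from moto.server import main

    main(["s3"])                     # serves the S3 backend on 0.0.0.0:5000 (blocks)
    main(["sqs", "--port", "3000"])  # any service name in moto.backends.BACKENDS
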
diff --git a/moto/ses/responses.py b/moto/ses/responses.py
index 5002f925c..6640d76be 100644
--- a/moto/ses/responses.py
+++ b/moto/ses/responses.py
@@ -36,7 +36,10 @@ class EmailResponse(BaseResponse):
return template.render()
def send_email(self):
- body = self.querystring.get('Message.Body.Text.Data')[0]
+ bodydatakey = 'Message.Body.Text.Data'
+ if 'Message.Body.Html.Data' in self.querystring:
+ bodydatakey = 'Message.Body.Html.Data'
+ body = self.querystring.get(bodydatakey)[0]
source = self.querystring.get('Source')[0]
subject = self.querystring.get('Message.Subject.Data')[0]
destination = self.querystring.get('Destination.ToAddresses.member.1')[0]
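    # Not part of the patch: an illustrative boto call that exercises the new
    # HTML-body branch; boto's format='html' posts Message.Body.Html.Data.
    # The verify step assumes this mock checks verified senders first.
    import boto
    from moto import mock_ses

    @mock_ses
    def test_send_html_email():
        conn = boto.connect_ses('the_key', 'the_secret')
        conn.verify_email_identity("test@example.com")
        conn.send_email("test@example.com", "subject", "<p>html body</p>",
                        "to@example.com", format="html")
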
diff --git a/moto/ses/urls.py b/moto/ses/urls.py
index e67423c97..acdc49c82 100644
--- a/moto/ses/urls.py
+++ b/moto/ses/urls.py
@@ -1,7 +1,7 @@
from .responses import EmailResponse
url_bases = [
- "https?://email.us-east-1.amazonaws.com"
+ "https?://email.(.+).amazonaws.com"
]
url_paths = {
diff --git a/moto/sqs/models.py b/moto/sqs/models.py
index 49d004f36..5c6d04fe7 100644
--- a/moto/sqs/models.py
+++ b/moto/sqs/models.py
@@ -1,4 +1,5 @@
-import md5
+import hashlib
+import time
from moto.core import BaseBackend
from moto.core.utils import camelcase_to_underscores, get_random_message_id
@@ -6,6 +7,7 @@ from .utils import generate_receipt_handle
class Message(object):
+
def __init__(self, message_id, body):
self.id = message_id
self.body = body
@@ -13,19 +15,41 @@ class Message(object):
@property
def md5(self):
- body_md5 = md5.new()
+ body_md5 = hashlib.md5()
body_md5.update(self.body)
return body_md5.hexdigest()
class Queue(object):
- camelcase_attributes = ['VisibilityTimeout', 'ApproximateNumberOfMessages']
+ camelcase_attributes = ['ApproximateNumberOfMessages',
+ 'ApproximateNumberOfMessagesDelayed',
+ 'ApproximateNumberOfMessagesNotVisible',
+ 'CreatedTimestamp',
+ 'DelaySeconds',
+ 'LastModifiedTimestamp',
+ 'MaximumMessageSize',
+ 'MessageRetentionPeriod',
+ 'QueueArn',
+ 'ReceiveMessageWaitTimeSeconds',
+ 'VisibilityTimeout']
def __init__(self, name, visibility_timeout):
self.name = name
self.visibility_timeout = visibility_timeout or 30
self.messages = []
+ now = time.time()
+
+ self.approximate_number_of_messages_delayed = 0
+ self.approximate_number_of_messages_not_visible = 0
+ self.created_timestamp = now
+ self.delay_seconds = 0
+ self.last_modified_timestamp = now
+ self.maximum_message_size = 64 << 10
+ self.message_retention_period = 86400 * 4 # four days
+ self.queue_arn = 'arn:aws:sqs:sqs.us-east-1:123456789012:%s' % self.name
+ self.receive_message_wait_time_seconds = 0
+
@property
def attributes(self):
result = {}
@@ -53,7 +77,7 @@ class SQSBackend(BaseBackend):
return self.queues.values()
def get_queue(self, queue_name):
- return self.queues[queue_name]
+ return self.queues.get(queue_name, None)
def delete_queue(self, queue_name):
if queue_name in self.queues:
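
    # Not part of the patch: a sketch of the expanded attribute set, assuming the
    # attributes property maps each CamelCase name to the matching snake_case
    # field set in __init__ above.
    from moto.sqs.models import Queue

    queue = Queue('my-queue', visibility_timeout=60)
    queue.queue_arn                 # 'arn:aws:sqs:sqs.us-east-1:123456789012:my-queue'
    queue.maximum_message_size      # 65536 (64 KiB)
    queue.message_retention_period  # 345600 seconds (four days)
    queue.attributes['VisibilityTimeout']  # 60
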
diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py
index b9bf9130a..0f582cf85 100644
--- a/moto/sqs/responses.py
+++ b/moto/sqs/responses.py
@@ -17,6 +17,16 @@ class QueuesResponse(BaseResponse):
template = Template(CREATE_QUEUE_RESPONSE)
return template.render(queue=queue)
+ def get_queue_url(self):
+ queue_name = self.querystring.get("QueueName")[0]
+ queue = sqs_backend.get_queue(queue_name)
+ if queue:
+ template = Template(GET_QUEUE_URL_RESPONSE)
+ return template.render(queue=queue)
+ else:
+ return "", dict(status=404)
+
+
def list_queues(self):
queues = sqs_backend.list_queues()
template = Template(LIST_QUEUES_RESPONSE)
@@ -143,6 +153,15 @@ CREATE_QUEUE_RESPONSE = """
"""
+GET_QUEUE_URL_RESPONSE = """
+
+ http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }}
+
+
+ 470a6f13-2ed9-4181-ad8a-2fdea142988e
+
+"""
+
LIST_QUEUES_RESPONSE = """
{% for queue in queues %}
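
    # Not part of the patch: newer boto releases resolve queue objects via the
    # GetQueueUrl action, which the response above now serves.
    import boto
    from moto import mock_sqs

    @mock_sqs
    def test_get_queue():
        conn = boto.connect_sqs('the_key', 'the_secret')
        conn.create_queue('test-queue', visibility_timeout=60)
        queue = conn.get_queue('test-queue')
        assert 'test-queue' in queue.url
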
diff --git a/moto/sts/__init__.py b/moto/sts/__init__.py
new file mode 100644
index 000000000..f1ca24c7f
--- /dev/null
+++ b/moto/sts/__init__.py
@@ -0,0 +1,2 @@
+from .models import sts_backend
+mock_sts = sts_backend.decorator
diff --git a/moto/sts/models.py b/moto/sts/models.py
new file mode 100644
index 000000000..3a9e64e0c
--- /dev/null
+++ b/moto/sts/models.py
@@ -0,0 +1,39 @@
+import datetime
+from moto.core import BaseBackend
+from moto.core.utils import iso_8601_datetime
+
+
+class Token(object):
+ def __init__(self, duration):
+ now = datetime.datetime.now()
+ self.expiration = now + datetime.timedelta(seconds=duration)
+
+ @property
+ def expiration_ISO8601(self):
+ return iso_8601_datetime(self.expiration)
+
+
+class AssumedRole(object):
+ def __init__(self, role_session_name, role_arn, policy, duration, external_id):
+ self.session_name = role_session_name
+ self.arn = role_arn
+ self.policy = policy
+ now = datetime.datetime.now()
+ self.expiration = now + datetime.timedelta(seconds=duration)
+ self.external_id = external_id
+
+ @property
+ def expiration_ISO8601(self):
+ return iso_8601_datetime(self.expiration)
+
+
+class STSBackend(BaseBackend):
+ def get_session_token(self, duration):
+ token = Token(duration=duration)
+ return token
+
+ def assume_role(self, **kwargs):
+ role = AssumedRole(**kwargs)
+ return role
+
+sts_backend = STSBackend()
diff --git a/moto/sts/responses.py b/moto/sts/responses.py
new file mode 100644
index 000000000..e97c9ec51
--- /dev/null
+++ b/moto/sts/responses.py
@@ -0,0 +1,67 @@
+from jinja2 import Template
+
+from moto.core.responses import BaseResponse
+from .models import sts_backend
+
+
+class TokenResponse(BaseResponse):
+
+ def get_session_token(self):
+ duration = int(self.querystring.get('DurationSeconds', [43200])[0])
+ token = sts_backend.get_session_token(duration=duration)
+ template = Template(GET_SESSION_TOKEN_RESPONSE)
+ return template.render(token=token)
+
+ def assume_role(self):
+ role_session_name = self.querystring.get('RoleSessionName')[0]
+ role_arn = self.querystring.get('RoleArn')[0]
+
+ policy = self.querystring.get('Policy', [None])[0]
+ duration = int(self.querystring.get('DurationSeconds', [3600])[0])
+ external_id = self.querystring.get('ExternalId', [None])[0]
+
+ role = sts_backend.assume_role(
+ role_session_name=role_session_name,
+ role_arn=role_arn,
+ policy=policy,
+ duration=duration,
+ external_id=external_id,
+ )
+ template = Template(ASSUME_ROLE_RESPONSE)
+ return template.render(role=role)
+
+
+GET_SESSION_TOKEN_RESPONSE = """
+
+
+ AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE
+ wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY
+ {{ token.expiration_ISO8601 }}
+ AKIAIOSFODNN7EXAMPLE
+
+
+
+ 58c5dbae-abef-11e0-8cfe-09039844ac7d
+
+"""
+
+
+ASSUME_ROLE_RESPONSE = """
+
+
+ BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE
+ aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY
+ {{ role.expiration_ISO8601 }}
+ AKIAIOSFODNN7EXAMPLE
+
+
+ {{ role.arn }}
+ ARO123EXAMPLE123:{{ role.session_name }}
+
+ 6
+
+
+ c6104cbe-af31-11e0-8154-cbc7ccf896c7
+
+"""
diff --git a/moto/sts/urls.py b/moto/sts/urls.py
new file mode 100644
index 000000000..ab69fe8c2
--- /dev/null
+++ b/moto/sts/urls.py
@@ -0,0 +1,9 @@
+from .responses import TokenResponse
+
+url_bases = [
+ "https?://sts.amazonaws.com"
+]
+
+url_paths = {
+ '{0}/$': TokenResponse().dispatch,
+}
diff --git a/requirements.txt b/requirements.txt
index b3731770e..62f6f0a27 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,5 @@
coverage
freezegun
-#httpretty
mock
nose
https://github.com/spulec/python-coveralls/tarball/796d9dba34b759664e42ba39e6414209a0f319ad
diff --git a/setup.py b/setup.py
index e20d9fe97..d5c232b40 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
setup(
name='moto',
- version='0.1.1',
+ version='0.2.9',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
@@ -18,7 +18,8 @@ setup(
packages=find_packages(),
install_requires=[
"boto",
- "Jinja2",
"flask",
+ "httpretty>=0.6.1",
+ "Jinja2",
],
)
diff --git a/moto/packages/__init__.py b/tests/__init__.py
similarity index 100%
rename from moto/packages/__init__.py
rename to tests/__init__.py
diff --git a/tests/helpers.py b/tests/helpers.py
new file mode 100644
index 000000000..199f74fcb
--- /dev/null
+++ b/tests/helpers.py
@@ -0,0 +1,19 @@
+import boto
+from nose.plugins.skip import SkipTest
+
+
+def version_tuple(v):
+ return tuple(map(int, (v.split("."))))
+
+
+class requires_boto_gte(object):
+ """Decorator for requiring boto version greater than or equal to 'version'"""
+ def __init__(self, version):
+ self.version = version
+
+ def __call__(self, test):
+ boto_version = version_tuple(boto.__version__)
+ required = version_tuple(self.version)
+ if boto_version >= required:
+ return test
+ raise SkipTest
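
    # Not part of the patch: how the new autoscaling tests below use the helper.
    from tests.helpers import requires_boto_gte

    @requires_boto_gte("2.12")
    def test_needs_ebs_optimized_support():
        pass  # kept under boto>=2.12; SkipTest is raised at import time otherwise
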
diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py
new file mode 100644
index 000000000..676833a96
--- /dev/null
+++ b/tests/test_autoscaling/test_autoscaling.py
@@ -0,0 +1,306 @@
+import boto
+from boto.ec2.autoscale.launchconfig import LaunchConfiguration
+from boto.ec2.autoscale.group import AutoScalingGroup
+import sure # noqa
+
+from moto import mock_autoscaling, mock_ec2
+from tests.helpers import requires_boto_gte
+
+
+@mock_autoscaling
+def test_create_autoscaling_group():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ availability_zones=['us-east-1c', 'us-east-1b'],
+ default_cooldown=60,
+ desired_capacity=2,
+ health_check_period=100,
+ health_check_type="EC2",
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ load_balancers=["test_lb"],
+ placement_group="test_placement",
+ vpc_zone_identifier='subnet-1234abcd',
+ termination_policies=["OldestInstance", "NewestInstance"],
+ )
+ conn.create_auto_scaling_group(group)
+
+ group = conn.get_all_groups()[0]
+ group.name.should.equal('tester_group')
+ set(group.availability_zones).should.equal(set(['us-east-1c', 'us-east-1b']))
+ group.desired_capacity.should.equal(2)
+ group.max_size.should.equal(2)
+ group.min_size.should.equal(2)
+ group.vpc_zone_identifier.should.equal('subnet-1234abcd')
+ group.launch_config_name.should.equal('tester')
+ group.default_cooldown.should.equal(60)
+ group.health_check_period.should.equal(100)
+ group.health_check_type.should.equal("EC2")
+ list(group.load_balancers).should.equal(["test_lb"])
+ group.placement_group.should.equal("test_placement")
+ list(group.termination_policies).should.equal(["OldestInstance", "NewestInstance"])
+
+
+@mock_autoscaling
+def test_create_autoscaling_groups_defaults():
+ """ Test with the minimum inputs and check that all of the proper defaults
+ are assigned for the other attributes """
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ )
+ conn.create_auto_scaling_group(group)
+
+ group = conn.get_all_groups()[0]
+ group.name.should.equal('tester_group')
+ group.max_size.should.equal(2)
+ group.min_size.should.equal(2)
+ group.launch_config_name.should.equal('tester')
+
+ # Defaults
+ list(group.availability_zones).should.equal([])
+ group.desired_capacity.should.equal(2)
+ group.vpc_zone_identifier.should.equal('')
+ group.default_cooldown.should.equal(300)
+ group.health_check_period.should.equal(None)
+ group.health_check_type.should.equal("EC2")
+ list(group.load_balancers).should.equal([])
+ group.placement_group.should.equal(None)
+ list(group.termination_policies).should.equal([])
+
+
+@mock_autoscaling
+def test_autoscaling_group_describe_filter():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ )
+ conn.create_auto_scaling_group(group)
+ group.name = 'tester_group2'
+ conn.create_auto_scaling_group(group)
+ group.name = 'tester_group3'
+ conn.create_auto_scaling_group(group)
+
+ conn.get_all_groups(names=['tester_group', 'tester_group2']).should.have.length_of(2)
+ conn.get_all_groups().should.have.length_of(3)
+
+
+@mock_autoscaling
+def test_autoscaling_update():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ availability_zones=['us-east-1c', 'us-east-1b'],
+ desired_capacity=2,
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ vpc_zone_identifier='subnet-1234abcd',
+ )
+ conn.create_auto_scaling_group(group)
+
+ group = conn.get_all_groups()[0]
+ group.vpc_zone_identifier.should.equal('subnet-1234abcd')
+
+ group.vpc_zone_identifier = 'subnet-5678efgh'
+ group.update()
+
+ group = conn.get_all_groups()[0]
+ group.vpc_zone_identifier.should.equal('subnet-5678efgh')
+
+
+@mock_autoscaling
+def test_autoscaling_group_delete():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ )
+ conn.create_auto_scaling_group(group)
+
+ conn.get_all_groups().should.have.length_of(1)
+
+ conn.delete_auto_scaling_group('tester_group')
+ conn.get_all_groups().should.have.length_of(0)
+
+
+@mock_ec2
+@mock_autoscaling
+def test_autoscaling_group_describe_instances():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ )
+ conn.create_auto_scaling_group(group)
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(2)
+ instances[0].launch_config_name.should.equal('tester')
+ autoscale_instance_ids = [instance.instance_id for instance in instances]
+
+ ec2_conn = boto.connect_ec2()
+ reservations = ec2_conn.get_all_instances()
+ instances = reservations[0].instances
+ instances.should.have.length_of(2)
+ instance_ids = [instance.id for instance in instances]
+ set(autoscale_instance_ids).should.equal(set(instance_ids))
+
+
+@requires_boto_gte("2.8")
+@mock_autoscaling
+def test_set_desired_capacity_up():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ availability_zones=['us-east-1c', 'us-east-1b'],
+ desired_capacity=2,
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ vpc_zone_identifier='subnet-1234abcd',
+ )
+ conn.create_auto_scaling_group(group)
+
+ group = conn.get_all_groups()[0]
+ group.desired_capacity.should.equal(2)
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(2)
+
+ conn.set_desired_capacity("tester_group", 3)
+ group = conn.get_all_groups()[0]
+ group.desired_capacity.should.equal(3)
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(3)
+
+
+@requires_boto_gte("2.8")
+@mock_autoscaling
+def test_set_desired_capacity_down():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ availability_zones=['us-east-1c', 'us-east-1b'],
+ desired_capacity=2,
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ vpc_zone_identifier='subnet-1234abcd',
+ )
+ conn.create_auto_scaling_group(group)
+
+ group = conn.get_all_groups()[0]
+ group.desired_capacity.should.equal(2)
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(2)
+
+ conn.set_desired_capacity("tester_group", 1)
+ group = conn.get_all_groups()[0]
+ group.desired_capacity.should.equal(1)
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(1)
+
+
+@requires_boto_gte("2.8")
+@mock_autoscaling
+def test_set_desired_capacity_the_same():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ availability_zones=['us-east-1c', 'us-east-1b'],
+ desired_capacity=2,
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ vpc_zone_identifier='subnet-1234abcd',
+ )
+ conn.create_auto_scaling_group(group)
+
+ group = conn.get_all_groups()[0]
+ group.desired_capacity.should.equal(2)
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(2)
+
+ conn.set_desired_capacity("tester_group", 2)
+ group = conn.get_all_groups()[0]
+ group.desired_capacity.should.equal(2)
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(2)
diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py
new file mode 100644
index 000000000..ece3ecfcc
--- /dev/null
+++ b/tests/test_autoscaling/test_launch_configurations.py
@@ -0,0 +1,125 @@
+import boto
+from boto.ec2.autoscale.launchconfig import LaunchConfiguration
+
+import sure # noqa
+
+from moto import mock_autoscaling
+from tests.helpers import requires_boto_gte
+
+
+@mock_autoscaling
+def test_create_launch_configuration():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ key_name='the_keys',
+ security_groups=["default", "default2"],
+ user_data="This is some user_data",
+ instance_monitoring=True,
+ instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing',
+ spot_price=0.1,
+ )
+ conn.create_launch_configuration(config)
+
+ launch_config = conn.get_all_launch_configurations()[0]
+ launch_config.name.should.equal('tester')
+ launch_config.image_id.should.equal('ami-abcd1234')
+ launch_config.instance_type.should.equal('m1.small')
+ launch_config.key_name.should.equal('the_keys')
+ set(launch_config.security_groups).should.equal(set(['default', 'default2']))
+ launch_config.user_data.should.equal("This is some user_data")
+ launch_config.instance_monitoring.enabled.should.equal('true')
+ launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing')
+ launch_config.spot_price.should.equal(0.1)
+
+
+@requires_boto_gte("2.12")
+@mock_autoscaling
+def test_create_launch_configuration_for_2_12():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ ebs_optimized=True,
+ )
+ conn.create_launch_configuration(config)
+
+ launch_config = conn.get_all_launch_configurations()[0]
+ launch_config.ebs_optimized.should.equal(True)
+
+
+@mock_autoscaling
+def test_create_launch_configuration_defaults():
+ """ Test with the minimum inputs and check that all of the proper defaults
+ are assigned for the other attributes """
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ launch_config = conn.get_all_launch_configurations()[0]
+ launch_config.name.should.equal('tester')
+ launch_config.image_id.should.equal('ami-abcd1234')
+ launch_config.instance_type.should.equal('m1.small')
+
+ # Defaults
+ launch_config.key_name.should.equal('')
+ list(launch_config.security_groups).should.equal([])
+ launch_config.user_data.should.equal("")
+ launch_config.instance_monitoring.enabled.should.equal('false')
+ launch_config.instance_profile_name.should.equal(None)
+ launch_config.spot_price.should.equal(None)
+ launch_config.ebs_optimized.should.equal(False)
+
+
+@requires_boto_gte("2.12")
+@mock_autoscaling
+def test_create_launch_configuration_defaults_for_2_12():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ )
+ conn.create_launch_configuration(config)
+
+ launch_config = conn.get_all_launch_configurations()[0]
+ launch_config.ebs_optimized.should.equal(False)
+
+
+@mock_autoscaling
+def test_launch_configuration_describe_filter():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+ config.name = 'tester2'
+ conn.create_launch_configuration(config)
+ config.name = 'tester3'
+ conn.create_launch_configuration(config)
+
+ conn.get_all_launch_configurations(names=['tester', 'tester2']).should.have.length_of(2)
+ conn.get_all_launch_configurations().should.have.length_of(3)
+
+
+@mock_autoscaling
+def test_launch_configuration_delete():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ conn.get_all_launch_configurations().should.have.length_of(1)
+
+ conn.delete_launch_configuration('tester')
+ conn.get_all_launch_configurations().should.have.length_of(0)
diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py
new file mode 100644
index 000000000..2f067151a
--- /dev/null
+++ b/tests/test_autoscaling/test_policies.py
@@ -0,0 +1,186 @@
+import boto
+from boto.ec2.autoscale.launchconfig import LaunchConfiguration
+from boto.ec2.autoscale.group import AutoScalingGroup
+from boto.ec2.autoscale.policy import ScalingPolicy
+import sure # noqa
+
+from moto import mock_autoscaling
+
+
+def setup_autoscale_group():
+ conn = boto.connect_autoscale()
+ config = LaunchConfiguration(
+ name='tester',
+ image_id='ami-abcd1234',
+ instance_type='m1.small',
+ )
+ conn.create_launch_configuration(config)
+
+ group = AutoScalingGroup(
+ name='tester_group',
+ max_size=2,
+ min_size=2,
+ launch_config=config,
+ )
+ conn.create_auto_scaling_group(group)
+ return group
+
+
+@mock_autoscaling
+def test_create_policy():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ExactCapacity',
+ as_name='tester_group',
+ scaling_adjustment=3,
+ cooldown=60,
+ )
+ conn.create_scaling_policy(policy)
+
+ policy = conn.get_all_policies()[0]
+ policy.name.should.equal('ScaleUp')
+ policy.adjustment_type.should.equal('ExactCapacity')
+ policy.as_name.should.equal('tester_group')
+ policy.scaling_adjustment.should.equal(3)
+ policy.cooldown.should.equal(60)
+
+
+@mock_autoscaling
+def test_create_policy_default_values():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ExactCapacity',
+ as_name='tester_group',
+ scaling_adjustment=3,
+ )
+ conn.create_scaling_policy(policy)
+
+ policy = conn.get_all_policies()[0]
+ policy.name.should.equal('ScaleUp')
+
+ # Defaults
+ policy.cooldown.should.equal(300)
+
+
+@mock_autoscaling
+def test_update_policy():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ExactCapacity',
+ as_name='tester_group',
+ scaling_adjustment=3,
+ )
+ conn.create_scaling_policy(policy)
+
+ policy = conn.get_all_policies()[0]
+ policy.scaling_adjustment.should.equal(3)
+
+ # Now update it by creating another with the same name
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ExactCapacity',
+ as_name='tester_group',
+ scaling_adjustment=2,
+ )
+ conn.create_scaling_policy(policy)
+ policy = conn.get_all_policies()[0]
+ policy.scaling_adjustment.should.equal(2)
+
+
+@mock_autoscaling
+def test_delete_policy():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ExactCapacity',
+ as_name='tester_group',
+ scaling_adjustment=3,
+ )
+ conn.create_scaling_policy(policy)
+
+ conn.get_all_policies().should.have.length_of(1)
+
+ conn.delete_policy('ScaleUp')
+ conn.get_all_policies().should.have.length_of(0)
+
+
+@mock_autoscaling
+def test_execute_policy_exact_capacity():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ExactCapacity',
+ as_name='tester_group',
+ scaling_adjustment=3,
+ )
+ conn.create_scaling_policy(policy)
+
+ conn.execute_policy("ScaleUp")
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(3)
+
+
+@mock_autoscaling
+def test_execute_policy_positive_change_in_capacity():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='ChangeInCapacity',
+ as_name='tester_group',
+ scaling_adjustment=3,
+ )
+ conn.create_scaling_policy(policy)
+
+ conn.execute_policy("ScaleUp")
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(5)
+
+
+@mock_autoscaling
+def test_execute_policy_percent_change_in_capacity():
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='PercentChangeInCapacity',
+ as_name='tester_group',
+ scaling_adjustment=50,
+ )
+ conn.create_scaling_policy(policy)
+
+ conn.execute_policy("ScaleUp")
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(3)
+
+
+@mock_autoscaling
+def test_execute_policy_small_percent_change_in_capacity():
+ """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html
+ If PercentChangeInCapacity returns a value between 0 and 1,
+ Auto Scaling will round it off to 1."""
+ setup_autoscale_group()
+ conn = boto.connect_autoscale()
+ policy = ScalingPolicy(
+ name='ScaleUp',
+ adjustment_type='PercentChangeInCapacity',
+ as_name='tester_group',
+ scaling_adjustment=1,
+ )
+ conn.create_scaling_policy(policy)
+
+ conn.execute_policy("ScaleUp")
+
+ instances = list(conn.get_all_autoscaling_instances())
+ instances.should.have.length_of(3)
diff --git a/tests/test_autoscaling/test_server.py b/tests/test_autoscaling/test_server.py
new file mode 100644
index 000000000..d3ca05cd5
--- /dev/null
+++ b/tests/test_autoscaling/test_server.py
@@ -0,0 +1,16 @@
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+server.configure_urls("autoscaling")
+
+
+def test_describe_launch_configurations():
+ test_client = server.app.test_client()
+ res = test_client.get('/?Action=DescribeLaunchConfigurations')
+
+ res.data.should.contain('<DescribeLaunchConfigurationsResponse')
diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py
index bdf1d8a86..444bae89d 100644
--- a/tests/test_core/test_decorator_calls.py
+++ b/tests/test_core/test_decorator_calls.py
@@ -1,6 +1,6 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
@@ -11,7 +11,7 @@ Test the different ways that the decorator can be used
@mock_ec2
def test_basic_connect():
- conn = boto.connect_ec2()
+ boto.connect_ec2()
@mock_ec2
@@ -42,3 +42,11 @@ def test_decorator_start_and_stop():
mock.stop()
conn.get_all_instances.when.called_with().should.throw(EC2ResponseError)
+
+
+@mock_ec2
+def test_decorator_wrapped_gets_set():
+ """
+ Moto decorator's __wrapped__ should get set to the test function
+ """
+ test_decorator_wrapped_gets_set.__wrapped__.__name__.should.equal('test_decorator_wrapped_gets_set')
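
For contrast with the decorator form tested above, a minimal sketch of the start()/stop() API that test_decorator_start_and_stop exercises: the mock is live only between the two calls (the test name here is illustrative):

```python
import boto
from boto.exception import EC2ResponseError
import sure  # noqa

from moto import mock_ec2


def test_manual_start_and_stop():
    mock = mock_ec2()
    mock.start()
    conn = boto.connect_ec2('the_key', 'the_secret')
    conn.run_instances('ami-1234abcd')
    # While the mock is active, calls hit moto's in-memory backend.
    conn.get_all_instances().should.have.length_of(1)
    mock.stop()
    # Once stopped, the same call reaches (and fails against) real AWS.
    conn.get_all_instances.when.called_with().should.throw(EC2ResponseError)
```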
diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py
new file mode 100644
index 000000000..e11e67b94
--- /dev/null
+++ b/tests/test_core/test_instance_metadata.py
@@ -0,0 +1,37 @@
+import requests
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_latest_meta_data():
+ res = requests.get("http://169.254.169.254/latest/meta-data/")
+ res.content.should.equal("iam")
+
+
+@mock_ec2
+def test_meta_data_iam():
+ res = requests.get("http://169.254.169.254/latest/meta-data/iam")
+ json_response = res.json()
+ default_role = json_response['security-credentials']['default-role']
+ default_role.should.contain('AccessKeyId')
+ default_role.should.contain('SecretAccessKey')
+ default_role.should.contain('Token')
+ default_role.should.contain('Expiration')
+
+
+@mock_ec2
+def test_meta_data_security_credentials():
+ res = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/")
+ res.content.should.equal("default-role")
+
+
+@mock_ec2
+def test_meta_data_default_role():
+ res = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/default-role")
+ json_response = res.json()
+ json_response.should.contain('AccessKeyId')
+ json_response.should.contain('SecretAccessKey')
+ json_response.should.contain('Token')
+ json_response.should.contain('Expiration')
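
A sketch of how code under test would chain these endpoints together, first listing the role and then fetching its credentials; the role name and key names follow the responses asserted above, and the test name is illustrative:

```python
import requests
import sure  # noqa

from moto import mock_ec2


@mock_ec2
def test_credentials_lookup():
    base = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
    role = requests.get(base).content         # the listing returns 'default-role'
    creds = requests.get(base + role).json()  # ...which keys the credential blob
    creds.should.contain('AccessKeyId')
    creds.should.contain('SecretAccessKey')
```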
diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py
index e86098c4f..3d29132f2 100644
--- a/tests/test_core/test_server.py
+++ b/tests/test_core/test_server.py
@@ -1,12 +1,12 @@
from mock import patch
-import sure # flake8: noqa
+import sure # noqa
from moto.server import main
def test_wrong_arguments():
try:
- main(["name", "test1", "test2"])
+ main(["name", "test1", "test2", "test3"])
assert False, ("main() when called with the incorrect number of args"
" should raise a system exit")
except SystemExit:
@@ -15,5 +15,11 @@ def test_wrong_arguments():
@patch('moto.server.app.run')
def test_right_arguments(app_run):
- main(["name", "s3"])
- app_run.assert_called_once_with()
+ main(["s3"])
+ app_run.assert_called_once_with(host='0.0.0.0', port=5000)
+
+
+@patch('moto.server.app.run')
+def test_port_argument(app_run):
+ main(["s3", "--port", "8080"])
+ app_run.assert_called_once_with(host='0.0.0.0', port=8080)
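
These two cases pin down the CLI contract: the service name is the sole positional argument and --port overrides the default of 5000. Presumably the same contract holds for any service moto serves; a sketch using ec2 (an assumption, since only s3 is exercised above):

```python
from mock import patch
import sure  # noqa

from moto.server import main


@patch('moto.server.app.run')
def test_default_port_for_other_services(app_run):
    main(["ec2"])
    # No --port flag given, so the default binding should apply.
    app_run.assert_called_once_with(host='0.0.0.0', port=5000)
```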
diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py
index af688da9e..7218398e2 100644
--- a/tests/test_core/test_url_mapping.py
+++ b/tests/test_core/test_url_mapping.py
@@ -1,4 +1,4 @@
-import sure # flake8: noqa
+import sure # noqa
from moto.core.utils import convert_regex_to_flask_path
diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py
index c3582d7c4..ac95fb88b 100644
--- a/tests/test_dynamodb/test_dynamodb.py
+++ b/tests/test_dynamodb/test_dynamodb.py
@@ -1,12 +1,10 @@
import boto
-import sure # flake8: noqa
-from freezegun import freeze_time
+import sure # noqa
import requests
from moto import mock_dynamodb
from moto.dynamodb import dynamodb_backend
-from boto.dynamodb import condition
from boto.exception import DynamoDBResponseError
@@ -43,3 +41,12 @@ def test_sts_handler():
res = requests.post("https://sts.amazonaws.com/", data={"GetSessionToken": ""})
res.ok.should.be.ok
res.text.should.contain("SecretAccessKey")
+
+
+@mock_dynamodb
+def test_dynamodb_with_connect_to_region():
+ # these calls would also work over boto.connect_dynamodb(); the point here is to exercise connect_to_region
+ dynamodb = boto.dynamodb.connect_to_region('us-west-2')
+
+ schema = dynamodb.create_schema('column1', str(), 'column2', int())
+ dynamodb.create_table('table1', schema, 200, 200)
diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py
index bff2be93b..12700707c 100644
--- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py
+++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py
@@ -1,11 +1,11 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb
-from moto.dynamodb import dynamodb_backend
from boto.dynamodb import condition
+from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError
from boto.exception import DynamoDBResponseError
@@ -101,6 +101,8 @@ def test_item_add_and_describe_and_update():
)
item.put()
+ table.has_item("LOLCat Forum", "Check this out!").should.equal(True)
+
returned_item = table.get_item(
hash_key='LOLCat Forum',
range_key='Check this out!',
@@ -150,7 +152,8 @@ def test_get_missing_item():
table.get_item.when.called_with(
hash_key='tester',
range_key='other',
- ).should.throw(DynamoDBResponseError)
+ ).should.throw(DynamoDBKeyNotFoundError)
+ table.has_item("foobar", "more").should.equal(False)
@mock_dynamodb
@@ -163,7 +166,31 @@ def test_get_item_with_undeclared_table():
'HashKeyElement': {'S': 'tester'},
'RangeKeyElement': {'S': 'test-range'},
},
- ).should.throw(DynamoDBResponseError)
+ ).should.throw(DynamoDBKeyNotFoundError)
+
+
+@mock_dynamodb
+def test_get_item_without_range_key():
+ conn = boto.connect_dynamodb()
+ message_table_schema = conn.create_schema(
+ hash_key_name="test_hash",
+ hash_key_proto_value=int,
+ range_key_name="test_range",
+ range_key_proto_value=int,
+ )
+ table = conn.create_table(
+ name='messages',
+ schema=message_table_schema,
+ read_units=10,
+ write_units=10
+ )
+
+ hash_key = 3241526475
+ range_key = 1234567890987
+ new_item = table.new_item(hash_key=hash_key, range_key=range_key)
+ new_item.put()
+
+ table.get_item.when.called_with(hash_key=hash_key).should.throw(DynamoDBValidationError)
@mock_dynamodb
@@ -473,5 +500,6 @@ def test_batch_read():
item.put()
items = table.batch_get_item([('the-key', '123'), ('another-key', '789')])
- count = len([item for item in items])
+ # Iterate through so that the lazy batch request is actually issued
+ count = len([x for x in items])
count.should.equal(2)
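
The "iterate through" comment above matters because boto returns batch_get_item results lazily; nothing is fetched until the result object is consumed. A minimal sketch of that behavior, reusing the table layout from these tests:

```python
import boto
import sure  # noqa

from moto import mock_dynamodb


@mock_dynamodb
def test_batch_get_item_is_lazy():
    conn = boto.connect_dynamodb()
    schema = conn.create_schema(
        hash_key_name='forum_name', hash_key_proto_value=str,
        range_key_name='subject', range_key_proto_value=str,
    )
    table = conn.create_table(name='messages', schema=schema,
                              read_units=10, write_units=10)
    table.new_item(hash_key='the-key', range_key='123',
                   attrs={'Body': 'some text'}).put()

    items = table.batch_get_item([('the-key', '123')])
    # Only this iteration triggers the actual batch request.
    list(items).should.have.length_of(1)
```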
diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py
index c1ffdf2ce..81e76f7f8 100644
--- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py
+++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py
@@ -1,11 +1,11 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from freezegun import freeze_time
from moto import mock_dynamodb
-from moto.dynamodb import dynamodb_backend
from boto.dynamodb import condition
+from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
from boto.exception import DynamoDBResponseError
@@ -137,7 +137,7 @@ def test_get_missing_item():
table.get_item.when.called_with(
hash_key='tester',
- ).should.throw(DynamoDBResponseError)
+ ).should.throw(DynamoDBKeyNotFoundError)
@mock_dynamodb
@@ -149,7 +149,7 @@ def test_get_item_with_undeclared_table():
key={
'HashKeyElement': {'S': 'tester'},
},
- ).should.throw(DynamoDBResponseError)
+ ).should.throw(DynamoDBKeyNotFoundError)
@mock_dynamodb
@@ -411,5 +411,6 @@ def test_batch_read():
item.put()
items = table.batch_get_item([('the-key1'), ('another-key')])
- count = len([item for item in items])
- count.should.equal(2)
+ # Iterate through so that the lazy batch request is actually issued
+ count = len([x for x in items])
+ count.should.equal(2)
diff --git a/tests/test_dynamodb/test_server.py b/tests/test_dynamodb/test_server.py
index 8b90989ae..74ca13b49 100644
--- a/tests/test_dynamodb/test_server.py
+++ b/tests/test_dynamodb/test_server.py
@@ -1,4 +1,4 @@
-import sure # flake8: noqa
+import sure # noqa
import moto.server as server
diff --git a/tests/test_ec2/test_amazon_dev_pay.py b/tests/test_ec2/test_amazon_dev_pay.py
index 2d7566abe..e91f1f4f3 100644
--- a/tests/test_ec2/test_amazon_dev_pay.py
+++ b/tests/test_ec2/test_amazon_dev_pay.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py
index aaf63409c..6cd3812e5 100644
--- a/tests/test_ec2/test_amis.py
+++ b/tests/test_ec2/test_amis.py
@@ -1,7 +1,7 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py
index e83e090a5..3ac21bbc3 100644
--- a/tests/test_ec2/test_availability_zones_and_regions.py
+++ b/tests/test_ec2/test_availability_zones_and_regions.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py
index 1b5c37a8e..ef6645e40 100644
--- a/tests/test_ec2/test_customer_gateways.py
+++ b/tests/test_ec2/test_customer_gateways.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py
index 37dcc2de4..4806db2b8 100644
--- a/tests/test_ec2/test_dhcp_options.py
+++ b/tests/test_ec2/test_dhcp_options.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py
index f6afa13c3..c8c6ef54f 100644
--- a/tests/test_ec2/test_elastic_block_store.py
+++ b/tests/test_ec2/test_elastic_block_store.py
@@ -1,6 +1,6 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py
index a7a0579c5..5aba36b92 100644
--- a/tests/test_ec2/test_elastic_ip_addresses.py
+++ b/tests/test_ec2/test_elastic_ip_addresses.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py
index b0ee46b06..b158086fa 100644
--- a/tests/test_ec2/test_elastic_network_interfaces.py
+++ b/tests/test_ec2/test_elastic_network_interfaces.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py
index f8314a848..c969eb818 100644
--- a/tests/test_ec2/test_general.py
+++ b/tests/test_ec2/test_general.py
@@ -1,6 +1,6 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 618b4d1ff..073ad7e4b 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -1,13 +1,16 @@
+import base64
+
import boto
from boto.ec2.instance import Reservation, InstanceAttribute
-import sure # flake8: noqa
+from boto.exception import EC2ResponseError
+import sure # noqa
from moto import mock_ec2
################ Test Readme ###############
def add_servers(ami_id, count):
- conn = boto.connect_ec2('the_key', 'the_secret')
+ conn = boto.connect_ec2()
for index in range(count):
conn.run_instances(ami_id)
@@ -16,7 +19,7 @@ def add_servers(ami_id, count):
def test_add_servers():
add_servers('ami-1234abcd', 2)
- conn = boto.connect_ec2('the_key', 'the_secret')
+ conn = boto.connect_ec2()
reservations = conn.get_all_instances()
assert len(reservations) == 2
instance1 = reservations[0].instances[0]
@@ -48,6 +51,61 @@ def test_instance_launch_and_terminate():
instance.state.should.equal('shutting-down')
+@mock_ec2
+def test_get_instances_by_id():
+ conn = boto.connect_ec2()
+ reservation = conn.run_instances('ami-1234abcd', min_count=2)
+ instance1, instance2 = reservation.instances
+
+ reservations = conn.get_all_instances(instance_ids=[instance1.id])
+ reservations.should.have.length_of(1)
+ reservation = reservations[0]
+ reservation.instances.should.have.length_of(1)
+ reservation.instances[0].id.should.equal(instance1.id)
+
+ reservations = conn.get_all_instances(instance_ids=[instance1.id, instance2.id])
+ reservations.should.have.length_of(1)
+ reservation = reservations[0]
+ reservation.instances.should.have.length_of(2)
+ instance_ids = [instance.id for instance in reservation.instances]
+ instance_ids.should.equal([instance1.id, instance2.id])
+
+ # Calling get_all_instances with a bad id should raise an error
+ conn.get_all_instances.when.called_with(instance_ids=[instance1.id, "i-1234abcd"]).should.throw(
+ EC2ResponseError,
+ "The instance ID 'i-1234abcd' does not exist"
+ )
+
+
+@mock_ec2
+def test_get_instances_filtering_by_state():
+ conn = boto.connect_ec2()
+ reservation = conn.run_instances('ami-1234abcd', min_count=3)
+ instance1, instance2, instance3 = reservation.instances
+
+ conn.terminate_instances([instance1.id])
+
+ reservations = conn.get_all_instances(filters={'instance-state-name': 'pending'})
+ reservations.should.have.length_of(1)
+ # Since we terminated instance1, only instance2 and instance3 should be returned
+ instance_ids = [instance.id for instance in reservations[0].instances]
+ set(instance_ids).should.equal(set([instance2.id, instance3.id]))
+
+ reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'pending'})
+ reservations.should.have.length_of(1)
+ instance_ids = [instance.id for instance in reservations[0].instances]
+ instance_ids.should.equal([instance2.id])
+
+ reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminated'})
+ list(reservations).should.equal([])
+
+ # get_all_instances should still return all 3
+ reservations = conn.get_all_instances()
+ reservations[0].instances.should.have.length_of(3)
+
+ conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
+
+
@mock_ec2
def test_instance_start_and_stop():
conn = boto.connect_ec2('the_key', 'the_secret')
@@ -98,3 +156,16 @@ def test_instance_attribute_user_data():
instance_attribute = instance.get_attribute("userData")
instance_attribute.should.be.a(InstanceAttribute)
instance_attribute.get("userData").should.equal("this is my user data")
+
+
+@mock_ec2
+def test_user_data_with_run_instance():
+ user_data = "some user data"
+ conn = boto.connect_ec2('the_key', 'the_secret')
+ reservation = conn.run_instances('ami-1234abcd', user_data=user_data)
+ instance = reservation.instances[0]
+
+ instance_attribute = instance.get_attribute("userData")
+ instance_attribute.should.be.a(InstanceAttribute)
+ decoded_user_data = base64.decodestring(instance_attribute.get("userData"))
+ decoded_user_data.should.equal("some user data")
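
The decode step above exists because DescribeInstanceAttribute returns userData base64-encoded; the round trip in isolation:

```python
import base64

# encodestring/decodestring are the Python 2 helpers used above.
encoded = base64.encodestring("some user data")
assert base64.decodestring(encoded) == "some user data"
```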
diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py
index ad039bed7..67f0067ea 100644
--- a/tests/test_ec2/test_internet_gateways.py
+++ b/tests/test_ec2/test_internet_gateways.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_ip_addresses.py b/tests/test_ec2/test_ip_addresses.py
index eecd3117b..1a6c5e84c 100644
--- a/tests/test_ec2/test_ip_addresses.py
+++ b/tests/test_ec2/test_ip_addresses.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py
index 8d8d7b212..7a961051e 100644
--- a/tests/test_ec2/test_key_pairs.py
+++ b/tests/test_ec2/test_key_pairs.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_monitoring.py b/tests/test_ec2/test_monitoring.py
index b534a508f..84f513f21 100644
--- a/tests/test_ec2/test_monitoring.py
+++ b/tests/test_ec2/test_monitoring.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py
index 190f6f380..92f34e54f 100644
--- a/tests/test_ec2/test_network_acls.py
+++ b/tests/test_ec2/test_network_acls.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_placement_groups.py b/tests/test_ec2/test_placement_groups.py
index adc913ccb..2be996840 100644
--- a/tests/test_ec2/test_placement_groups.py
+++ b/tests/test_ec2/test_placement_groups.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_reserved_instances.py b/tests/test_ec2/test_reserved_instances.py
index 8a99f5c23..fdb80bc63 100644
--- a/tests/test_ec2/test_reserved_instances.py
+++ b/tests/test_ec2/test_reserved_instances.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py
index 57e85a4f7..3d376961f 100644
--- a/tests/test_ec2/test_route_tables.py
+++ b/tests/test_ec2/test_route_tables.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py
index 4cdb9a4eb..ce8de872b 100644
--- a/tests/test_ec2/test_security_groups.py
+++ b/tests/test_ec2/test_security_groups.py
@@ -1,6 +1,6 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_server.py b/tests/test_ec2/test_server.py
index 1370bd71b..2d82c1c02 100644
--- a/tests/test_ec2/test_server.py
+++ b/tests/test_ec2/test_server.py
@@ -1,5 +1,5 @@
import re
-import sure # flake8: noqa
+import sure # noqa
import moto.server as server
diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py
index 603e4481b..91a3158eb 100644
--- a/tests/test_ec2/test_spot_instances.py
+++ b/tests/test_ec2/test_spot_instances.py
@@ -1,9 +1,99 @@
+import datetime
+
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
+from moto.core.utils import iso_8601_datetime
@mock_ec2
-def test_spot_instances():
- pass
+def test_request_spot_instances():
+ conn = boto.connect_ec2()
+
+ conn.create_security_group('group1', 'description')
+ conn.create_security_group('group2', 'description')
+
+ start = iso_8601_datetime(datetime.datetime(2013, 1, 1))
+ end = iso_8601_datetime(datetime.datetime(2013, 1, 2))
+
+ request = conn.request_spot_instances(
+ price=0.5, image_id='ami-abcd1234', count=1, type='one-time',
+ valid_from=start, valid_until=end, launch_group="the-group",
+ availability_zone_group='my-group', key_name="test",
+ security_groups=['group1', 'group2'], user_data="some test data",
+ instance_type='m1.small', placement='us-east-1c',
+ kernel_id="test-kernel", ramdisk_id="test-ramdisk",
+ monitoring_enabled=True, subnet_id="subnet123",
+ )
+
+ requests = conn.get_all_spot_instance_requests()
+ requests.should.have.length_of(1)
+ request = requests[0]
+
+ request.state.should.equal("open")
+ request.price.should.equal(0.5)
+ request.launch_specification.image_id.should.equal('ami-abcd1234')
+ request.type.should.equal('one-time')
+ request.valid_from.should.equal(start)
+ request.valid_until.should.equal(end)
+ request.launch_group.should.equal("the-group")
+ request.availability_zone_group.should.equal('my-group')
+ request.launch_specification.key_name.should.equal("test")
+ security_group_names = [group.name for group in request.launch_specification.groups]
+ set(security_group_names).should.equal(set(['group1', 'group2']))
+ request.launch_specification.instance_type.should.equal('m1.small')
+ request.launch_specification.placement.should.equal('us-east-1c')
+ request.launch_specification.kernel.should.equal("test-kernel")
+ request.launch_specification.ramdisk.should.equal("test-ramdisk")
+ request.launch_specification.subnet_id.should.equal("subnet123")
+
+
+@mock_ec2
+def test_request_spot_instances_default_arguments():
+ """
+ Test that moto sets the correct default arguments
+ """
+ conn = boto.connect_ec2()
+
+ request = conn.request_spot_instances(
+ price=0.5, image_id='ami-abcd1234',
+ )
+
+ requests = conn.get_all_spot_instance_requests()
+ requests.should.have.length_of(1)
+ request = requests[0]
+
+ request.state.should.equal("open")
+ request.price.should.equal(0.5)
+ request.launch_specification.image_id.should.equal('ami-abcd1234')
+ request.type.should.equal('one-time')
+ request.valid_from.should.equal(None)
+ request.valid_until.should.equal(None)
+ request.launch_group.should.equal(None)
+ request.availability_zone_group.should.equal(None)
+ request.launch_specification.key_name.should.equal(None)
+ security_group_names = [group.name for group in request.launch_specification.groups]
+ security_group_names.should.equal(["default"])
+ request.launch_specification.instance_type.should.equal('m1.small')
+ request.launch_specification.placement.should.equal(None)
+ request.launch_specification.kernel.should.equal(None)
+ request.launch_specification.ramdisk.should.equal(None)
+ request.launch_specification.subnet_id.should.equal(None)
+
+
+@mock_ec2
+def test_cancel_spot_instance_request():
+ conn = boto.connect_ec2()
+
+ conn.request_spot_instances(
+ price=0.5, image_id='ami-abcd1234',
+ )
+
+ requests = conn.get_all_spot_instance_requests()
+ requests.should.have.length_of(1)
+
+ conn.cancel_spot_instance_requests([requests[0].id])
+
+ requests = conn.get_all_spot_instance_requests()
+ requests.should.have.length_of(0)
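
cancel_spot_instance_requests takes a list of request ids; a sketch assuming it cancels only the ids it is given and leaves other open requests untouched:

```python
import boto
import sure  # noqa

from moto import mock_ec2


@mock_ec2
def test_cancel_one_of_two_spot_requests():
    conn = boto.connect_ec2()
    conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')
    conn.request_spot_instances(price=0.5, image_id='ami-abcd1234')

    requests = conn.get_all_spot_instance_requests()
    requests.should.have.length_of(2)

    # Cancel only the first; the second should remain open.
    conn.cancel_spot_instance_requests([requests[0].id])
    conn.get_all_spot_instance_requests().should.have.length_of(1)
```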
diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py
index e6ebebc6b..f12d35de5 100644
--- a/tests/test_ec2/test_subnets.py
+++ b/tests/test_ec2/test_subnets.py
@@ -1,6 +1,6 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py
index f73fc0479..1138b5e9f 100644
--- a/tests/test_ec2/test_tags.py
+++ b/tests/test_ec2/test_tags.py
@@ -1,5 +1,7 @@
+import itertools
+
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
@@ -19,3 +21,17 @@ def test_instance_launch_and_terminate():
instance.remove_tag("a key")
conn.get_all_tags().should.have.length_of(0)
+
+
+@mock_ec2
+def test_instance_launch_and_retrieve_all_instances():
+ conn = boto.connect_ec2('the_key', 'the_secret')
+ reservation = conn.run_instances('ami-1234abcd')
+ instance = reservation.instances[0]
+
+ instance.add_tag("a key", "some value")
+ chain = itertools.chain.from_iterable
+ existing_instances = list(chain([res.instances for res in conn.get_all_instances()]))
+ existing_instances.should.have.length_of(1)
+ existing_instance = existing_instances[0]
+ existing_instance.tags["a key"].should.equal("some value")
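
The chain.from_iterable idiom above flattens the reservation-to-instance nesting that get_all_instances returns; in isolation:

```python
import itertools

# Each reservation wraps a list of instances; from_iterable
# collapses exactly one level of nesting.
reservations = [['i-111', 'i-222'], ['i-333']]
assert list(itertools.chain.from_iterable(reservations)) == ['i-111', 'i-222', 'i-333']
```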
diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py
index 24e85b98d..b9fa80a65 100644
--- a/tests/test_ec2/test_virtual_private_gateways.py
+++ b/tests/test_ec2/test_virtual_private_gateways.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_vm_export.py b/tests/test_ec2/test_vm_export.py
index 6050a8c40..478372b78 100644
--- a/tests/test_ec2/test_vm_export.py
+++ b/tests/test_ec2/test_vm_export.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_vm_import.py b/tests/test_ec2/test_vm_import.py
index ef3e5e1a5..0c310fc5b 100644
--- a/tests/test_ec2/test_vm_import.py
+++ b/tests/test_ec2/test_vm_import.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py
index cdf28ee55..3a4570ecd 100644
--- a/tests/test_ec2/test_vpcs.py
+++ b/tests/test_ec2/test_vpcs.py
@@ -1,6 +1,6 @@
import boto
from boto.exception import EC2ResponseError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py
index 589d11952..4ce1e398e 100644
--- a/tests/test_ec2/test_vpn_connections.py
+++ b/tests/test_ec2/test_vpn_connections.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_ec2/test_windows.py b/tests/test_ec2/test_windows.py
index 960538465..92f3be6a4 100644
--- a/tests/test_ec2/test_windows.py
+++ b/tests/test_ec2/test_windows.py
@@ -1,5 +1,5 @@
import boto
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ec2
diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py
new file mode 100644
index 000000000..cd4e449e1
--- /dev/null
+++ b/tests/test_elb/test_elb.py
@@ -0,0 +1,124 @@
+import boto
+from boto.ec2.elb import HealthCheck
+import sure # noqa
+
+from moto import mock_elb, mock_ec2
+
+
+@mock_elb
+def test_create_load_balancer():
+ conn = boto.connect_elb()
+
+ zones = ['us-east-1a', 'us-east-1b']
+ ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+ conn.create_load_balancer('my-lb', zones, ports)
+
+ balancers = conn.get_all_load_balancers()
+ balancer = balancers[0]
+ balancer.name.should.equal("my-lb")
+ set(balancer.availability_zones).should.equal(set(['us-east-1a', 'us-east-1b']))
+ listener1 = balancer.listeners[0]
+ listener1.load_balancer_port.should.equal(80)
+ listener1.instance_port.should.equal(8080)
+ listener1.protocol.should.equal("HTTP")
+ listener2 = balancer.listeners[1]
+ listener2.load_balancer_port.should.equal(443)
+ listener2.instance_port.should.equal(8443)
+ listener2.protocol.should.equal("TCP")
+
+
+@mock_elb
+def test_get_load_balancers_by_name():
+ conn = boto.connect_elb()
+
+ zones = ['us-east-1a', 'us-east-1b']
+ ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+ conn.create_load_balancer('my-lb1', zones, ports)
+ conn.create_load_balancer('my-lb2', zones, ports)
+ conn.create_load_balancer('my-lb3', zones, ports)
+
+ conn.get_all_load_balancers().should.have.length_of(3)
+ conn.get_all_load_balancers(load_balancer_names=['my-lb1']).should.have.length_of(1)
+ conn.get_all_load_balancers(load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2)
+
+
+@mock_elb
+def test_delete_load_balancer():
+ conn = boto.connect_elb()
+
+ zones = ['us-east-1a']
+ ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+ conn.create_load_balancer('my-lb', zones, ports)
+
+ balancers = conn.get_all_load_balancers()
+ balancers.should.have.length_of(1)
+
+ conn.delete_load_balancer("my-lb")
+ balancers = conn.get_all_load_balancers()
+ balancers.should.have.length_of(0)
+
+
+@mock_elb
+def test_create_health_check():
+ conn = boto.connect_elb()
+
+ hc = HealthCheck(
+ interval=20,
+ healthy_threshold=3,
+ unhealthy_threshold=5,
+ target='HTTP:8080/health',
+ timeout=23,
+ )
+
+ ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+ lb = conn.create_load_balancer('my-lb', [], ports)
+ lb.configure_health_check(hc)
+
+ balancer = conn.get_all_load_balancers()[0]
+ health_check = balancer.health_check
+ health_check.interval.should.equal(20)
+ health_check.healthy_threshold.should.equal(3)
+ health_check.unhealthy_threshold.should.equal(5)
+ health_check.target.should.equal('HTTP:8080/health')
+ health_check.timeout.should.equal(23)
+
+
+@mock_ec2
+@mock_elb
+def test_register_instances():
+ ec2_conn = boto.connect_ec2()
+ reservation = ec2_conn.run_instances('ami-1234abcd', 2)
+ instance_id1 = reservation.instances[0].id
+ instance_id2 = reservation.instances[1].id
+
+ conn = boto.connect_elb()
+ ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+ lb = conn.create_load_balancer('my-lb', [], ports)
+
+ lb.register_instances([instance_id1, instance_id2])
+
+ balancer = conn.get_all_load_balancers()[0]
+ instance_ids = [instance.id for instance in balancer.instances]
+ set(instance_ids).should.equal(set([instance_id1, instance_id2]))
+
+
+@mock_ec2
+@mock_elb
+def test_deregister_instances():
+ ec2_conn = boto.connect_ec2()
+ reservation = ec2_conn.run_instances('ami-1234abcd', 2)
+ instance_id1 = reservation.instances[0].id
+ instance_id2 = reservation.instances[1].id
+
+ conn = boto.connect_elb()
+ ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+ lb = conn.create_load_balancer('my-lb', [], ports)
+
+ lb.register_instances([instance_id1, instance_id2])
+
+ balancer = conn.get_all_load_balancers()[0]
+ balancer.instances.should.have.length_of(2)
+ balancer.deregister_instances([instance_id1])
+
+ balancer.instances.should.have.length_of(1)
+ balancer.instances[0].id.should.equal(instance_id2)
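
The register/deregister tests stack @mock_ec2 and @mock_elb because the instance ids come from the EC2 mock while the balancer lives in the ELB mock; both backends must be active at once. A minimal sketch of the stacked form (names are illustrative):

```python
import boto
import sure  # noqa

from moto import mock_ec2, mock_elb


@mock_ec2
@mock_elb
def test_stacked_mocks():
    # EC2 supplies the instance, ELB accepts the registration.
    instance_id = boto.connect_ec2().run_instances('ami-1234abcd').instances[0].id
    lb = boto.connect_elb().create_load_balancer('lb', [], [(80, 8080, 'http')])
    lb.register_instances([instance_id])
    boto.connect_elb().get_all_load_balancers()[0].instances.should.have.length_of(1)
```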
diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py
new file mode 100644
index 000000000..9fc172dd6
--- /dev/null
+++ b/tests/test_elb/test_server.py
@@ -0,0 +1,15 @@
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+server.configure_urls("elb")
+
+
+def test_elb_describe_load_balancers():
+ test_client = server.app.test_client()
+ res = test_client.get('/?Action=DescribeLoadBalancers')
+
+ res.data.should.contain('DescribeLoadBalancersResponse')
diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py
new file mode 100644
index 000000000..eb17c37ad
--- /dev/null
+++ b/tests/test_emr/test_emr.py
@@ -0,0 +1,292 @@
+import boto
+from boto.emr.instance_group import InstanceGroup
+from boto.emr.step import StreamingStep
+import sure # noqa
+
+from moto import mock_emr
+from tests.helpers import requires_boto_gte
+
+
+@mock_emr
+def test_create_job_flow():
+ conn = boto.connect_emr()
+
+ step1 = StreamingStep(
+ name='My wordcount example',
+ mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+ reducer='aggregate',
+ input='s3n://elasticmapreduce/samples/wordcount/input',
+ output='s3n://output_bucket/output/wordcount_output'
+ )
+
+ step2 = StreamingStep(
+ name='My wordcount example2',
+ mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
+ reducer='aggregate',
+ input='s3n://elasticmapreduce/samples/wordcount/input2',
+ output='s3n://output_bucket/output/wordcount_output2'
+ )
+
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ master_instance_type='m1.medium',
+ slave_instance_type='m1.small',
+ steps=[step1, step2],
+ )
+
+ job_flow = conn.describe_jobflow(job_id)
+ job_flow.state.should.equal('STARTING')
+ job_flow.jobflowid.should.equal(job_id)
+ job_flow.name.should.equal('My jobflow')
+ job_flow.masterinstancetype.should.equal('m1.medium')
+ job_flow.slaveinstancetype.should.equal('m1.small')
+ job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs')
+ job_flow.visibletoallusers.should.equal('False')
+ int(job_flow.normalizedinstancehours).should.equal(0)
+ job_step = job_flow.steps[0]
+ job_step.name.should.equal('My wordcount example')
+ job_step.state.should.equal('STARTING')
+ args = [arg.value for arg in job_step.args]
+ args.should.equal([
+ '-mapper',
+ 's3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+ '-reducer',
+ 'aggregate',
+ '-input',
+ 's3n://elasticmapreduce/samples/wordcount/input',
+ '-output',
+ 's3n://output_bucket/output/wordcount_output',
+ ])
+
+ job_step2 = job_flow.steps[1]
+ job_step2.name.should.equal('My wordcount example2')
+ job_step2.state.should.equal('PENDING')
+ args = [arg.value for arg in job_step2.args]
+ args.should.equal([
+ '-mapper',
+ 's3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
+ '-reducer',
+ 'aggregate',
+ '-input',
+ 's3n://elasticmapreduce/samples/wordcount/input2',
+ '-output',
+ 's3n://output_bucket/output/wordcount_output2',
+ ])
+
+
+@requires_boto_gte("2.8")
+@mock_emr
+def test_create_job_flow_with_new_params():
+ # Test that run_jobflow works with newer params
+ conn = boto.connect_emr()
+
+ conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ master_instance_type='m1.medium',
+ slave_instance_type='m1.small',
+ job_flow_role='some-role-arn',
+ steps=[],
+ )
+
+
+@mock_emr
+def test_create_job_flow_visible_to_all_users():
+ conn = boto.connect_emr()
+
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ job_flow_role='some-role-arn',
+ steps=[],
+ visible_to_all_users=True,
+ )
+ job_flow = conn.describe_jobflow(job_id)
+ job_flow.visibletoallusers.should.equal('True')
+
+
+@mock_emr
+def test_terminate_job_flow():
+ conn = boto.connect_emr()
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[]
+ )
+
+ flow = conn.describe_jobflows()[0]
+ flow.state.should.equal('STARTING')
+ conn.terminate_jobflow(job_id)
+ flow = conn.describe_jobflows()[0]
+ flow.state.should.equal('TERMINATED')
+
+
+@mock_emr
+def test_add_steps_to_flow():
+ conn = boto.connect_emr()
+
+ step1 = StreamingStep(
+ name='My wordcount example',
+ mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+ reducer='aggregate',
+ input='s3n://elasticmapreduce/samples/wordcount/input',
+ output='s3n://output_bucket/output/wordcount_output'
+ )
+
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[step1]
+ )
+
+ job_flow = conn.describe_jobflow(job_id)
+ job_flow.state.should.equal('STARTING')
+ job_flow.jobflowid.should.equal(job_id)
+ job_flow.name.should.equal('My jobflow')
+ job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs')
+
+ step2 = StreamingStep(
+ name='My wordcount example2',
+ mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
+ reducer='aggregate',
+ input='s3n://elasticmapreduce/samples/wordcount/input2',
+ output='s3n://output_bucket/output/wordcount_output2'
+ )
+
+ conn.add_jobflow_steps(job_id, [step2])
+
+ job_flow = conn.describe_jobflow(job_id)
+ job_step = job_flow.steps[0]
+ job_step.name.should.equal('My wordcount example')
+ job_step.state.should.equal('STARTING')
+ args = [arg.value for arg in job_step.args]
+ args.should.equal([
+ '-mapper',
+ 's3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+ '-reducer',
+ 'aggregate',
+ '-input',
+ 's3n://elasticmapreduce/samples/wordcount/input',
+ '-output',
+ 's3n://output_bucket/output/wordcount_output',
+ ])
+
+ job_step2 = job_flow.steps[1]
+ job_step2.name.should.equal('My wordcount example2')
+ job_step2.state.should.equal('PENDING')
+ args = [arg.value for arg in job_step2.args]
+ args.should.equal([
+ '-mapper',
+ 's3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
+ '-reducer',
+ 'aggregate',
+ '-input',
+ 's3n://elasticmapreduce/samples/wordcount/input2',
+ '-output',
+ 's3n://output_bucket/output/wordcount_output2',
+ ])
+
+
+@mock_emr
+def test_create_instance_groups():
+ conn = boto.connect_emr()
+
+ step1 = StreamingStep(
+ name='My wordcount example',
+ mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+ reducer='aggregate',
+ input='s3n://elasticmapreduce/samples/wordcount/input',
+ output='s3n://output_bucket/output/wordcount_output'
+ )
+
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[step1],
+ )
+
+ instance_group = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
+ instance_group = conn.add_instance_groups(job_id, [instance_group])
+ instance_group_id = instance_group.instancegroupids
+ job_flow = conn.describe_jobflows()[0]
+ int(job_flow.instancecount).should.equal(6)
+ instance_group = job_flow.instancegroups[0]
+ instance_group.instancegroupid.should.equal(instance_group_id)
+ int(instance_group.instancerunningcount).should.equal(6)
+ instance_group.instancerole.should.equal('TASK')
+ instance_group.instancetype.should.equal('c1.medium')
+ instance_group.market.should.equal('SPOT')
+ instance_group.name.should.equal('spot-0.07')
+ instance_group.bidprice.should.equal('0.07')
+
+
+@mock_emr
+def test_modify_instance_groups():
+ conn = boto.connect_emr()
+
+ step1 = StreamingStep(
+ name='My wordcount example',
+ mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+ reducer='aggregate',
+ input='s3n://elasticmapreduce/samples/wordcount/input',
+ output='s3n://output_bucket/output/wordcount_output'
+ )
+
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ steps=[step1]
+ )
+
+ instance_group1 = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
+ instance_group2 = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
+ instance_group = conn.add_instance_groups(job_id, [instance_group1, instance_group2])
+ instance_group_ids = instance_group.instancegroupids.split(",")
+
+ job_flow = conn.describe_jobflows()[0]
+ int(job_flow.instancecount).should.equal(12)
+ instance_group = job_flow.instancegroups[0]
+ int(instance_group.instancerunningcount).should.equal(6)
+
+ conn.modify_instance_groups(instance_group_ids, [2, 3])
+
+ job_flow = conn.describe_jobflows()[0]
+ int(job_flow.instancecount).should.equal(5)
+ instance_group1 = [
+ group for group
+ in job_flow.instancegroups
+ if group.instancegroupid == instance_group_ids[0]
+ ][0]
+ int(instance_group1.instancerunningcount).should.equal(2)
+ instance_group2 = [
+ group for group
+ in job_flow.instancegroups
+ if group.instancegroupid == instance_group_ids[1]
+ ][0]
+ int(instance_group2.instancerunningcount).should.equal(3)
+
+
+@mock_emr
+def test_set_visible_to_all_users():
+ conn = boto.connect_emr()
+
+ job_id = conn.run_jobflow(
+ name='My jobflow',
+ log_uri='s3://some_bucket/jobflow_logs',
+ job_flow_role='some-role-arn',
+ steps=[],
+ visible_to_all_users=False,
+ )
+ job_flow = conn.describe_jobflow(job_id)
+ job_flow.visibletoallusers.should.equal('False')
+
+ conn.set_visible_to_all_users(job_id, True)
+
+ job_flow = conn.describe_jobflow(job_id)
+ job_flow.visibletoallusers.should.equal('True')
+
+ conn.set_visible_to_all_users(job_id, False)
+
+ job_flow = conn.describe_jobflow(job_id)
+ job_flow.visibletoallusers.should.equal('False')
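
The InstanceGroup constructor is called positionally throughout these tests; spelled out with boto's keyword names for readability (equivalent to the calls above, per boto.emr.instance_group):

```python
from boto.emr.instance_group import InstanceGroup

# Same as InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07')
group = InstanceGroup(
    num_instances=6,
    role='TASK',
    type='c1.medium',
    market='SPOT',
    name='spot-0.07',
    bidprice='0.07',
)
```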
diff --git a/tests/test_emr/test_server.py b/tests/test_emr/test_server.py
new file mode 100644
index 000000000..85ba7c4db
--- /dev/null
+++ b/tests/test_emr/test_server.py
@@ -0,0 +1,16 @@
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+server.configure_urls("emr")
+
+
+def test_describe_jobflows():
+ test_client = server.app.test_client()
+ res = test_client.get('/?Action=DescribeJobFlows')
+
+ res.data.should.contain('<DescribeJobFlowsResult>')
+ res.data.should.contain('<JobFlows>')
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 1f713fabb..5b5fbf564 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -4,9 +4,10 @@ from io import BytesIO
import boto
from boto.exception import S3ResponseError
from boto.s3.key import Key
+from freezegun import freeze_time
import requests
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_s3
@@ -83,6 +84,31 @@ def test_empty_key():
bucket.get_key("the-key").get_contents_as_string().should.equal('')
+@mock_s3
+def test_empty_key_set_on_existing_key():
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket("foobar")
+ key = Key(bucket)
+ key.key = "the-key"
+ key.set_contents_from_string("foobar")
+
+ bucket.get_key("the-key").get_contents_as_string().should.equal('foobar')
+
+ key.set_contents_from_string("")
+ bucket.get_key("the-key").get_contents_as_string().should.equal('')
+
+
+@mock_s3
+def test_large_key_save():
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket("foobar")
+ key = Key(bucket)
+ key.key = "the-key"
+ key.set_contents_from_string("foobar" * 100000)
+
+ bucket.get_key("the-key").get_contents_as_string().should.equal('foobar' * 100000)
+
+
@mock_s3
def test_copy_key():
conn = boto.connect_s3('the_key', 'the_secret')
@@ -98,39 +124,31 @@ def test_copy_key():
@mock_s3
-def test_get_all_keys():
+def test_set_metadata():
conn = boto.connect_s3('the_key', 'the_secret')
bucket = conn.create_bucket("foobar")
key = Key(bucket)
+ key.key = 'the-key'
+ key.set_metadata('md', 'Metadatastring')
+ key.set_contents_from_string("Testval")
+
+ bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3
+def test_last_modified():
+ # See https://github.com/boto/boto/issues/466
+ conn = boto.connect_s3()
+ bucket = conn.create_bucket("foobar")
+ key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
- key2 = Key(bucket)
- key2.key = "folder/some-stuff"
- key2.set_contents_from_string("some value")
+ rs = bucket.get_all_keys()
+ rs[0].last_modified.should.equal('2012-01-01T12:00:00Z')
- key3 = Key(bucket)
- key3.key = "folder/more-folder/foobar"
- key3.set_contents_from_string("some value")
-
- key4 = Key(bucket)
- key4.key = "a-key"
- key4.set_contents_from_string("some value")
-
- keys = bucket.get_all_keys()
- keys.should.have.length_of(3)
-
- keys[0].name.should.equal("a-key")
- keys[1].name.should.equal("the-key")
-
- # Prefix
- keys[2].name.should.equal("folder")
-
- keys = bucket.get_all_keys(prefix="folder/")
- keys.should.have.length_of(2)
-
- keys[0].name.should.equal("folder/some-stuff")
- keys[1].name.should.equal("folder/more-folder")
+ bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
@mock_s3
@@ -177,11 +195,102 @@ def test_get_all_buckets():
buckets.should.have.length_of(2)
+@mock_s3
+def test_post_to_bucket():
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket("foobar")
+
+ requests.post("https://foobar.s3.amazonaws.com/", {
+ 'key': 'the-key',
+ 'file': 'nothing'
+ })
+
+ bucket.get_key('the-key').get_contents_as_string().should.equal('nothing')
+
+
+@mock_s3
+def test_post_with_metadata_to_bucket():
+ conn = boto.connect_s3('the_key', 'the_secret')
+ bucket = conn.create_bucket("foobar")
+
+ requests.post("https://foobar.s3.amazonaws.com/", {
+ 'key': 'the-key',
+ 'file': 'nothing',
+ 'x-amz-meta-test': 'metadata'
+ })
+
+ bucket.get_key('the-key').get_metadata('test').should.equal('metadata')
+
+
@mock_s3
def test_bucket_method_not_implemented():
- requests.post.when.called_with("https://foobar.s3.amazonaws.com/").should.throw(NotImplementedError)
+ requests.patch.when.called_with("https://foobar.s3.amazonaws.com/").should.throw(NotImplementedError)
@mock_s3
def test_key_method_not_implemented():
requests.post.when.called_with("https://foobar.s3.amazonaws.com/foo").should.throw(NotImplementedError)
+
+
+@mock_s3
+def test_bucket_name_with_dot():
+ conn = boto.connect_s3()
+ bucket = conn.create_bucket('firstname.lastname')
+
+ k = Key(bucket, 'somekey')
+ k.set_contents_from_string('somedata')
+
+
+@mock_s3
+def test_key_with_special_characters():
+ conn = boto.connect_s3()
+ bucket = conn.create_bucket('test_bucket_name')
+
+ key = Key(bucket, 'test_list_keys_2/x?y')
+ key.set_contents_from_string('value1')
+
+ key_list = bucket.list('test_list_keys_2/', '/')
+ keys = [x for x in key_list]
+ keys[0].name.should.equal("test_list_keys_2/x?y")
+
+
+@mock_s3
+def test_bucket_key_listing_order():
+ conn = boto.connect_s3()
+ bucket = conn.create_bucket('test_bucket')
+ prefix = 'toplevel/'
+
+ def store(name):
+ k = Key(bucket, prefix + name)
+ k.set_contents_from_string('somedata')
+
+ names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
+
+ for name in names:
+ store(name)
+
+ delimiter = None
+ keys = [x.name for x in bucket.list(prefix, delimiter)]
+ keys.should.equal([
+ 'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
+ 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
+ ])
+
+ delimiter = '/'
+ keys = [x.name for x in bucket.list(prefix, delimiter)]
+ keys.should.equal([
+ 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
+ ])
+
+ # Test delimiter with no prefix
+ delimiter = '/'
+ keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
+ keys.should.equal(['toplevel'])
+
+ delimiter = None
+ keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+ keys.should.equal([u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key'])
+
+ delimiter = '/'
+ keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+ keys.should.equal([u'toplevel/x/'])
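
The delimiter cases above encode S3's listing rule: any key containing the delimiter past the prefix collapses into a single common-prefix entry. A toy model of the rule (not moto's implementation), with illustrative inputs:

```python
def collapse(keys, prefix, delimiter):
    # Keys with the delimiter past the prefix collapse to a common prefix.
    out = []
    for key in keys:
        rest = key[len(prefix):]
        if delimiter in rest:
            entry = prefix + rest.split(delimiter)[0] + delimiter
        else:
            entry = key
        if entry not in out:
            out.append(entry)
    return out

assert collapse(['toplevel/x/key', 'toplevel/y.key1'], 'toplevel/', '/') == \
    ['toplevel/x/', 'toplevel/y.key1']
```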
diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py
new file mode 100644
index 000000000..cb8bd8b8c
--- /dev/null
+++ b/tests/test_s3/test_s3_utils.py
@@ -0,0 +1,14 @@
+from sure import expect
+from moto.s3.utils import bucket_name_from_url
+
+
+def test_base_url():
+ expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None)
+
+
+def test_localhost_bucket():
+ expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')).should.equal("wfoobar")
+
+
+def test_localhost_without_bucket():
+ expect(bucket_name_from_url('https://www.localhost:5000/def')).should.equal(None)
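
bucket_name_from_url pulls the bucket out of the subdomain, which is why the bare host and the www host above both return None. Presumably the same holds for virtual-host style amazonaws.com URLs; a sketch with an illustrative bucket name:

```python
from sure import expect
from moto.s3.utils import bucket_name_from_url

# Virtual-host style: the bucket rides in the subdomain.
expect(bucket_name_from_url('https://mybucket.s3.amazonaws.com/')).should.equal('mybucket')
```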
diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py
index 0bfeb6efa..d2f38cb07 100644
--- a/tests/test_s3/test_server.py
+++ b/tests/test_s3/test_server.py
@@ -1,4 +1,4 @@
-import sure # flake8: noqa
+import sure # noqa
import moto.server as server
@@ -33,3 +33,18 @@ def test_s3_server_bucket_create():
res = test_client.get('/bar', 'http://foobar.localhost:5000/')
res.status_code.should.equal(200)
res.data.should.equal("test value")
+
+
+def test_s3_server_post_to_bucket():
+ test_client = server.app.test_client()
+ res = test_client.put('/', 'http://foobar.localhost:5000/')
+ res.status_code.should.equal(200)
+
+ test_client.post('/', "https://foobar.localhost:5000/", data={
+ 'key': 'the-key',
+ 'file': 'nothing'
+ })
+
+ res = test_client.get('/the-key', 'http://foobar.localhost:5000/')
+ res.status_code.should.equal(200)
+ res.data.should.equal("nothing")
diff --git a/tests/test_ses/test_server.py b/tests/test_ses/test_server.py
index 9ec047427..876fa1240 100644
--- a/tests/test_ses/test_server.py
+++ b/tests/test_ses/test_server.py
@@ -1,4 +1,4 @@
-import sure # flake8: noqa
+import sure # noqa
import moto.server as server
diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py
index cde6b76b6..6b8f357df 100644
--- a/tests/test_ses/test_ses.py
+++ b/tests/test_ses/test_ses.py
@@ -3,7 +3,7 @@ import email
import boto
from boto.exception import BotoServerError
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_ses
@@ -44,7 +44,8 @@ def test_delete_identity():
def test_send_email():
conn = boto.connect_ses('the_key', 'the_secret')
- conn.send_email.when.called_with("test@example.com", "test subject",
+ conn.send_email.when.called_with(
+ "test@example.com", "test subject",
"test body", "test_to@example.com").should.throw(BotoServerError)
conn.verify_email_identity("test@example.com")
@@ -53,7 +54,21 @@ def test_send_email():
send_quota = conn.get_send_quota()
sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours'])
sent_count.should.equal(1)
+
+@mock_ses
+def test_send_html_email():
+ conn = boto.connect_ses('the_key', 'the_secret')
+ conn.send_email.when.called_with(
+ "test@example.com", "test subject",
+ "test body", "test_to@example.com", format="html").should.throw(BotoServerError)
+
+ conn.verify_email_identity("test@example.com")
+ conn.send_email("test@example.com", "test subject", "test body", "test_to@example.com", format="html")
+
+ send_quota = conn.get_send_quota()
+ sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours'])
+ sent_count.should.equal(1)
@mock_ses
def test_send_raw_email():
@@ -74,12 +89,18 @@ def test_send_raw_email():
part.add_header('Content-Disposition', 'attachment; filename=test.txt')
message.attach(part)
- conn.send_raw_email.when.called_with(source=message['From'], raw_message=message.as_string(),
- destinations=message['To']).should.throw(BotoServerError)
+ conn.send_raw_email.when.called_with(
+ source=message['From'],
+ raw_message=message.as_string(),
+ destinations=message['To']
+ ).should.throw(BotoServerError)
conn.verify_email_identity("test@example.com")
- conn.send_raw_email(source=message['From'], raw_message=message.as_string(),
- destinations=message['To'])
+ conn.send_raw_email(
+ source=message['From'],
+ raw_message=message.as_string(),
+ destinations=message['To']
+ )
send_quota = conn.get_send_quota()
sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours'])
diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py
index 7a335b455..8934dcecc 100644
--- a/tests/test_sqs/test_server.py
+++ b/tests/test_sqs/test_server.py
@@ -1,5 +1,5 @@
import re
-import sure # flake8: noqa
+import sure # noqa
import moto.server as server
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 040a84afa..bfd1c2759 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -2,7 +2,7 @@ import boto
from boto.exception import SQSError
from boto.sqs.message import RawMessage
import requests
-import sure # flake8: noqa
+import sure # noqa
from moto import mock_sqs
@@ -18,6 +18,19 @@ def test_create_queue():
all_queues[0].get_timeout().should.equal(60)
+@mock_sqs
+def test_get_queue():
+ conn = boto.connect_sqs('the_key', 'the_secret')
+ conn.create_queue("test-queue", visibility_timeout=60)
+
+ queue = conn.get_queue("test-queue")
+ queue.name.should.equal("test-queue")
+ queue.get_timeout().should.equal(60)
+
+ nonexisting_queue = conn.get_queue("nonexisting_queue")
+ nonexisting_queue.should.be.none
+
+
@mock_sqs
def test_delete_queue():
conn = boto.connect_sqs('the_key', 'the_secret')
@@ -130,3 +143,33 @@ def test_delete_batch_operation():
@mock_sqs
def test_sqs_method_not_implemented():
requests.post.when.called_with("https://sqs.amazonaws.com/?Action=[foobar]").should.throw(NotImplementedError)
+
+
+@mock_sqs
+def test_queue_attributes():
+ conn = boto.connect_sqs('the_key', 'the_secret')
+
+ queue_name = 'test-queue'
+ visibility_timeout = 60
+
+ queue = conn.create_queue(queue_name, visibility_timeout=visibility_timeout)
+
+ attributes = queue.get_attributes()
+
+ attributes['QueueArn'].should.look_like(
+ 'arn:aws:sqs:sqs.us-east-1:123456789012:%s' % queue_name)
+
+ attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout))
+
+ attribute_names = queue.get_attributes().keys()
+ attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible')
+ attribute_names.should.contain('MessageRetentionPeriod')
+ attribute_names.should.contain('ApproximateNumberOfMessagesDelayed')
+ attribute_names.should.contain('MaximumMessageSize')
+ attribute_names.should.contain('CreatedTimestamp')
+ attribute_names.should.contain('ApproximateNumberOfMessages')
+ attribute_names.should.contain('ReceiveMessageWaitTimeSeconds')
+ attribute_names.should.contain('DelaySeconds')
+ attribute_names.should.contain('VisibilityTimeout')
+ attribute_names.should.contain('LastModifiedTimestamp')
+ attribute_names.should.contain('QueueArn')
diff --git a/tests/test_sts/test_server.py b/tests/test_sts/test_server.py
new file mode 100644
index 000000000..9a505422f
--- /dev/null
+++ b/tests/test_sts/test_server.py
@@ -0,0 +1,16 @@
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+server.configure_urls("sts")
+
+
+def test_sts_get_session_token():
+ test_client = server.app.test_client()
+ res = test_client.get('/?Action=GetSessionToken')
+ res.status_code.should.equal(200)
+ res.data.should.contain("SessionToken")
+ res.data.should.contain("AccessKeyId")
diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py
new file mode 100644
index 000000000..e0dbc9cd6
--- /dev/null
+++ b/tests/test_sts/test_sts.py
@@ -0,0 +1,51 @@
+import json
+
+import boto
+from freezegun import freeze_time
+import sure # noqa
+
+from moto import mock_sts
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_sts
+def test_get_session_token():
+ conn = boto.connect_sts()
+ token = conn.get_session_token(duration=123)
+
+ token.expiration.should.equal('2012-01-01T12:02:03Z')
+ token.session_token.should.equal("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
+ token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
+ token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_sts
+def test_assume_role():
+ conn = boto.connect_sts()
+
+ policy = json.dumps({
+ "Statement": [
+ {
+ "Sid": "Stmt13690092345534",
+ "Action": [
+ "S3:ListBucket"
+ ],
+ "Effect": "Allow",
+ "Resource": [
+ "arn:aws:s3:::foobar-tester"
+ ]
+ },
+ ]
+ })
+ s3_role = "arn:aws:iam::123456789012:role/test-role"
+ role = conn.assume_role(s3_role, "session-name", policy, duration_seconds=123)
+
+ credentials = role.credentials
+ credentials.expiration.should.equal('2012-01-01T12:02:03Z')
+ credentials.session_token.should.equal("BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE")
+ credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE")
+ credentials.secret_key.should.equal("aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY")
+
+ role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role")
+ role.user.assume_role_id.should.contain("session-name")
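
Both tests freeze the clock, so the expiration strings are plain arithmetic: duration=123 seconds past the frozen 2012-01-01 12:00:00. Checked in isolation:

```python
import datetime

start = datetime.datetime(2012, 1, 1, 12, 0, 0)
expiration = start + datetime.timedelta(seconds=123)
assert expiration.isoformat() == '2012-01-01T12:02:03'
```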