From e64c73efed1da2a17b9e254f5ee6870548b05f80 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 26 Mar 2013 10:11:16 -0400 Subject: [PATCH 01/85] Allow writing empty body to s3 key if content-length is zero. Better fix for #6. --- moto/s3/responses.py | 24 +++++++++++------------- tests/test_s3/test_s3.py | 14 ++++++++++++++ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 80a0a9421..5f1be0fbb 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -80,19 +80,17 @@ def key_response(uri_info, method, body, headers): s3_backend.copy_key(src_bucket, src_key, bucket_name, key_name) template = Template(S3_OBJECT_COPY_RESPONSE) return template.render(key=src_key) - if body is not None: - key = s3_backend.get_key(bucket_name, key_name) - if not key or body: - # We want to write the key in once of two circumstances. - # - The key does not currently exist. - # - The key already exists, but body is a truthy value. - # This allows us to write empty strings to keys for the first - # write, but not subsequent. This is because HTTPretty sends - # an empty string on connection close. This is a temporary fix - # while HTTPretty gets fixed. - new_key = s3_backend.set_key(bucket_name, key_name, body) - template = Template(S3_OBJECT_RESPONSE) - return template.render(key=new_key), dict(etag=new_key.etag) + content_length = int(headers.get('Content-Length', 0)) + if body or (body == '' and content_length == 0): + # We want to write the key in one of two circumstances. + # - Anytime we are given a truthy body value + # - We are given an empty body value and the content length is zero. + # The reason we do not set the key to an empty string if the + # content length is not zero is because we are sometimes sent an + # empty string as part of closing the connection.
+ new_key = s3_backend.set_key(bucket_name, key_name, body) + template = Template(S3_OBJECT_RESPONSE) + return template.render(key=new_key), dict(etag=new_key.etag) key = s3_backend.get_key(bucket_name, key_name) if key: return "", dict(etag=key.etag) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 31e011bfc..278ce9e2f 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -62,6 +62,20 @@ def test_empty_key(): bucket.get_key("the-key").get_contents_as_string().should.equal('') +@mock_s3 +def test_empty_key_set_on_existing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar") + + bucket.get_key("the-key").get_contents_as_string().should.equal('foobar') + + key.set_contents_from_string("") + bucket.get_key("the-key").get_contents_as_string().should.equal('') + + @mock_s3 def test_copy_key(): conn = boto.connect_s3('the_key', 'the_secret') From 7f26525445b573404dbe4a35e0275e547857caba Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 26 Mar 2013 10:13:50 -0400 Subject: [PATCH 02/85] 0.1.2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e20d9fe97..e9f537a5b 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.1.1', + version='0.1.2', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 3882858639463d6e232cd7711b70a7bd6981b019 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 27 Mar 2013 19:57:36 -0400 Subject: [PATCH 03/85] remove note about which boto version was tested --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 7fbd40cdf..a22e8b839 100644 --- a/README.md +++ b/README.md @@ -162,9 +162,6 @@ Then go to [localhost](http://localhost:5000/?Action=DescribeInstances) to see a $ pip install moto ``` -This library has been tested on boto v2.5+. - - ## Thanks A huge thanks to [Gabriel Falcão](https://github.com/gabrielfalcao) and his [HTTPretty](https://github.com/gabrielfalcao/HTTPretty) library. Moto would not exist without it. From b7c46ae7bfc30f460c5db10d23f1051bf82c54b8 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 29 Mar 2013 17:45:33 -0400 Subject: [PATCH 04/85] fix S3 last_modified. 
Closes #8 --- moto/s3/models.py | 22 ++++++++++++++++++++-- moto/s3/responses.py | 12 ++++++------ tests/test_s3/test_s3.py | 18 ++++++++++++++++++ 3 files changed, 44 insertions(+), 8 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index d80eec417..d8cd06103 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1,5 +1,4 @@ -# from boto.s3.bucket import Bucket -# from boto.s3.key import Key +import datetime import md5 from moto.core import BaseBackend @@ -9,6 +8,7 @@ class FakeKey(object): def __init__(self, name, value): self.name = name self.value = value + self.last_modified = datetime.datetime.now() @property def etag(self): @@ -16,6 +16,24 @@ class FakeKey(object): value_md5.update(self.value) return '"{0}"'.format(value_md5.hexdigest()) + @property + def last_modified_ISO8601(self): + return self.last_modified.strftime("%Y-%m-%dT%H:%M:%SZ") + + @property + def last_modified_RFC1123(self): + # Different datetime formats depending on how the key is obtained + # https://github.com/boto/boto/issues/466 + RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' + return self.last_modified.strftime(RFC1123) + + @property + def response_dict(self): + return { + 'etag': self.etag, + 'last-modified': self.last_modified_RFC1123, + } + @property def size(self): return len(self.value) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 5f1be0fbb..bc3669bc7 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -90,14 +90,14 @@ def key_response(uri_info, method, body, headers): # empty string as part of closing the connection. new_key = s3_backend.set_key(bucket_name, key_name, body) template = Template(S3_OBJECT_RESPONSE) - return template.render(key=new_key), dict(etag=new_key.etag) + return template.render(key=new_key), new_key.response_dict key = s3_backend.get_key(bucket_name, key_name) if key: - return "", dict(etag=key.etag) + return "", key.response_dict elif method == 'HEAD': key = s3_backend.get_key(bucket_name, key_name) if key: - return S3_OBJECT_RESPONSE, dict(etag=key.etag) + return S3_OBJECT_RESPONSE, key.response_dict else: return "", dict(status=404) elif method == 'DELETE': @@ -133,7 +133,7 @@ S3_BUCKET_GET_RESPONSE = """ {% for key in result_keys %} {{ key.name }} - 2006-01-01T12:00:00.000Z + {{ key.last_modified_ISO8601 }} {{ key.etag }} {{ key.size }} STANDARD @@ -190,13 +190,13 @@ S3_DELETE_OBJECT_SUCCESS = """ {{ key.etag }} - 2006-03-01T12:00:00.183Z + {{ key.last_modified_ISO8601 }} """ S3_OBJECT_COPY_RESPONSE = """ {{ key.etag }} - 2008-02-18T13:54:10.183Z + {{ key.last_modified_ISO8601 }} """ diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 278ce9e2f..1c8c4c8a8 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,8 +1,10 @@ +import datetime import urllib2 import boto from boto.exception import S3ResponseError from boto.s3.key import Key +from freezegun import freeze_time import requests import sure # flake8: noqa @@ -90,6 +92,22 @@ def test_copy_key(): bucket.get_key("new-key").get_contents_as_string().should.equal("some value") +@freeze_time("2012-01-01 12:00:00") +@mock_s3 +def test_last_modified(): + # See https://github.com/boto/boto/issues/466 + conn = boto.connect_s3() + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + rs = bucket.get_all_keys() + rs[0].last_modified.should.equal('2012-01-01T12:00:00Z') + + bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') + + @mock_s3 def 
test_get_all_keys(): conn = boto.connect_s3('the_key', 'the_secret') From d96769a91c6f848c40ecc6d627bf77f97d58c345 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 13 Apr 2013 17:28:00 -0400 Subject: [PATCH 05/85] Fix for buckets with periods in them. Closes #15. --- moto/s3/urls.py | 4 ++-- tests/test_s3/test_s3.py | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 662a717b0..21370c15a 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -1,10 +1,10 @@ from .responses import bucket_response, key_response url_bases = [ - "https?://(?P[a-zA-Z0-9\-_]*)\.?s3.amazonaws.com" + "https?://(?P[a-zA-Z0-9\-_.]*)\.?s3.amazonaws.com" ] url_paths = { '{0}/$': bucket_response, - '{0}/(?P[a-zA-Z0-9\-_]+)': key_response, + '{0}/(?P[a-zA-Z0-9\-_.]+)': key_response, } diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 1c8c4c8a8..d4ac1170b 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -196,3 +196,12 @@ def test_bucket_method_not_implemented(): @mock_s3 def test_key_method_not_implemented(): requests.post.when.called_with("https://foobar.s3.amazonaws.com/foo").should.throw(NotImplementedError) + + +@mock_s3 +def test_bucket_name_with_dot(): + conn = boto.connect_s3() + bucket = conn.create_bucket('firstname.lastname') + + k = Key(bucket, 'somekey') + k.set_contents_from_string('somedata') From 8fe0c918534928f7f2cacc2fa20cbe9d682a181e Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 13 Apr 2013 18:22:26 -0400 Subject: [PATCH 06/85] Fix services to work better with other regions. Closes #17. --- moto/dynamodb/urls.py | 2 +- moto/ec2/urls.py | 2 +- moto/ses/urls.py | 2 +- tests/test_dynamodb/test_dynamodb.py | 9 +++++++++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/moto/dynamodb/urls.py b/moto/dynamodb/urls.py index 1132b7815..85634ef2f 100644 --- a/moto/dynamodb/urls.py +++ b/moto/dynamodb/urls.py @@ -1,7 +1,7 @@ from .responses import handler url_bases = [ - "https?://dynamodb.us-east-1.amazonaws.com", + "https?://dynamodb.(.+).amazonaws.com", "https?://sts.amazonaws.com", ] diff --git a/moto/ec2/urls.py b/moto/ec2/urls.py index e4f05aea7..65413369d 100644 --- a/moto/ec2/urls.py +++ b/moto/ec2/urls.py @@ -2,7 +2,7 @@ from .responses import EC2Response url_bases = [ - "https?://ec2.us-east-1.amazonaws.com", + "https?://ec2.(.+).amazonaws.com", ] url_paths = { diff --git a/moto/ses/urls.py b/moto/ses/urls.py index e67423c97..acdc49c82 100644 --- a/moto/ses/urls.py +++ b/moto/ses/urls.py @@ -1,7 +1,7 @@ from .responses import EmailResponse url_bases = [ - "https?://email.us-east-1.amazonaws.com" + "https?://email.(.+).amazonaws.com" ] url_paths = { diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index c3582d7c4..5a2f36321 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -43,3 +43,12 @@ def test_sts_handler(): res = requests.post("https://sts.amazonaws.com/", data={"GetSessionToken": ""}) res.ok.should.be.ok res.text.should.contain("SecretAccessKey") + + +@mock_dynamodb +def test_dynamodb_with_connect_to_region(): + # this will work if connected with boto.connect_dynamodb() + dynamodb = boto.dynamodb.connect_to_region('us-west-2') + + schema = dynamodb.create_schema('column1', str(), 'column2', int()) + dynamodb.create_table('table1', schema, 200, 200) From 98d7a50790995473e53afd033ba9a2da19a5e460 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 13 Apr 2013 19:00:37 -0400 Subject: 
[PATCH 07/85] Unquote s3 key names. Closes #13. --- moto/s3/models.py | 7 +++++++ moto/s3/utils.py | 5 +++++ tests/test_s3/test_s3.py | 13 +++++++++++++ 3 files changed, 25 insertions(+) diff --git a/moto/s3/models.py b/moto/s3/models.py index d8cd06103..644edfb95 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -2,6 +2,7 @@ import datetime import md5 from moto.core import BaseBackend +from .utils import clean_key_name class FakeKey(object): @@ -72,6 +73,8 @@ class S3Backend(BaseBackend): return None def set_key(self, bucket_name, key_name, value): + key_name = clean_key_name(key_name) + bucket = self.buckets[bucket_name] new_key = FakeKey(name=key_name, value=value) bucket.keys[key_name] = new_key @@ -79,6 +82,7 @@ class S3Backend(BaseBackend): return new_key def get_key(self, bucket_name, key_name): + key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) if bucket: return bucket.keys.get(key_name) @@ -107,10 +111,13 @@ class S3Backend(BaseBackend): return key_results, folder_results def delete_key(self, bucket_name, key_name): + key_name = clean_key_name(key_name) bucket = self.buckets[bucket_name] return bucket.keys.pop(key_name) def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name): + src_key_name = clean_key_name(src_key_name) + dest_key_name = clean_key_name(dest_key_name) src_bucket = self.buckets[src_bucket_name] dest_bucket = self.buckets[dest_bucket_name] dest_bucket.keys[dest_key_name] = src_bucket.keys[src_key_name] diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 8786585f5..d9e5671e9 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -1,4 +1,5 @@ import re +import urllib2 import urlparse bucket_name_regex = re.compile("(.+).s3.amazonaws.com") @@ -21,3 +22,7 @@ def bucket_name_from_hostname(hostname): else: # No subdomain found. return None + + +def clean_key_name(key_name): + return urllib2.unquote(key_name) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index d4ac1170b..a12ea9c30 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -205,3 +205,16 @@ def test_bucket_name_with_dot(): k = Key(bucket, 'somekey') k.set_contents_from_string('somedata') + + +@mock_s3 +def test_key_with_special_characters(): + conn = boto.connect_s3() + bucket = conn.create_bucket('test_bucket_name') + + key = Key(bucket, 'test_list_keys_2/x?y') + key.set_contents_from_string('value1') + + key_list = bucket.list('test_list_keys_2/', '/') + keys = [x for x in key_list] + keys[0].name.should.equal("test_list_keys_2/x?y") From be26daaff499d78b1863d9ed8501e5fd06a99c2b Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 13 Apr 2013 19:23:32 -0400 Subject: [PATCH 08/85] Fix S3 bucket list objects order and delimiters. Closes #14. 
--- moto/s3/models.py | 10 +++--- moto/s3/responses.py | 18 +++++++---- tests/test_s3/test_s3.py | 70 +++++++++++++++++++--------------------- 3 files changed, 51 insertions(+), 47 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 644edfb95..7912edfe9 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -87,20 +87,22 @@ class S3Backend(BaseBackend): if bucket: return bucket.keys.get(key_name) - def prefix_query(self, bucket, prefix): + def prefix_query(self, bucket, prefix, delimiter): key_results = set() folder_results = set() if prefix: for key_name, key in bucket.keys.iteritems(): if key_name.startswith(prefix): - if '/' in key_name.lstrip(prefix): - key_without_prefix = key_name.lstrip(prefix).split("/")[0] + if delimiter and '/' in key_name.lstrip(prefix): + # If delimiter, we need to split out folder_results + key_without_prefix = "{}/".format(key_name.lstrip(prefix).split("/")[0]) folder_results.add("{}{}".format(prefix, key_without_prefix)) else: key_results.add(key) else: for key_name, key in bucket.keys.iteritems(): - if '/' in key_name: + if delimiter and '/' in key_name: + # If delimiter, we need to split out folder_results folder_results.add(key_name.split("/")[0]) else: key_results.add(key) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index bc3669bc7..e5a2bed65 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -27,11 +27,13 @@ def bucket_response(uri, method, body, headers): bucket = s3_backend.get_bucket(bucket_name) if bucket: prefix = querystring.get('prefix', [None])[0] - result_keys, result_folders = s3_backend.prefix_query(bucket, prefix) + delimiter = querystring.get('delimiter') + result_keys, result_folders = s3_backend.prefix_query(bucket, prefix, delimiter) template = Template(S3_BUCKET_GET_RESPONSE) return template.render( bucket=bucket, prefix=prefix, + delimiter=delimiter, result_keys=result_keys, result_folders=result_folders ) @@ -128,7 +130,7 @@ S3_BUCKET_GET_RESPONSE = """ {{ bucket.name }} {{ prefix }} 1000 - / + {{ delimiter }} false {% for key in result_keys %} @@ -144,11 +146,13 @@ S3_BUCKET_GET_RESPONSE = """ STANDARD {% endfor %} - {% for folder in result_folders %} - - {{ folder }} - - {% endfor %} + {% if delimiter %} + {% for folder in result_folders %} + + {{ folder }} + + {% endfor %} + {% endif %} """ S3_BUCKET_CREATE_RESPONSE = """ diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index a12ea9c30..a68e511e3 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -108,42 +108,6 @@ def test_last_modified(): bucket.get_key("the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') -@mock_s3 -def test_get_all_keys(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - key2 = Key(bucket) - key2.key = "folder/some-stuff" - key2.set_contents_from_string("some value") - - key3 = Key(bucket) - key3.key = "folder/more-folder/foobar" - key3.set_contents_from_string("some value") - - key4 = Key(bucket) - key4.key = "a-key" - key4.set_contents_from_string("some value") - - keys = bucket.get_all_keys() - keys.should.have.length_of(3) - - keys[0].name.should.equal("a-key") - keys[1].name.should.equal("the-key") - - # Prefix - keys[2].name.should.equal("folder") - - keys = bucket.get_all_keys(prefix="folder/") - keys.should.have.length_of(2) - - keys[0].name.should.equal("folder/some-stuff") - keys[1].name.should.equal("folder/more-folder") - 
- @mock_s3 def test_missing_bucket(): conn = boto.connect_s3('the_key', 'the_secret') @@ -218,3 +182,37 @@ def test_key_with_special_characters(): key_list = bucket.list('test_list_keys_2/', '/') keys = [x for x in key_list] keys[0].name.should.equal("test_list_keys_2/x?y") + + +@mock_s3 +def test_bucket_key_listing_order(): + conn = boto.connect_s3() + bucket = conn.create_bucket('test_bucket') + prefix = 'toplevel/' + + def store(name): + k = Key(bucket, prefix + name) + k.set_contents_from_string('somedata') + + names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key'] + + for name in names: + store(name) + + delimiter = None + keys = [x.name for x in bucket.list(prefix, delimiter)] + keys.should.equal([ + 'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key', + 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3' + ]) + + delimiter = '/' + keys = [x.name for x in bucket.list(prefix, delimiter)] + keys.should.equal([ + 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/' + ]) + + # Test delimiter with no prefix + delimiter = '/' + keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)] + keys.should.equal(['toplevel']) From afc7a64a341f08fde6a51ed8acc79f9691ce5ef8 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 14 Apr 2013 11:17:25 -0400 Subject: [PATCH 09/85] 0.1.3 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e9f537a5b..f65bfc2fb 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.1.2', + version='0.1.3', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From dbf2368aa65993c5cc1e95679f9b1f23bcda75e3 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 18 Apr 2013 23:07:24 -0400 Subject: [PATCH 10/85] Implement SQS get_queue_url. 
Closes #18 --- moto/sqs/responses.py | 15 +++++++++++++++ tests/test_sqs/test_sqs.py | 10 ++++++++++ 2 files changed, 25 insertions(+) diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b9bf9130a..484fe681d 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -17,6 +17,12 @@ class QueuesResponse(BaseResponse): template = Template(CREATE_QUEUE_RESPONSE) return template.render(queue=queue) + def get_queue_url(self): + queue_name = self.querystring.get("QueueName")[0] + queue = sqs_backend.get_queue(queue_name) + template = Template(GET_QUEUE_URL_RESPONSE) + return template.render(queue=queue) + def list_queues(self): queues = sqs_backend.list_queues() template = Template(LIST_QUEUES_RESPONSE) @@ -143,6 +149,15 @@ CREATE_QUEUE_RESPONSE = """ """ +GET_QUEUE_URL_RESPONSE = """ + + http://sqs.us-east-1.amazonaws.com/123456789012/{{ queue.name }} + + + 470a6f13-2ed9-4181-ad8a-2fdea142988e + +""" + LIST_QUEUES_RESPONSE = """ {% for queue in queues %} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 040a84afa..eeb22ec96 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -18,6 +18,16 @@ def test_create_queue(): all_queues[0].get_timeout().should.equal(60) +@mock_sqs +def test_get_queue(): + conn = boto.connect_sqs('the_key', 'the_secret') + conn.create_queue("test-queue", visibility_timeout=60) + + queue = conn.get_queue("test-queue") + queue.name.should.equal("test-queue") + queue.get_timeout().should.equal(60) + + @mock_sqs def test_delete_queue(): conn = boto.connect_sqs('the_key', 'the_secret') From 6107658c657bdf3ba165ef2b8f19c07ea448c7df Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 18 Apr 2013 23:07:52 -0400 Subject: [PATCH 11/85] 0.1.4 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f65bfc2fb..89ff49f02 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.1.3', + version='0.1.4', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From c9fb6f1cc16ac6360ebf91baf43b8820655f3fb4 Mon Sep 17 00:00:00 2001 From: "dilshod.tadjibaev" Date: Tue, 23 Apr 2013 23:53:30 -0700 Subject: [PATCH 12/85] Implemented remaining Queue attributes. 
This closes spulec/moto#22 The following attributes were added: - ApproximateNumberOfMessagesDelayed - ApproximateNumberOfMessagesNotVisible - CreatedTimestamp - DelaySeconds - LastModifiedTimestamp - MaximumMessageSize - MessageRetentionPeriod - QueueArn - ReceiveMessageWaitTimeSeconds --- moto/sqs/models.py | 26 +++++++++++++++++++++++++- tests/test_sqs/test_sqs.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 49d004f36..c0ac5926d 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -1,4 +1,5 @@ import md5 +import time from moto.core import BaseBackend from moto.core.utils import camelcase_to_underscores, get_random_message_id @@ -6,6 +7,7 @@ from .utils import generate_receipt_handle class Message(object): + def __init__(self, message_id, body): self.id = message_id self.body = body @@ -19,13 +21,35 @@ class Message(object): class Queue(object): - camelcase_attributes = ['VisibilityTimeout', 'ApproximateNumberOfMessages'] + camelcase_attributes = ['ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesDelayed', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'DelaySeconds', + 'LastModifiedTimestamp', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + 'QueueArn', + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout'] def __init__(self, name, visibility_timeout): self.name = name self.visibility_timeout = visibility_timeout or 30 self.messages = [] + now = time.time() + + self.approximate_number_of_messages_delayed = 0 + self.approximate_number_of_messages_not_visible = 0 + self.created_timestamp = now + self.delay_seconds = 0 + self.last_modified_timestamp = now + self.maximum_message_size = 64 << 10 + self.message_retention_period = 86400 * 4 # four days + self.queue_arn = 'arn:aws:sqs:sqs.us-east-1:123456789012:%s' % self.name + self.receive_message_wait_time_seconds = 0 + @property def attributes(self): result = {} diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index eeb22ec96..eef6b29e7 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -140,3 +140,33 @@ def test_delete_batch_operation(): @mock_sqs def test_sqs_method_not_implemented(): requests.post.when.called_with("https://sqs.amazonaws.com/?Action=[foobar]").should.throw(NotImplementedError) + + +@mock_sqs +def test_queue_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + + queue_name = 'test-queue' + visibility_timeout = 60 + + queue = conn.create_queue(queue_name, visibility_timeout=visibility_timeout) + + attributes = queue.get_attributes() + + attributes['QueueArn'].should.look_like( + 'arn:aws:sqs:sqs.us-east-1:123456789012:%s' % queue_name) + + attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) + + attribute_names = queue.get_attributes().keys() + attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') + attribute_names.should.contain('MessageRetentionPeriod') + attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') + attribute_names.should.contain('MaximumMessageSize') + attribute_names.should.contain('CreatedTimestamp') + attribute_names.should.contain('ApproximateNumberOfMessages') + attribute_names.should.contain('ReceiveMessageWaitTimeSeconds') + attribute_names.should.contain('DelaySeconds') + attribute_names.should.contain('VisibilityTimeout') + attribute_names.should.contain('LastModifiedTimestamp') + attribute_names.should.contain('QueueArn') From 
64baad79b04aff41bd2674ca17ea398b30b25d9b Mon Sep 17 00:00:00 2001 From: "dilshod.tadjibaev" Date: Wed, 24 Apr 2013 00:41:45 -0700 Subject: [PATCH 13/85] Returning None in get_queue when not found. Closes spulec/moto#23 --- moto/sqs/models.py | 2 +- moto/sqs/responses.py | 8 ++++++-- tests/test_sqs/test_sqs.py | 3 +++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 49d004f36..afceaf9e5 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -53,7 +53,7 @@ class SQSBackend(BaseBackend): return self.queues.values() def get_queue(self, queue_name): - return self.queues[queue_name] + return self.queues.get(queue_name, None) def delete_queue(self, queue_name): if queue_name in self.queues: diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 484fe681d..0f582cf85 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -20,8 +20,12 @@ class QueuesResponse(BaseResponse): def get_queue_url(self): queue_name = self.querystring.get("QueueName")[0] queue = sqs_backend.get_queue(queue_name) - template = Template(GET_QUEUE_URL_RESPONSE) - return template.render(queue=queue) + if queue: + template = Template(GET_QUEUE_URL_RESPONSE) + return template.render(queue=queue) + else: + return "", dict(status=404) + def list_queues(self): queues = sqs_backend.list_queues() diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index eeb22ec96..0fe25e2cd 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -27,6 +27,9 @@ def test_get_queue(): queue.name.should.equal("test-queue") queue.get_timeout().should.equal(60) + nonexisting_queue = conn.get_queue("nonexisting_queue") + nonexisting_queue.should.be.none + @mock_sqs def test_delete_queue(): From 5a9762f964120f608d6aac19b3ac5b61673324e0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 26 Apr 2013 17:13:43 -0400 Subject: [PATCH 14/85] Add license. --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..90a85e714 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright 2012 Steve Pulec + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file From d5789ae8661979426cd04d5151fd91dc356ac06a Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 27 Apr 2013 17:54:24 -0400 Subject: [PATCH 15/85] add Authors file --- AUTHORS.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 AUTHORS.md diff --git a/AUTHORS.md b/AUTHORS.md new file mode 100644 index 000000000..8ce4ae01c --- /dev/null +++ b/AUTHORS.md @@ -0,0 +1,6 @@ +## Moto Contributors + +Moto is written by Steve Pulec with contributions from: + +* [Zach Smith](https://github.com/zmsmith) +* [Dilshod Tadjibaev](https://github.com/antimora) From 85042c5c76a9b3c816d0770f51252986ba66d0c9 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 27 Apr 2013 17:58:37 -0400 Subject: [PATCH 16/85] 0.1.5 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 89ff49f02..fab0a59d1 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.1.4', + version='0.1.5', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 84aeff5b060eedb699b17713f3dfd2e40df06b0a Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 29 Apr 2013 23:36:24 -0400 Subject: [PATCH 17/85] Switch to real version of httpretty. woot. --- moto/core/models.py | 2 +- moto/packages/__init__.py | 0 moto/packages/httpretty.py | 944 ------------------------------------- setup.py | 1 + 4 files changed, 2 insertions(+), 945 deletions(-) delete mode 100644 moto/packages/__init__.py delete mode 100644 moto/packages/httpretty.py diff --git a/moto/core/models.py b/moto/core/models.py index e98c1eed3..c451fb11d 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -1,7 +1,7 @@ import functools import re -from moto.packages.httpretty import HTTPretty +from httpretty import HTTPretty from .responses import metadata_response from .utils import convert_regex_to_flask_path diff --git a/moto/packages/__init__.py b/moto/packages/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/moto/packages/httpretty.py b/moto/packages/httpretty.py deleted file mode 100644 index ebd69e4ed..000000000 --- a/moto/packages/httpretty.py +++ /dev/null @@ -1,944 +0,0 @@ -# #!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (C) <2011-2013> Gabriel Falcão -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. 
-from __future__ import unicode_literals - -version = '0.5.12' - -import re -import inspect -import socket -import functools -import itertools -import warnings -import logging -import sys -import traceback -import types - -PY3 = sys.version_info[0] == 3 -if PY3: - text_type = str - byte_type = bytes - basestring = (str, bytes) - - import io - StringIO = io.BytesIO - - class Py3kObject(object): - def __repr__(self): - return self.__str__() -else: - text_type = unicode - byte_type = str - import StringIO - StringIO = StringIO.StringIO - - -class Py3kObject(object): - def __repr__(self): - ret = self.__str__() - if PY3: - return ret - else: - ret.encode('utf-8') - -from datetime import datetime -from datetime import timedelta -try: - from urllib.parse import urlsplit, urlunsplit, parse_qs, quote, quote_plus -except ImportError: - from urlparse import urlsplit, urlunsplit, parse_qs - from urllib import quote, quote_plus - -try: - from http.server import BaseHTTPRequestHandler -except ImportError: - from BaseHTTPServer import BaseHTTPRequestHandler - -old_socket = socket.socket -old_create_connection = socket.create_connection -old_gethostbyname = socket.gethostbyname -old_gethostname = socket.gethostname -old_getaddrinfo = socket.getaddrinfo -old_socksocket = None -old_ssl_wrap_socket = None -old_sslwrap_simple = None -old_sslsocket = None - -try: - import socks - old_socksocket = socks.socksocket -except ImportError: - socks = None - -try: - import ssl - old_ssl_wrap_socket = ssl.wrap_socket - if not PY3: - old_sslwrap_simple = ssl.sslwrap_simple - old_sslsocket = ssl.SSLSocket -except ImportError: - ssl = None - - -ClassTypes = (type,) -if not PY3: - ClassTypes = (type, types.ClassType) - - -POTENTIAL_HTTP_PORTS = [80, 443] - - -class HTTPrettyError(Exception): - pass - - -def utf8(s): - if isinstance(s, text_type): - s = s.encode('utf-8') - - return byte_type(s) - - -def decode_utf8(s): - if isinstance(s, byte_type): - s = s.decode("utf-8") - - return text_type(s) - - -def parse_requestline(s): - """ - http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5 - - >>> parse_requestline('GET / HTTP/1.0') - ('GET', '/', '1.0') - >>> parse_requestline('post /testurl htTP/1.1') - ('POST', '/testurl', '1.1') - >>> parse_requestline('Im not a RequestLine') - Traceback (most recent call last): - ... 
- ValueError: Not a Request-Line - """ - methods = b'|'.join(HTTPretty.METHODS) - m = re.match(br'(' + methods + b')\s+(.*)\s+HTTP/(1.[0|1])', s, re.I) - if m: - return m.group(1).upper(), m.group(2), m.group(3) - else: - raise ValueError('Not a Request-Line') - - -class HTTPrettyRequest(BaseHTTPRequestHandler, Py3kObject): - def __init__(self, headers, body=''): - self.body = utf8(body) - self.raw_headers = utf8(headers) - self.client_address = ['10.0.0.1'] - self.rfile = StringIO(b'\r\n\r\n'.join([headers.strip(), body])) - self.wfile = StringIO() - self.raw_requestline = self.rfile.readline() - self.error_code = self.error_message = None - self.parse_request() - self.method = self.command - self.querystring = parse_qs(self.path.split("?", 1)[-1]) - - def __str__(self): - return 'HTTPrettyRequest(headers={0}, body="{1}")'.format( - self.headers, - self.body, - ) - - -class EmptyRequestHeaders(dict): - pass - - -class HTTPrettyRequestEmpty(object): - body = '' - headers = EmptyRequestHeaders() - - -class FakeSockFile(StringIO): - pass - - -class FakeSSLSocket(object): - def __init__(self, sock, *args, **kw): - self._httpretty_sock = sock - - def __getattr__(self, attr): - if attr == '_httpretty_sock': - return super(FakeSSLSocket, self).__getattribute__(attr) - - return getattr(self._httpretty_sock, attr) - - -class fakesock(object): - class socket(object): - _entry = None - debuglevel = 0 - _sent_data = [] - - def __init__(self, family, type, protocol=6): - self.setsockopt(family, type, protocol) - self.truesock = old_socket(family, type, protocol) - self._closed = True - self.fd = FakeSockFile() - self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT - self._sock = self - self.is_http = False - - def getpeercert(self, *a, **kw): - now = datetime.now() - shift = now + timedelta(days=30 * 12) - return { - 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), - 'subjectAltName': ( - ('DNS', '*%s' % self._host), - ('DNS', self._host), - ('DNS', '*'), - ), - 'subject': ( - ( - ('organizationName', u'*.%s' % self._host), - ), - ( - ('organizationalUnitName', - u'Domain Control Validated'), - ), - ( - ('commonName', u'*.%s' % self._host), - ), - ), - } - - def ssl(self, sock, *args, **kw): - return sock - - def setsockopt(self, family, type, protocol): - self.family = family - self.protocol = protocol - self.type = type - - def connect(self, address): - self._address = (self._host, self._port) = address - self._closed = False - self.is_http = self._port in POTENTIAL_HTTP_PORTS - if not self.is_http: - self.truesock.connect(self._address) - - def close(self): - if not self._closed: - self.truesock.close() - self._closed = True - - def makefile(self, mode='r', bufsize=-1): - self._mode = mode - self._bufsize = bufsize - - if self._entry: - self._entry.fill_filekind(self.fd, self._request) - - return self.fd - - def _true_sendall(self, data, *args, **kw): - if self.is_http: - self.truesock.connect(self._address) - - self.truesock.sendall(data, *args, **kw) - - _d = True - while _d: - try: - _d = self.truesock.recv(16) - self.truesock.settimeout(0.0) - self.fd.write(_d) - - except socket.error: - break - - self.fd.seek(0) - - def sendall(self, data, *args, **kw): - - self._sent_data.append(data) - hostnames = [getattr(i.info, 'hostname', None) for i in HTTPretty._entries.keys()] - self.fd.seek(0) - try: - requestline, _ = data.split(b'\r\n', 1) - method, path, version = parse_requestline(requestline) - is_parsing_headers = True - except ValueError: - is_parsing_headers = False - - if not is_parsing_headers: - 
if len(self._sent_data) > 1: - headers, body = map(utf8, self._sent_data[-2:]) - - method, path, version = parse_requestline(headers) - split_url = urlsplit(path) - - info = URIInfo(hostname=self._host, port=self._port, - path=split_url.path, - query=split_url.query) - - # If we are sending more data to a dynamic response entry, - # we need to call the method again. - if self._entry and self._entry.dynamic_response: - self._entry.body(info, method, body, headers) - - try: - return HTTPretty.historify_request(headers, body, False) - - except Exception as e: - logging.error(traceback.format_exc(e)) - return self._true_sendall(data, *args, **kw) - - # path might come with - s = urlsplit(path) - POTENTIAL_HTTP_PORTS.append(int(s.port or 80)) - headers, body = map(utf8, data.split(b'\r\n\r\n', 1)) - - request = HTTPretty.historify_request(headers, body) - - info = URIInfo(hostname=self._host, port=self._port, - path=s.path, - query=s.query, - last_request=request) - - entries = [] - - for matcher, value in HTTPretty._entries.items(): - if matcher.matches(info): - entries = value - break - - if not entries: - self._true_sendall(data) - return - - self._entry = matcher.get_next_entry(method) - self._request = (info, body, headers) - - def debug(*a, **kw): - frame = inspect.stack()[0][0] - lines = map(utf8, traceback.format_stack(frame)) - - message = [ - "HTTPretty intercepted and unexpected socket method call.", - ("Please open an issue at " - "'https://github.com/gabrielfalcao/HTTPretty/issues'"), - "And paste the following traceback:\n", - "".join(decode_utf8(lines)), - ] - raise RuntimeError("\n".join(message)) - - def settimeout(self, new_timeout): - self.timeout = new_timeout - - sendto = send = recvfrom_into = recv_into = recvfrom = recv = debug - - -def fake_wrap_socket(s, *args, **kw): - return s - - -def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): - s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - s.settimeout(timeout) - if source_address: - s.bind(source_address) - s.connect(address) - return s - - -def fake_gethostbyname(host): - return host - - -def fake_gethostname(): - return 'localhost' - - -def fake_getaddrinfo( - host, port, family=None, socktype=None, proto=None, flags=None): - return [(2, 1, 6, '', (host, port))] - - -STATUSES = { - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Non-Authoritative Information", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request a Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Requested Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 
424: "Method Failure", - 425: "Unordered Collection", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Unavailable For Legal Reasons", - 451: "Redirect", - 494: "Request Header Too Large", - 495: "Cert Error", - 496: "No Cert", - 497: "HTTP to HTTPS", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} - - -class Entry(Py3kObject): - def __init__(self, method, uri, body, - adding_headers=None, - forcing_headers=None, - status=200, - streaming=False, - **headers): - - self.method = method - self.uri = uri - - if callable(body): - self.dynamic_response = True - else: - self.dynamic_response = False - - self.body = body - self.streaming = streaming - - if self.dynamic_response or self.streaming: - self.body_length = 0 - else: - self.body_length = len(self.body or '') - - self.adding_headers = adding_headers or {} - self.forcing_headers = forcing_headers or {} - self.status = int(status) - - for k, v in headers.items(): - name = "-".join(k.split("_")).capitalize() - self.adding_headers[name] = v - - self.validate() - - def validate(self): - content_length_keys = 'Content-Length', 'content-length' - for key in content_length_keys: - got = self.adding_headers.get( - key, self.forcing_headers.get(key, None)) - - if got is None: - continue - - try: - igot = int(got) - except ValueError: - warnings.warn( - 'HTTPretty got to register the Content-Length header ' \ - 'with "%r" which is not a number' % got, - ) - - if igot > self.body_length: - raise HTTPrettyError( - 'HTTPretty got inconsistent parameters. The header ' \ - 'Content-Length you registered expects size "%d" but ' \ - 'the body you registered for that has actually length ' \ - '"%d".' 
% ( - igot, self.body_length, - ) - ) - - def __str__(self): - return r'' % ( - self.method, self.uri, self.status) - - def normalize_headers(self, headers): - new = {} - for k in headers: - new_k = '-'.join([s.lower() for s in k.split('-')]) - new[new_k] = headers[k] - - return new - - def fill_filekind(self, fk, request): - now = datetime.utcnow() - - headers = { - 'status': self.status, - 'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'), - 'server': 'Python/HTTPretty', - 'connection': 'close', - } - - if self.forcing_headers: - headers = self.forcing_headers - - if self.dynamic_response: - req_info, req_body, req_headers = request - response = self.body(req_info, self.method, req_body, req_headers) - if isinstance(response, basestring): - body = response - else: - body, new_headers = response - headers.update(new_headers) - else: - body = self.body - - if self.adding_headers: - headers.update(self.normalize_headers(self.adding_headers)) - - headers = self.normalize_headers(headers) - - status = headers.get('status', self.status) - string_list = [ - 'HTTP/1.1 %d %s' % (status, STATUSES[status]), - ] - - if 'date' in headers: - string_list.append('date: %s' % headers.pop('date')) - - if not self.forcing_headers: - content_type = headers.pop('content-type', - 'text/plain; charset=utf-8') - - body_length = self.body_length - if self.dynamic_response: - body_length = len(body) - content_length = headers.pop('content-length', body_length) - - string_list.append('content-type: %s' % content_type) - if not self.streaming: - string_list.append('content-length: %s' % content_length) - - string_list.append('server: %s' % headers.pop('server')) - - for k, v in headers.items(): - string_list.append( - '{0}: {1}'.format(k, v), - ) - - for item in string_list: - fk.write(utf8(item) + b'\n') - - fk.write(b'\r\n') - - if self.streaming: - self.body, body = itertools.tee(body) - for chunk in body: - fk.write(utf8(chunk)) - else: - fk.write(utf8(body)) - - fk.seek(0) - - -def url_fix(s, charset='utf-8'): - scheme, netloc, path, querystring, fragment = urlsplit(s) - path = quote(path, b'/%') - querystring = quote_plus(querystring, b':&=') - return urlunsplit((scheme, netloc, path, querystring, fragment)) - - -class URIInfo(Py3kObject): - def __init__(self, - username='', - password='', - hostname='', - port=80, - path='/', - query='', - fragment='', - scheme='', - last_request=None): - - self.username = username or '' - self.password = password or '' - self.hostname = hostname or '' - - if port: - port = int(port) - - elif scheme == 'https': - port = 443 - - self.port = port or 80 - self.path = path or '' - self.query = query or '' - self.scheme = scheme or (self.port is 80 and "http" or "https") - self.fragment = fragment or '' - self.last_request = last_request - - def __str__(self): - attrs = ( - 'username', - 'password', - 'hostname', - 'port', - 'path', - ) - fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs]) - return r'' % fmt - - def __hash__(self): - return hash(text_type(self)) - - def __eq__(self, other): - self_tuple = ( - self.port, - decode_utf8(self.hostname), - url_fix(decode_utf8(self.path)), - ) - other_tuple = ( - other.port, - decode_utf8(other.hostname), - url_fix(decode_utf8(other.path)), - ) - return self_tuple == other_tuple - - def full_url(self): - credentials = "" - if self.password: - credentials = "{0}:{1}@".format( - self.username, self.password) - - result = "{scheme}://{credentials}{host}{path}".format( - scheme=self.scheme, - credentials=credentials, - 
host=decode_utf8(self.hostname), - path=decode_utf8(self.path) - ) - return result - - @classmethod - def from_uri(cls, uri, entry): - result = urlsplit(uri) - POTENTIAL_HTTP_PORTS.append(int(result.port or 80)) - return cls(result.username, - result.password, - result.hostname, - result.port, - result.path, - result.query, - result.fragment, - result.scheme, - entry) - - -class URIMatcher(object): - regex = None - info = None - - def __init__(self, uri, entries): - if type(uri).__name__ == 'SRE_Pattern': - self.regex = uri - else: - self.info = URIInfo.from_uri(uri, entries) - - self.entries = entries - - #hash of current_entry pointers, per method. - self.current_entries = {} - - def matches(self, info): - if self.info: - return self.info == info - else: - return self.regex.search(info.full_url()) - - def __str__(self): - wrap = 'URLMatcher({0})' - if self.info: - return wrap.format(text_type(self.info)) - else: - return wrap.format(self.regex.pattern) - - def get_next_entry(self, method='GET'): - """Cycle through available responses, but only once. - Any subsequent requests will receive the last response""" - - if method not in self.current_entries: - self.current_entries[method] = 0 - - #restrict selection to entries that match the requested method - entries_for_method = [e for e in self.entries if e.method == method] - - if self.current_entries[method] >= len(entries_for_method): - self.current_entries[method] = -1 - - if not self.entries or not entries_for_method: - raise ValueError('I have no entries for method %s: %s' - % (method, self)) - - entry = entries_for_method[self.current_entries[method]] - if self.current_entries[method] != -1: - self.current_entries[method] += 1 - return entry - - def __hash__(self): - return hash(text_type(self)) - - def __eq__(self, other): - return text_type(self) == text_type(other) - - -class HTTPretty(Py3kObject): - u"""The URI registration class""" - _entries = {} - latest_requests = [] - GET = b'GET' - PUT = b'PUT' - POST = b'POST' - DELETE = b'DELETE' - HEAD = b'HEAD' - PATCH = b'PATCH' - METHODS = (GET, PUT, POST, DELETE, HEAD, PATCH) - last_request = HTTPrettyRequestEmpty() - _is_enabled = False - - @classmethod - def reset(cls): - cls._entries.clear() - cls.latest_requests = [] - cls.last_request = HTTPrettyRequestEmpty() - - @classmethod - def historify_request(cls, headers, body='', append=True): - request = HTTPrettyRequest(headers, body) - cls.last_request = request - if append: - cls.latest_requests.append(request) - else: - cls.latest_requests[-1] = request - return request - - @classmethod - def register_uri(cls, method, uri, body='HTTPretty :)', - adding_headers=None, - forcing_headers=None, - status=200, - responses=None, **headers): - - if isinstance(responses, list) and len(responses) > 0: - for response in responses: - response.uri = uri - response.method = method - entries_for_this_uri = responses - else: - headers['body'] = body - headers['adding_headers'] = adding_headers - headers['forcing_headers'] = forcing_headers - headers['status'] = status - - entries_for_this_uri = [ - cls.Response(method=method, uri=uri, **headers), - ] - - matcher = URIMatcher(uri, entries_for_this_uri) - if matcher in cls._entries: - matcher.entries.extend(cls._entries[matcher]) - del cls._entries[matcher] - - cls._entries[matcher] = entries_for_this_uri - - def __str__(self): - return u'' % len(self._entries) - - @classmethod - def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None, - status=200, streaming=False, 
**headers): - - headers['body'] = body - headers['adding_headers'] = adding_headers - headers['forcing_headers'] = forcing_headers - headers['status'] = int(status) - headers['streaming'] = streaming - return Entry(method, uri, **headers) - - @classmethod - def disable(cls): - cls._is_enabled = False - socket.socket = old_socket - socket.SocketType = old_socket - socket._socketobject = old_socket - - socket.create_connection = old_create_connection - socket.gethostname = old_gethostname - socket.gethostbyname = old_gethostbyname - socket.getaddrinfo = old_getaddrinfo - socket.inet_aton = old_gethostbyname - - socket.__dict__['socket'] = old_socket - socket.__dict__['_socketobject'] = old_socket - socket.__dict__['SocketType'] = old_socket - - socket.__dict__['create_connection'] = old_create_connection - socket.__dict__['gethostname'] = old_gethostname - socket.__dict__['gethostbyname'] = old_gethostbyname - socket.__dict__['getaddrinfo'] = old_getaddrinfo - socket.__dict__['inet_aton'] = old_gethostbyname - - if socks: - socks.socksocket = old_socksocket - socks.__dict__['socksocket'] = old_socksocket - - if ssl: - ssl.wrap_socket = old_ssl_wrap_socket - ssl.SSLSocket = old_sslsocket - ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket - ssl.__dict__['SSLSocket'] = old_sslsocket - - if not PY3: - ssl.sslwrap_simple = old_sslwrap_simple - ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple - - @classmethod - def is_enabled(cls): - return cls._is_enabled - - @classmethod - def enable(cls): - cls._is_enabled = True - socket.socket = fakesock.socket - socket._socketobject = fakesock.socket - socket.SocketType = fakesock.socket - - socket.create_connection = create_fake_connection - socket.gethostname = fake_gethostname - socket.gethostbyname = fake_gethostbyname - socket.getaddrinfo = fake_getaddrinfo - socket.inet_aton = fake_gethostbyname - - socket.__dict__['socket'] = fakesock.socket - socket.__dict__['_socketobject'] = fakesock.socket - socket.__dict__['SocketType'] = fakesock.socket - - socket.__dict__['create_connection'] = create_fake_connection - socket.__dict__['gethostname'] = fake_gethostname - socket.__dict__['gethostbyname'] = fake_gethostbyname - socket.__dict__['inet_aton'] = fake_gethostbyname - socket.__dict__['getaddrinfo'] = fake_getaddrinfo - - if socks: - socks.socksocket = fakesock.socket - socks.__dict__['socksocket'] = fakesock.socket - - if ssl: - ssl.wrap_socket = fake_wrap_socket - ssl.SSLSocket = FakeSSLSocket - - ssl.__dict__['wrap_socket'] = fake_wrap_socket - ssl.__dict__['SSLSocket'] = FakeSSLSocket - - if not PY3: - ssl.sslwrap_simple = fake_wrap_socket - ssl.__dict__['sslwrap_simple'] = fake_wrap_socket - - -def httprettified(test): - "A decorator tests that use HTTPretty" - def decorate_class(klass): - for attr in dir(klass): - if not attr.startswith('test_'): - continue - - attr_value = getattr(klass, attr) - if not hasattr(attr_value, "__call__"): - continue - - setattr(klass, attr, decorate_callable(attr_value)) - return klass - - def decorate_callable(test): - @functools.wraps(test) - def wrapper(*args, **kw): - HTTPretty.reset() - HTTPretty.enable() - try: - return test(*args, **kw) - finally: - HTTPretty.disable() - return wrapper - - if isinstance(test, ClassTypes): - return decorate_class(test) - return decorate_callable(test) diff --git a/setup.py b/setup.py index fab0a59d1..7f43993a8 100644 --- a/setup.py +++ b/setup.py @@ -20,5 +20,6 @@ setup( "boto", "Jinja2", "flask", + "httpretty", ], ) From fd828bdb2d3f580c228a4d0fc6190a7d733caa82 Mon Sep 17 
00:00:00 2001
From: Steve Pulec
Date: Tue, 30 Apr 2013 08:33:53 -0400
Subject: [PATCH 18/85] Revert "Switch to real version of httpretty. woot."

This reverts commit 84aeff5b060eedb699b17713f3dfd2e40df06b0a.
---
 moto/core/models.py        |   2 +-
 moto/packages/__init__.py  |   0
 moto/packages/httpretty.py | 944 +++++++++++++++++++++++++++++++++++++
 setup.py                   |   1 -
 4 files changed, 945 insertions(+), 2 deletions(-)
 create mode 100644 moto/packages/__init__.py
 create mode 100644 moto/packages/httpretty.py

diff --git a/moto/core/models.py b/moto/core/models.py
index c451fb11d..e98c1eed3 100644
--- a/moto/core/models.py
+++ b/moto/core/models.py
@@ -1,7 +1,7 @@
 import functools
 import re
 
-from httpretty import HTTPretty
+from moto.packages.httpretty import HTTPretty
 
 from .responses import metadata_response
 from .utils import convert_regex_to_flask_path
diff --git a/moto/packages/__init__.py b/moto/packages/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/moto/packages/httpretty.py b/moto/packages/httpretty.py
new file mode 100644
index 000000000..ebd69e4ed
--- /dev/null
+++ b/moto/packages/httpretty.py
@@ -0,0 +1,944 @@
+# #!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) <2011-2013> Gabriel Falcão
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
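For orientation, the vendored module that this patch re-adds is driven through the class-level API that appears further down in the file (register_uri, enable, disable, last_request). The following is a minimal sketch only; the URL, response body, and assertions are illustrative and are not part of the patch:

# Python 2-era sketch, matching the surrounding code (urllib2, str bodies).
import urllib2

from moto.packages.httpretty import HTTPretty

HTTPretty.enable()
HTTPretty.register_uri(HTTPretty.GET, "http://example.com/",
                       body='{"ok": true}',
                       content_type="application/json")
try:
    # The fake socket layer intercepts this call and replays the registered body.
    response = urllib2.urlopen("http://example.com/").read()
    assert response == '{"ok": true}'
    # Every intercepted call is recorded for later inspection.
    assert HTTPretty.last_request.method == 'GET'
finally:
    HTTPretty.disable()
    HTTPretty.reset()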
+from __future__ import unicode_literals + +version = '0.5.12' + +import re +import inspect +import socket +import functools +import itertools +import warnings +import logging +import sys +import traceback +import types + +PY3 = sys.version_info[0] == 3 +if PY3: + text_type = str + byte_type = bytes + basestring = (str, bytes) + + import io + StringIO = io.BytesIO + + class Py3kObject(object): + def __repr__(self): + return self.__str__() +else: + text_type = unicode + byte_type = str + import StringIO + StringIO = StringIO.StringIO + + +class Py3kObject(object): + def __repr__(self): + ret = self.__str__() + if PY3: + return ret + else: + ret.encode('utf-8') + +from datetime import datetime +from datetime import timedelta +try: + from urllib.parse import urlsplit, urlunsplit, parse_qs, quote, quote_plus +except ImportError: + from urlparse import urlsplit, urlunsplit, parse_qs + from urllib import quote, quote_plus + +try: + from http.server import BaseHTTPRequestHandler +except ImportError: + from BaseHTTPServer import BaseHTTPRequestHandler + +old_socket = socket.socket +old_create_connection = socket.create_connection +old_gethostbyname = socket.gethostbyname +old_gethostname = socket.gethostname +old_getaddrinfo = socket.getaddrinfo +old_socksocket = None +old_ssl_wrap_socket = None +old_sslwrap_simple = None +old_sslsocket = None + +try: + import socks + old_socksocket = socks.socksocket +except ImportError: + socks = None + +try: + import ssl + old_ssl_wrap_socket = ssl.wrap_socket + if not PY3: + old_sslwrap_simple = ssl.sslwrap_simple + old_sslsocket = ssl.SSLSocket +except ImportError: + ssl = None + + +ClassTypes = (type,) +if not PY3: + ClassTypes = (type, types.ClassType) + + +POTENTIAL_HTTP_PORTS = [80, 443] + + +class HTTPrettyError(Exception): + pass + + +def utf8(s): + if isinstance(s, text_type): + s = s.encode('utf-8') + + return byte_type(s) + + +def decode_utf8(s): + if isinstance(s, byte_type): + s = s.decode("utf-8") + + return text_type(s) + + +def parse_requestline(s): + """ + http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5 + + >>> parse_requestline('GET / HTTP/1.0') + ('GET', '/', '1.0') + >>> parse_requestline('post /testurl htTP/1.1') + ('POST', '/testurl', '1.1') + >>> parse_requestline('Im not a RequestLine') + Traceback (most recent call last): + ... 
+ ValueError: Not a Request-Line + """ + methods = b'|'.join(HTTPretty.METHODS) + m = re.match(br'(' + methods + b')\s+(.*)\s+HTTP/(1.[0|1])', s, re.I) + if m: + return m.group(1).upper(), m.group(2), m.group(3) + else: + raise ValueError('Not a Request-Line') + + +class HTTPrettyRequest(BaseHTTPRequestHandler, Py3kObject): + def __init__(self, headers, body=''): + self.body = utf8(body) + self.raw_headers = utf8(headers) + self.client_address = ['10.0.0.1'] + self.rfile = StringIO(b'\r\n\r\n'.join([headers.strip(), body])) + self.wfile = StringIO() + self.raw_requestline = self.rfile.readline() + self.error_code = self.error_message = None + self.parse_request() + self.method = self.command + self.querystring = parse_qs(self.path.split("?", 1)[-1]) + + def __str__(self): + return 'HTTPrettyRequest(headers={0}, body="{1}")'.format( + self.headers, + self.body, + ) + + +class EmptyRequestHeaders(dict): + pass + + +class HTTPrettyRequestEmpty(object): + body = '' + headers = EmptyRequestHeaders() + + +class FakeSockFile(StringIO): + pass + + +class FakeSSLSocket(object): + def __init__(self, sock, *args, **kw): + self._httpretty_sock = sock + + def __getattr__(self, attr): + if attr == '_httpretty_sock': + return super(FakeSSLSocket, self).__getattribute__(attr) + + return getattr(self._httpretty_sock, attr) + + +class fakesock(object): + class socket(object): + _entry = None + debuglevel = 0 + _sent_data = [] + + def __init__(self, family, type, protocol=6): + self.setsockopt(family, type, protocol) + self.truesock = old_socket(family, type, protocol) + self._closed = True + self.fd = FakeSockFile() + self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT + self._sock = self + self.is_http = False + + def getpeercert(self, *a, **kw): + now = datetime.now() + shift = now + timedelta(days=30 * 12) + return { + 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), + 'subjectAltName': ( + ('DNS', '*%s' % self._host), + ('DNS', self._host), + ('DNS', '*'), + ), + 'subject': ( + ( + ('organizationName', u'*.%s' % self._host), + ), + ( + ('organizationalUnitName', + u'Domain Control Validated'), + ), + ( + ('commonName', u'*.%s' % self._host), + ), + ), + } + + def ssl(self, sock, *args, **kw): + return sock + + def setsockopt(self, family, type, protocol): + self.family = family + self.protocol = protocol + self.type = type + + def connect(self, address): + self._address = (self._host, self._port) = address + self._closed = False + self.is_http = self._port in POTENTIAL_HTTP_PORTS + if not self.is_http: + self.truesock.connect(self._address) + + def close(self): + if not self._closed: + self.truesock.close() + self._closed = True + + def makefile(self, mode='r', bufsize=-1): + self._mode = mode + self._bufsize = bufsize + + if self._entry: + self._entry.fill_filekind(self.fd, self._request) + + return self.fd + + def _true_sendall(self, data, *args, **kw): + if self.is_http: + self.truesock.connect(self._address) + + self.truesock.sendall(data, *args, **kw) + + _d = True + while _d: + try: + _d = self.truesock.recv(16) + self.truesock.settimeout(0.0) + self.fd.write(_d) + + except socket.error: + break + + self.fd.seek(0) + + def sendall(self, data, *args, **kw): + + self._sent_data.append(data) + hostnames = [getattr(i.info, 'hostname', None) for i in HTTPretty._entries.keys()] + self.fd.seek(0) + try: + requestline, _ = data.split(b'\r\n', 1) + method, path, version = parse_requestline(requestline) + is_parsing_headers = True + except ValueError: + is_parsing_headers = False + + if not is_parsing_headers: + 
if len(self._sent_data) > 1: + headers, body = map(utf8, self._sent_data[-2:]) + + method, path, version = parse_requestline(headers) + split_url = urlsplit(path) + + info = URIInfo(hostname=self._host, port=self._port, + path=split_url.path, + query=split_url.query) + + # If we are sending more data to a dynamic response entry, + # we need to call the method again. + if self._entry and self._entry.dynamic_response: + self._entry.body(info, method, body, headers) + + try: + return HTTPretty.historify_request(headers, body, False) + + except Exception as e: + logging.error(traceback.format_exc(e)) + return self._true_sendall(data, *args, **kw) + + # path might come with + s = urlsplit(path) + POTENTIAL_HTTP_PORTS.append(int(s.port or 80)) + headers, body = map(utf8, data.split(b'\r\n\r\n', 1)) + + request = HTTPretty.historify_request(headers, body) + + info = URIInfo(hostname=self._host, port=self._port, + path=s.path, + query=s.query, + last_request=request) + + entries = [] + + for matcher, value in HTTPretty._entries.items(): + if matcher.matches(info): + entries = value + break + + if not entries: + self._true_sendall(data) + return + + self._entry = matcher.get_next_entry(method) + self._request = (info, body, headers) + + def debug(*a, **kw): + frame = inspect.stack()[0][0] + lines = map(utf8, traceback.format_stack(frame)) + + message = [ + "HTTPretty intercepted and unexpected socket method call.", + ("Please open an issue at " + "'https://github.com/gabrielfalcao/HTTPretty/issues'"), + "And paste the following traceback:\n", + "".join(decode_utf8(lines)), + ] + raise RuntimeError("\n".join(message)) + + def settimeout(self, new_timeout): + self.timeout = new_timeout + + sendto = send = recvfrom_into = recv_into = recvfrom = recv = debug + + +def fake_wrap_socket(s, *args, **kw): + return s + + +def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): + s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) + if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + s.settimeout(timeout) + if source_address: + s.bind(source_address) + s.connect(address) + return s + + +def fake_gethostbyname(host): + return host + + +def fake_gethostname(): + return 'localhost' + + +def fake_getaddrinfo( + host, port, family=None, socktype=None, proto=None, flags=None): + return [(2, 1, 6, '', (host, port))] + + +STATUSES = { + 100: "Continue", + 101: "Switching Protocols", + 102: "Processing", + 200: "OK", + 201: "Created", + 202: "Accepted", + 203: "Non-Authoritative Information", + 204: "No Content", + 205: "Reset Content", + 206: "Partial Content", + 207: "Multi-Status", + 208: "Already Reported", + 226: "IM Used", + 300: "Multiple Choices", + 301: "Moved Permanently", + 302: "Found", + 303: "See Other", + 304: "Not Modified", + 305: "Use Proxy", + 306: "Switch Proxy", + 307: "Temporary Redirect", + 308: "Permanent Redirect", + 400: "Bad Request", + 401: "Unauthorized", + 402: "Payment Required", + 403: "Forbidden", + 404: "Not Found", + 405: "Method Not Allowed", + 406: "Not Acceptable", + 407: "Proxy Authentication Required", + 408: "Request a Timeout", + 409: "Conflict", + 410: "Gone", + 411: "Length Required", + 412: "Precondition Failed", + 413: "Request Entity Too Large", + 414: "Request-URI Too Long", + 415: "Unsupported Media Type", + 416: "Requested Range Not Satisfiable", + 417: "Expectation Failed", + 418: "I'm a teapot", + 420: "Enhance Your Calm", + 422: "Unprocessable Entity", + 423: "Locked", + 424: "Failed Dependency", + 
424: "Method Failure", + 425: "Unordered Collection", + 426: "Upgrade Required", + 428: "Precondition Required", + 429: "Too Many Requests", + 431: "Request Header Fields Too Large", + 444: "No Response", + 449: "Retry With", + 450: "Blocked by Windows Parental Controls", + 451: "Unavailable For Legal Reasons", + 451: "Redirect", + 494: "Request Header Too Large", + 495: "Cert Error", + 496: "No Cert", + 497: "HTTP to HTTPS", + 499: "Client Closed Request", + 500: "Internal Server Error", + 501: "Not Implemented", + 502: "Bad Gateway", + 503: "Service Unavailable", + 504: "Gateway Timeout", + 505: "HTTP Version Not Supported", + 506: "Variant Also Negotiates", + 507: "Insufficient Storage", + 508: "Loop Detected", + 509: "Bandwidth Limit Exceeded", + 510: "Not Extended", + 511: "Network Authentication Required", + 598: "Network read timeout error", + 599: "Network connect timeout error", +} + + +class Entry(Py3kObject): + def __init__(self, method, uri, body, + adding_headers=None, + forcing_headers=None, + status=200, + streaming=False, + **headers): + + self.method = method + self.uri = uri + + if callable(body): + self.dynamic_response = True + else: + self.dynamic_response = False + + self.body = body + self.streaming = streaming + + if self.dynamic_response or self.streaming: + self.body_length = 0 + else: + self.body_length = len(self.body or '') + + self.adding_headers = adding_headers or {} + self.forcing_headers = forcing_headers or {} + self.status = int(status) + + for k, v in headers.items(): + name = "-".join(k.split("_")).capitalize() + self.adding_headers[name] = v + + self.validate() + + def validate(self): + content_length_keys = 'Content-Length', 'content-length' + for key in content_length_keys: + got = self.adding_headers.get( + key, self.forcing_headers.get(key, None)) + + if got is None: + continue + + try: + igot = int(got) + except ValueError: + warnings.warn( + 'HTTPretty got to register the Content-Length header ' \ + 'with "%r" which is not a number' % got, + ) + + if igot > self.body_length: + raise HTTPrettyError( + 'HTTPretty got inconsistent parameters. The header ' \ + 'Content-Length you registered expects size "%d" but ' \ + 'the body you registered for that has actually length ' \ + '"%d".' 
% ( + igot, self.body_length, + ) + ) + + def __str__(self): + return r'' % ( + self.method, self.uri, self.status) + + def normalize_headers(self, headers): + new = {} + for k in headers: + new_k = '-'.join([s.lower() for s in k.split('-')]) + new[new_k] = headers[k] + + return new + + def fill_filekind(self, fk, request): + now = datetime.utcnow() + + headers = { + 'status': self.status, + 'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'), + 'server': 'Python/HTTPretty', + 'connection': 'close', + } + + if self.forcing_headers: + headers = self.forcing_headers + + if self.dynamic_response: + req_info, req_body, req_headers = request + response = self.body(req_info, self.method, req_body, req_headers) + if isinstance(response, basestring): + body = response + else: + body, new_headers = response + headers.update(new_headers) + else: + body = self.body + + if self.adding_headers: + headers.update(self.normalize_headers(self.adding_headers)) + + headers = self.normalize_headers(headers) + + status = headers.get('status', self.status) + string_list = [ + 'HTTP/1.1 %d %s' % (status, STATUSES[status]), + ] + + if 'date' in headers: + string_list.append('date: %s' % headers.pop('date')) + + if not self.forcing_headers: + content_type = headers.pop('content-type', + 'text/plain; charset=utf-8') + + body_length = self.body_length + if self.dynamic_response: + body_length = len(body) + content_length = headers.pop('content-length', body_length) + + string_list.append('content-type: %s' % content_type) + if not self.streaming: + string_list.append('content-length: %s' % content_length) + + string_list.append('server: %s' % headers.pop('server')) + + for k, v in headers.items(): + string_list.append( + '{0}: {1}'.format(k, v), + ) + + for item in string_list: + fk.write(utf8(item) + b'\n') + + fk.write(b'\r\n') + + if self.streaming: + self.body, body = itertools.tee(body) + for chunk in body: + fk.write(utf8(chunk)) + else: + fk.write(utf8(body)) + + fk.seek(0) + + +def url_fix(s, charset='utf-8'): + scheme, netloc, path, querystring, fragment = urlsplit(s) + path = quote(path, b'/%') + querystring = quote_plus(querystring, b':&=') + return urlunsplit((scheme, netloc, path, querystring, fragment)) + + +class URIInfo(Py3kObject): + def __init__(self, + username='', + password='', + hostname='', + port=80, + path='/', + query='', + fragment='', + scheme='', + last_request=None): + + self.username = username or '' + self.password = password or '' + self.hostname = hostname or '' + + if port: + port = int(port) + + elif scheme == 'https': + port = 443 + + self.port = port or 80 + self.path = path or '' + self.query = query or '' + self.scheme = scheme or (self.port is 80 and "http" or "https") + self.fragment = fragment or '' + self.last_request = last_request + + def __str__(self): + attrs = ( + 'username', + 'password', + 'hostname', + 'port', + 'path', + ) + fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs]) + return r'' % fmt + + def __hash__(self): + return hash(text_type(self)) + + def __eq__(self, other): + self_tuple = ( + self.port, + decode_utf8(self.hostname), + url_fix(decode_utf8(self.path)), + ) + other_tuple = ( + other.port, + decode_utf8(other.hostname), + url_fix(decode_utf8(other.path)), + ) + return self_tuple == other_tuple + + def full_url(self): + credentials = "" + if self.password: + credentials = "{0}:{1}@".format( + self.username, self.password) + + result = "{scheme}://{credentials}{host}{path}".format( + scheme=self.scheme, + credentials=credentials, + 
host=decode_utf8(self.hostname), + path=decode_utf8(self.path) + ) + return result + + @classmethod + def from_uri(cls, uri, entry): + result = urlsplit(uri) + POTENTIAL_HTTP_PORTS.append(int(result.port or 80)) + return cls(result.username, + result.password, + result.hostname, + result.port, + result.path, + result.query, + result.fragment, + result.scheme, + entry) + + +class URIMatcher(object): + regex = None + info = None + + def __init__(self, uri, entries): + if type(uri).__name__ == 'SRE_Pattern': + self.regex = uri + else: + self.info = URIInfo.from_uri(uri, entries) + + self.entries = entries + + #hash of current_entry pointers, per method. + self.current_entries = {} + + def matches(self, info): + if self.info: + return self.info == info + else: + return self.regex.search(info.full_url()) + + def __str__(self): + wrap = 'URLMatcher({0})' + if self.info: + return wrap.format(text_type(self.info)) + else: + return wrap.format(self.regex.pattern) + + def get_next_entry(self, method='GET'): + """Cycle through available responses, but only once. + Any subsequent requests will receive the last response""" + + if method not in self.current_entries: + self.current_entries[method] = 0 + + #restrict selection to entries that match the requested method + entries_for_method = [e for e in self.entries if e.method == method] + + if self.current_entries[method] >= len(entries_for_method): + self.current_entries[method] = -1 + + if not self.entries or not entries_for_method: + raise ValueError('I have no entries for method %s: %s' + % (method, self)) + + entry = entries_for_method[self.current_entries[method]] + if self.current_entries[method] != -1: + self.current_entries[method] += 1 + return entry + + def __hash__(self): + return hash(text_type(self)) + + def __eq__(self, other): + return text_type(self) == text_type(other) + + +class HTTPretty(Py3kObject): + u"""The URI registration class""" + _entries = {} + latest_requests = [] + GET = b'GET' + PUT = b'PUT' + POST = b'POST' + DELETE = b'DELETE' + HEAD = b'HEAD' + PATCH = b'PATCH' + METHODS = (GET, PUT, POST, DELETE, HEAD, PATCH) + last_request = HTTPrettyRequestEmpty() + _is_enabled = False + + @classmethod + def reset(cls): + cls._entries.clear() + cls.latest_requests = [] + cls.last_request = HTTPrettyRequestEmpty() + + @classmethod + def historify_request(cls, headers, body='', append=True): + request = HTTPrettyRequest(headers, body) + cls.last_request = request + if append: + cls.latest_requests.append(request) + else: + cls.latest_requests[-1] = request + return request + + @classmethod + def register_uri(cls, method, uri, body='HTTPretty :)', + adding_headers=None, + forcing_headers=None, + status=200, + responses=None, **headers): + + if isinstance(responses, list) and len(responses) > 0: + for response in responses: + response.uri = uri + response.method = method + entries_for_this_uri = responses + else: + headers['body'] = body + headers['adding_headers'] = adding_headers + headers['forcing_headers'] = forcing_headers + headers['status'] = status + + entries_for_this_uri = [ + cls.Response(method=method, uri=uri, **headers), + ] + + matcher = URIMatcher(uri, entries_for_this_uri) + if matcher in cls._entries: + matcher.entries.extend(cls._entries[matcher]) + del cls._entries[matcher] + + cls._entries[matcher] = entries_for_this_uri + + def __str__(self): + return u'' % len(self._entries) + + @classmethod + def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None, + status=200, streaming=False, 
**headers): + + headers['body'] = body + headers['adding_headers'] = adding_headers + headers['forcing_headers'] = forcing_headers + headers['status'] = int(status) + headers['streaming'] = streaming + return Entry(method, uri, **headers) + + @classmethod + def disable(cls): + cls._is_enabled = False + socket.socket = old_socket + socket.SocketType = old_socket + socket._socketobject = old_socket + + socket.create_connection = old_create_connection + socket.gethostname = old_gethostname + socket.gethostbyname = old_gethostbyname + socket.getaddrinfo = old_getaddrinfo + socket.inet_aton = old_gethostbyname + + socket.__dict__['socket'] = old_socket + socket.__dict__['_socketobject'] = old_socket + socket.__dict__['SocketType'] = old_socket + + socket.__dict__['create_connection'] = old_create_connection + socket.__dict__['gethostname'] = old_gethostname + socket.__dict__['gethostbyname'] = old_gethostbyname + socket.__dict__['getaddrinfo'] = old_getaddrinfo + socket.__dict__['inet_aton'] = old_gethostbyname + + if socks: + socks.socksocket = old_socksocket + socks.__dict__['socksocket'] = old_socksocket + + if ssl: + ssl.wrap_socket = old_ssl_wrap_socket + ssl.SSLSocket = old_sslsocket + ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket + ssl.__dict__['SSLSocket'] = old_sslsocket + + if not PY3: + ssl.sslwrap_simple = old_sslwrap_simple + ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple + + @classmethod + def is_enabled(cls): + return cls._is_enabled + + @classmethod + def enable(cls): + cls._is_enabled = True + socket.socket = fakesock.socket + socket._socketobject = fakesock.socket + socket.SocketType = fakesock.socket + + socket.create_connection = create_fake_connection + socket.gethostname = fake_gethostname + socket.gethostbyname = fake_gethostbyname + socket.getaddrinfo = fake_getaddrinfo + socket.inet_aton = fake_gethostbyname + + socket.__dict__['socket'] = fakesock.socket + socket.__dict__['_socketobject'] = fakesock.socket + socket.__dict__['SocketType'] = fakesock.socket + + socket.__dict__['create_connection'] = create_fake_connection + socket.__dict__['gethostname'] = fake_gethostname + socket.__dict__['gethostbyname'] = fake_gethostbyname + socket.__dict__['inet_aton'] = fake_gethostbyname + socket.__dict__['getaddrinfo'] = fake_getaddrinfo + + if socks: + socks.socksocket = fakesock.socket + socks.__dict__['socksocket'] = fakesock.socket + + if ssl: + ssl.wrap_socket = fake_wrap_socket + ssl.SSLSocket = FakeSSLSocket + + ssl.__dict__['wrap_socket'] = fake_wrap_socket + ssl.__dict__['SSLSocket'] = FakeSSLSocket + + if not PY3: + ssl.sslwrap_simple = fake_wrap_socket + ssl.__dict__['sslwrap_simple'] = fake_wrap_socket + + +def httprettified(test): + "A decorator tests that use HTTPretty" + def decorate_class(klass): + for attr in dir(klass): + if not attr.startswith('test_'): + continue + + attr_value = getattr(klass, attr) + if not hasattr(attr_value, "__call__"): + continue + + setattr(klass, attr, decorate_callable(attr_value)) + return klass + + def decorate_callable(test): + @functools.wraps(test) + def wrapper(*args, **kw): + HTTPretty.reset() + HTTPretty.enable() + try: + return test(*args, **kw) + finally: + HTTPretty.disable() + return wrapper + + if isinstance(test, ClassTypes): + return decorate_class(test) + return decorate_callable(test) diff --git a/setup.py b/setup.py index 7f43993a8..fab0a59d1 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,5 @@ setup( "boto", "Jinja2", "flask", - "httpretty", ], ) From 47bd4c49a30cc271dac9c7f50587508c1d736b08 Mon Sep 17 
00:00:00 2001 From: Steve Pulec Date: Fri, 3 May 2013 19:33:13 -0400 Subject: [PATCH 19/85] attempting to move to upstream httpretty --- Makefile | 6 +- moto/core/models.py | 2 +- moto/core/responses.py | 53 +- moto/core/utils.py | 18 +- moto/dynamodb/responses.py | 108 +- moto/dynamodb/urls.py | 4 +- moto/ec2/responses/__init__.py | 86 +- moto/ec2/responses/amis.py | 7 +- .../availability_zones_and_regions.py | 3 - moto/ec2/responses/elastic_block_store.py | 3 - moto/ec2/responses/general.py | 5 +- moto/ec2/responses/instances.py | 22 +- moto/ec2/responses/security_groups.py | 4 - moto/ec2/responses/subnets.py | 3 - moto/ec2/responses/tags.py | 9 +- moto/ec2/responses/vpcs.py | 3 - moto/packages/__init__.py | 0 moto/packages/httpretty.py | 944 ------------------ moto/s3/responses.py | 77 +- moto/s3/utils.py | 21 +- requirements.txt | 1 - setup.py | 6 +- tests/test_s3/test_s3_utils.py | 14 + 23 files changed, 238 insertions(+), 1161 deletions(-) delete mode 100644 moto/packages/__init__.py delete mode 100644 moto/packages/httpretty.py create mode 100644 tests/test_s3/test_s3_utils.py diff --git a/Makefile b/Makefile index 0420a9ea3..fb83906d6 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,9 @@ SHELL := /bin/bash init: - python setup.py develop - pip install -r requirements.txt + @python setup.py develop + @pip install -r requirements.txt test: rm -f .coverage - nosetests --with-coverage ./tests/ + @nosetests --with-coverage ./tests/ diff --git a/moto/core/models.py b/moto/core/models.py index e98c1eed3..c451fb11d 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -1,7 +1,7 @@ import functools import re -from moto.packages.httpretty import HTTPretty +from httpretty import HTTPretty from .responses import metadata_response from .utils import convert_regex_to_flask_path diff --git a/moto/core/responses.py b/moto/core/responses.py index d74bcd2e4..a25f5f26a 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -1,50 +1,75 @@ import datetime import json -from urlparse import parse_qs +from urlparse import parse_qs, urlparse from moto.core.utils import headers_to_dict, camelcase_to_underscores, method_names_from_class class BaseResponse(object): - def dispatch(self, uri, method, body, headers): - if body: - querystring = parse_qs(body) + + def dispatch(self, request, full_url, headers): + if hasattr(request, 'body'): + # Boto + self.body = request.body else: + # Flask server + self.body = request.data + + querystring = parse_qs(urlparse(full_url).query) + if not querystring: + querystring = parse_qs(self.body) + if not querystring: querystring = headers_to_dict(headers) - self.path = uri.path + self.uri = full_url + self.path = urlparse(full_url).path self.querystring = querystring + self.method = request.method - action = querystring.get('Action', [""])[0] + self.headers = dict(request.headers) + self.response_headers = headers + return self.call_action() + + def call_action(self): + headers = self.response_headers + action = self.querystring.get('Action', [""])[0] action = camelcase_to_underscores(action) - method_names = method_names_from_class(self.__class__) if action in method_names: method = getattr(self, action) - return method() + response = method() + if isinstance(response, basestring): + return 200, headers, response + else: + body, new_headers = response + status = new_headers.pop('status', 200) + headers.update(new_headers) + return status, headers, body raise NotImplementedError("The {} action has not been implemented".format(action)) -def 
metadata_response(uri, method, body, headers): +def metadata_response(request, full_url, headers): """ Mock response for localhost metadata http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html """ + parsed_url = urlparse(full_url) tomorrow = datetime.datetime.now() + datetime.timedelta(days=1) - path = uri.path.lstrip("/latest/meta-data/") + path = parsed_url.path.lstrip("/latest/meta-data/") if path == '': - return "iam/" + result = "iam/" elif path == 'iam/': - return 'security-credentials/' + result = 'security-credentials/' elif path == 'iam/security-credentials/': - return 'default-role' + result = 'default-role' elif path == 'iam/security-credentials/default-role': - return json.dumps(dict( + result = json.dumps(dict( AccessKeyId="test-key", SecretAccessKey="test-secret-key", Token="test-session-token", Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") )) + return 200, headers, result diff --git a/moto/core/utils.py b/moto/core/utils.py index 8532698cb..13aca14b0 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -1,4 +1,3 @@ -from collections import namedtuple import inspect import random import re @@ -91,23 +90,12 @@ class convert_flask_to_httpretty_response(object): return "{}.{}".format(outer, self.callback.__name__) def __call__(self, args=None, **kwargs): - hostname = request.host_url - method = request.method - path = request.path - query = request.query_string - - # Mimic the HTTPretty URIInfo class - URI = namedtuple('URI', 'hostname method path query') - uri = URI(hostname, method, path, query) - - body = request.data or query headers = dict(request.headers) - result = self.callback(uri, method, body, headers) + result = self.callback(request, request.url, headers) if isinstance(result, basestring): # result is just the response return result else: - # result is a responce, headers tuple - response, headers = result - status = headers.pop('status', None) + # result is a status, headers, response tuple + status, headers, response = result return response, status, headers diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index a75443a11..dece06542 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -1,6 +1,7 @@ import json -from moto.core.utils import headers_to_dict, camelcase_to_underscores +from moto.core.responses import BaseResponse +from moto.core.utils import camelcase_to_underscores from .models import dynamodb_backend, dynamo_json_dump @@ -27,17 +28,11 @@ GET_SESSION_TOKEN_RESULT = """ """ -def sts_handler(uri, method, body, headers): +def sts_handler(): return GET_SESSION_TOKEN_RESULT -class DynamoHandler(object): - - def __init__(self, uri, method, body, headers): - self.uri = uri - self.method = method - self.body = body - self.headers = headers +class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): """Parses request headers and extracts part od the X-Amz-Target @@ -45,22 +40,35 @@ class DynamoHandler(object): ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables """ - match = headers.get('X-Amz-Target') + # Headers are case-insensitive. Probably a better way to do this. 
+ match = headers.get('x-amz-target') or headers.get('X-Amz-Target') if match: return match.split(".")[1] def error(self, type_, status=400): - return dynamo_json_dump({'__type': type_}), dict(status=400) + return status, self.response_headers, dynamo_json_dump({'__type': type_}) - def dispatch(self): + def call_action(self): + if 'GetSessionToken' in self.body: + return 200, self.response_headers, sts_handler() + + self.body = json.loads(self.body or '{}') endpoint = self.get_endpoint_name(self.headers) if endpoint: endpoint = camelcase_to_underscores(endpoint) - return getattr(self, endpoint)(self.uri, self.method, self.body, self.headers) - else: - return "", dict(status=404) + response = getattr(self, endpoint)() + if isinstance(response, basestring): + return 200, self.response_headers, response - def list_tables(self, uri, method, body, headers): + else: + status_code, new_headers, response_content = response + self.response_headers.update(new_headers) + return status_code, self.response_headers, response_content + else: + return 404, self.response_headers, "" + + def list_tables(self): + body = self.body limit = body.get('Limit') if body.get("ExclusiveStartTableName"): last = body.get("ExclusiveStartTableName") @@ -77,7 +85,8 @@ class DynamoHandler(object): response["LastEvaluatedTableName"] = tables[-1] return dynamo_json_dump(response) - def create_table(self, uri, method, body, headers): + def create_table(self): + body = self.body name = body['TableName'] key_schema = body['KeySchema'] @@ -104,8 +113,8 @@ class DynamoHandler(object): ) return dynamo_json_dump(table.describe) - def delete_table(self, uri, method, body, headers): - name = body['TableName'] + def delete_table(self): + name = self.body['TableName'] table = dynamodb_backend.delete_table(name) if table: return dynamo_json_dump(table.describe) @@ -113,16 +122,16 @@ class DynamoHandler(object): er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er) - def update_table(self, uri, method, body, headers): - name = body['TableName'] - throughput = body["ProvisionedThroughput"] + def update_table(self): + name = self.body['TableName'] + throughput = self.body["ProvisionedThroughput"] new_read_units = throughput["ReadCapacityUnits"] new_write_units = throughput["WriteCapacityUnits"] table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units) return dynamo_json_dump(table.describe) - def describe_table(self, uri, method, body, headers): - name = body['TableName'] + def describe_table(self): + name = self.body['TableName'] try: table = dynamodb_backend.tables[name] except KeyError: @@ -130,9 +139,9 @@ class DynamoHandler(object): return self.error(er) return dynamo_json_dump(table.describe) - def put_item(self, uri, method, body, headers): - name = body['TableName'] - item = body['Item'] + def put_item(self): + name = self.body['TableName'] + item = self.body['Item'] result = dynamodb_backend.put_item(name, item) if result: item_dict = result.to_json() @@ -142,8 +151,8 @@ class DynamoHandler(object): er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er) - def batch_write_item(self, uri, method, body, headers): - table_batches = body['RequestItems'] + def batch_write_item(self): + table_batches = self.body['RequestItems'] for table_name, table_requests in table_batches.iteritems(): for table_request in table_requests: @@ -173,12 +182,12 @@ class DynamoHandler(object): return dynamo_json_dump(response) - def get_item(self, uri, method, 
body, headers): - name = body['TableName'] - key = body['Key'] + def get_item(self): + name = self.body['TableName'] + key = self.body['Key'] hash_key = key['HashKeyElement'] range_key = key.get('RangeKeyElement') - attrs_to_get = body.get('AttributesToGet') + attrs_to_get = self.body.get('AttributesToGet') item = dynamodb_backend.get_item(name, hash_key, range_key) if item: item_dict = item.describe_attrs(attrs_to_get) @@ -188,8 +197,8 @@ class DynamoHandler(object): er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er) - def batch_get_item(self, uri, method, body, headers): - table_batches = body['RequestItems'] + def batch_get_item(self): + table_batches = self.body['RequestItems'] results = { "Responses": { @@ -211,10 +220,10 @@ class DynamoHandler(object): results["Responses"][table_name] = {"Items": items, "ConsumedCapacityUnits": 1} return dynamo_json_dump(results) - def query(self, uri, method, body, headers): - name = body['TableName'] - hash_key = body['HashKeyValue'] - range_condition = body.get('RangeKeyCondition') + def query(self): + name = self.body['TableName'] + hash_key = self.body['HashKeyValue'] + range_condition = self.body.get('RangeKeyCondition') if range_condition: range_comparison = range_condition['ComparisonOperator'] range_values = range_condition['AttributeValueList'] @@ -242,11 +251,11 @@ class DynamoHandler(object): # } return dynamo_json_dump(result) - def scan(self, uri, method, body, headers): - name = body['TableName'] + def scan(self): + name = self.body['TableName'] filters = {} - scan_filters = body.get('ScanFilter', {}) + scan_filters = self.body.get('ScanFilter', {}) for attribute_name, scan_filter in scan_filters.iteritems(): # Keys are attribute names. Values are tuples of (comparison, comparison_value) comparison_operator = scan_filter["ComparisonOperator"] @@ -274,12 +283,12 @@ class DynamoHandler(object): # } return dynamo_json_dump(result) - def delete_item(self, uri, method, body, headers): - name = body['TableName'] - key = body['Key'] + def delete_item(self): + name = self.body['TableName'] + key = self.body['Key'] hash_key = key['HashKeyElement'] range_key = key.get('RangeKeyElement') - return_values = body.get('ReturnValues', '') + return_values = self.body.get('ReturnValues', '') item = dynamodb_backend.delete_item(name, hash_key, range_key) if item: if return_values == 'ALL_OLD': @@ -291,10 +300,3 @@ class DynamoHandler(object): else: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er) - - -def handler(uri, method, body, headers): - if 'GetSessionToken' in body: - return sts_handler(uri, method, body, headers) - body = json.loads(body or '{}') - return DynamoHandler(uri, method, body, headers_to_dict(headers)).dispatch() diff --git a/moto/dynamodb/urls.py b/moto/dynamodb/urls.py index 85634ef2f..6ed5e00d5 100644 --- a/moto/dynamodb/urls.py +++ b/moto/dynamodb/urls.py @@ -1,4 +1,4 @@ -from .responses import handler +from .responses import DynamoHandler url_bases = [ "https?://dynamodb.(.+).amazonaws.com", @@ -6,5 +6,5 @@ url_bases = [ ] url_paths = { - "{0}/": handler, + "{0}/": DynamoHandler().dispatch, } diff --git a/moto/ec2/responses/__init__.py b/moto/ec2/responses/__init__.py index 0a50797ee..690419438 100644 --- a/moto/ec2/responses/__init__.py +++ b/moto/ec2/responses/__init__.py @@ -1,6 +1,4 @@ -from urlparse import parse_qs - -from moto.core.utils import camelcase_to_underscores, method_names_from_class +from moto.core.responses import BaseResponse from 
.amazon_dev_pay import AmazonDevPay from .amis import AmisResponse @@ -32,53 +30,35 @@ from .vpn_connections import VPNConnections from .windows import Windows -class EC2Response(object): - - sub_responses = [ - AmazonDevPay, - AmisResponse, - AvailabilityZonesAndRegions, - CustomerGateways, - DHCPOptions, - ElasticBlockStore, - ElasticIPAddresses, - ElasticNetworkInterfaces, - General, - InstanceResponse, - InternetGateways, - IPAddresses, - KeyPairs, - Monitoring, - NetworkACLs, - PlacementGroups, - ReservedInstances, - RouteTables, - SecurityGroups, - SpotInstances, - Subnets, - TagResponse, - VirtualPrivateGateways, - VMExport, - VMImport, - VPCs, - VPNConnections, - Windows, - ] - - def dispatch(self, uri, method, body, headers): - if body: - querystring = parse_qs(body) - else: - querystring = parse_qs(headers) - - action = querystring.get('Action', [None])[0] - if action: - action = camelcase_to_underscores(action) - - for sub_response in self.sub_responses: - method_names = method_names_from_class(sub_response) - if action in method_names: - response = sub_response(querystring) - method = getattr(response, action) - return method() - raise NotImplementedError("The {} action has not been implemented".format(action)) +class EC2Response( + BaseResponse, + AmazonDevPay, + AmisResponse, + AvailabilityZonesAndRegions, + CustomerGateways, + DHCPOptions, + ElasticBlockStore, + ElasticIPAddresses, + ElasticNetworkInterfaces, + General, + InstanceResponse, + InternetGateways, + IPAddresses, + KeyPairs, + Monitoring, + NetworkACLs, + PlacementGroups, + ReservedInstances, + RouteTables, + SecurityGroups, + SpotInstances, + Subnets, + TagResponse, + VirtualPrivateGateways, + VMExport, + VMImport, + VPCs, + VPNConnections, + Windows, +): + pass diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index afce0bbb5..feddc89f1 100644 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -5,14 +5,11 @@ from moto.ec2.utils import instance_ids_from_querystring class AmisResponse(object): - def __init__(self, querystring): - self.querystring = querystring - self.instance_ids = instance_ids_from_querystring(querystring) - def create_image(self): name = self.querystring.get('Name')[0] description = self.querystring.get('Description')[0] - instance_id = self.instance_ids[0] + instance_ids = instance_ids_from_querystring(self.querystring) + instance_id = instance_ids[0] image = ec2_backend.create_image(instance_id, name, description) if not image: return "There is not instance with id {}".format(instance_id), dict(status=404) diff --git a/moto/ec2/responses/availability_zones_and_regions.py b/moto/ec2/responses/availability_zones_and_regions.py index 4faeda764..f216a644f 100644 --- a/moto/ec2/responses/availability_zones_and_regions.py +++ b/moto/ec2/responses/availability_zones_and_regions.py @@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend class AvailabilityZonesAndRegions(object): - def __init__(self, querystring): - self.querystring = querystring - def describe_availability_zones(self): zones = ec2_backend.describe_availability_zones() template = Template(DESCRIBE_ZONES_RESPONSE) diff --git a/moto/ec2/responses/elastic_block_store.py b/moto/ec2/responses/elastic_block_store.py index bdea18188..d81c61c9d 100644 --- a/moto/ec2/responses/elastic_block_store.py +++ b/moto/ec2/responses/elastic_block_store.py @@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend class ElasticBlockStore(object): - def __init__(self, querystring): - self.querystring = querystring - def 
attach_volume(self): volume_id = self.querystring.get('VolumeId')[0] instance_id = self.querystring.get('InstanceId')[0] diff --git a/moto/ec2/responses/general.py b/moto/ec2/responses/general.py index ad133a30c..5353bb99a 100644 --- a/moto/ec2/responses/general.py +++ b/moto/ec2/responses/general.py @@ -5,11 +5,8 @@ from moto.ec2.utils import instance_ids_from_querystring class General(object): - def __init__(self, querystring): - self.querystring = querystring - self.instance_ids = instance_ids_from_querystring(querystring) - def get_console_output(self): + self.instance_ids = instance_ids_from_querystring(self.querystring) instance_id = self.instance_ids[0] instance = ec2_backend.get_instance(instance_id) if instance: diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 7c7c9d725..7170a0928 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -6,10 +6,6 @@ from moto.ec2.utils import instance_ids_from_querystring class InstanceResponse(object): - def __init__(self, querystring): - self.querystring = querystring - self.instance_ids = instance_ids_from_querystring(querystring) - def describe_instances(self): template = Template(EC2_DESCRIBE_INSTANCES) return template.render(reservations=ec2_backend.all_reservations()) @@ -22,22 +18,26 @@ class InstanceResponse(object): return template.render(reservation=new_reservation) def terminate_instances(self): - instances = ec2_backend.terminate_instances(self.instance_ids) + instance_ids = instance_ids_from_querystring(self.querystring) + instances = ec2_backend.terminate_instances(instance_ids) template = Template(EC2_TERMINATE_INSTANCES) return template.render(instances=instances) def reboot_instances(self): - instances = ec2_backend.reboot_instances(self.instance_ids) + instance_ids = instance_ids_from_querystring(self.querystring) + instances = ec2_backend.reboot_instances(instance_ids) template = Template(EC2_REBOOT_INSTANCES) return template.render(instances=instances) def stop_instances(self): - instances = ec2_backend.stop_instances(self.instance_ids) + instance_ids = instance_ids_from_querystring(self.querystring) + instances = ec2_backend.stop_instances(instance_ids) template = Template(EC2_STOP_INSTANCES) return template.render(instances=instances) def start_instances(self): - instances = ec2_backend.start_instances(self.instance_ids) + instance_ids = instance_ids_from_querystring(self.querystring) + instances = ec2_backend.start_instances(instance_ids) template = Template(EC2_START_INSTANCES) return template.render(instances=instances) @@ -45,7 +45,8 @@ class InstanceResponse(object): # TODO this and modify below should raise IncorrectInstanceState if instance not in stopped state attribute = self.querystring.get("Attribute")[0] key = camelcase_to_underscores(attribute) - instance_id = self.instance_ids[0] + instance_ids = instance_ids_from_querystring(self.querystring) + instance_id = instance_ids[0] instance, value = ec2_backend.describe_instance_attribute(instance_id, key) template = Template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE) return template.render(instance=instance, attribute=attribute, value=value) @@ -57,7 +58,8 @@ class InstanceResponse(object): value = self.querystring.get(key)[0] normalized_attribute = camelcase_to_underscores(key.split(".")[0]) - instance_id = self.instance_ids[0] + instance_ids = instance_ids_from_querystring(self.querystring) + instance_id = instance_ids[0] ec2_backend.modify_instance_attribute(instance_id, normalized_attribute, value) return 
EC2_MODIFY_INSTANCE_ATTRIBUTE diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index 2768494a8..1b40e182f 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -1,7 +1,6 @@ from jinja2 import Template from moto.ec2.models import ec2_backend -from moto.ec2.utils import resource_ids_from_querystring def process_rules_from_querystring(querystring): @@ -22,9 +21,6 @@ def process_rules_from_querystring(querystring): class SecurityGroups(object): - def __init__(self, querystring): - self.querystring = querystring - def authorize_security_group_egress(self): raise NotImplementedError('SecurityGroups.authorize_security_group_egress is not yet implemented') diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py index 97a5da287..761f492e5 100644 --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend class Subnets(object): - def __init__(self, querystring): - self.querystring = querystring - def create_subnet(self): vpc_id = self.querystring.get('VpcId')[0] cidr_block = self.querystring.get('CidrBlock')[0] diff --git a/moto/ec2/responses/tags.py b/moto/ec2/responses/tags.py index 18478e9a5..dd8dce8e8 100644 --- a/moto/ec2/responses/tags.py +++ b/moto/ec2/responses/tags.py @@ -5,17 +5,16 @@ from moto.ec2.utils import resource_ids_from_querystring class TagResponse(object): - def __init__(self, querystring): - self.querystring = querystring - self.resource_ids = resource_ids_from_querystring(querystring) def create_tags(self): - for resource_id, tag in self.resource_ids.iteritems(): + resource_ids = resource_ids_from_querystring(self.querystring) + for resource_id, tag in resource_ids.iteritems(): ec2_backend.create_tag(resource_id, tag[0], tag[1]) return CREATE_RESPONSE def delete_tags(self): - for resource_id, tag in self.resource_ids.iteritems(): + resource_ids = resource_ids_from_querystring(self.querystring) + for resource_id, tag in resource_ids.iteritems(): ec2_backend.delete_tag(resource_id, tag[0]) template = Template(DELETE_RESPONSE) return template.render(reservations=ec2_backend.all_reservations()) diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 857b9b2bb..c2b16f9cd 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -4,9 +4,6 @@ from moto.ec2.models import ec2_backend class VPCs(object): - def __init__(self, querystring): - self.querystring = querystring - def create_vpc(self): cidr_block = self.querystring.get('CidrBlock')[0] vpc = ec2_backend.create_vpc(cidr_block) diff --git a/moto/packages/__init__.py b/moto/packages/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/moto/packages/httpretty.py b/moto/packages/httpretty.py deleted file mode 100644 index ebd69e4ed..000000000 --- a/moto/packages/httpretty.py +++ /dev/null @@ -1,944 +0,0 @@ -# #!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright (C) <2011-2013> Gabriel Falcão -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission 
notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -from __future__ import unicode_literals - -version = '0.5.12' - -import re -import inspect -import socket -import functools -import itertools -import warnings -import logging -import sys -import traceback -import types - -PY3 = sys.version_info[0] == 3 -if PY3: - text_type = str - byte_type = bytes - basestring = (str, bytes) - - import io - StringIO = io.BytesIO - - class Py3kObject(object): - def __repr__(self): - return self.__str__() -else: - text_type = unicode - byte_type = str - import StringIO - StringIO = StringIO.StringIO - - -class Py3kObject(object): - def __repr__(self): - ret = self.__str__() - if PY3: - return ret - else: - ret.encode('utf-8') - -from datetime import datetime -from datetime import timedelta -try: - from urllib.parse import urlsplit, urlunsplit, parse_qs, quote, quote_plus -except ImportError: - from urlparse import urlsplit, urlunsplit, parse_qs - from urllib import quote, quote_plus - -try: - from http.server import BaseHTTPRequestHandler -except ImportError: - from BaseHTTPServer import BaseHTTPRequestHandler - -old_socket = socket.socket -old_create_connection = socket.create_connection -old_gethostbyname = socket.gethostbyname -old_gethostname = socket.gethostname -old_getaddrinfo = socket.getaddrinfo -old_socksocket = None -old_ssl_wrap_socket = None -old_sslwrap_simple = None -old_sslsocket = None - -try: - import socks - old_socksocket = socks.socksocket -except ImportError: - socks = None - -try: - import ssl - old_ssl_wrap_socket = ssl.wrap_socket - if not PY3: - old_sslwrap_simple = ssl.sslwrap_simple - old_sslsocket = ssl.SSLSocket -except ImportError: - ssl = None - - -ClassTypes = (type,) -if not PY3: - ClassTypes = (type, types.ClassType) - - -POTENTIAL_HTTP_PORTS = [80, 443] - - -class HTTPrettyError(Exception): - pass - - -def utf8(s): - if isinstance(s, text_type): - s = s.encode('utf-8') - - return byte_type(s) - - -def decode_utf8(s): - if isinstance(s, byte_type): - s = s.decode("utf-8") - - return text_type(s) - - -def parse_requestline(s): - """ - http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5 - - >>> parse_requestline('GET / HTTP/1.0') - ('GET', '/', '1.0') - >>> parse_requestline('post /testurl htTP/1.1') - ('POST', '/testurl', '1.1') - >>> parse_requestline('Im not a RequestLine') - Traceback (most recent call last): - ... 
- ValueError: Not a Request-Line - """ - methods = b'|'.join(HTTPretty.METHODS) - m = re.match(br'(' + methods + b')\s+(.*)\s+HTTP/(1.[0|1])', s, re.I) - if m: - return m.group(1).upper(), m.group(2), m.group(3) - else: - raise ValueError('Not a Request-Line') - - -class HTTPrettyRequest(BaseHTTPRequestHandler, Py3kObject): - def __init__(self, headers, body=''): - self.body = utf8(body) - self.raw_headers = utf8(headers) - self.client_address = ['10.0.0.1'] - self.rfile = StringIO(b'\r\n\r\n'.join([headers.strip(), body])) - self.wfile = StringIO() - self.raw_requestline = self.rfile.readline() - self.error_code = self.error_message = None - self.parse_request() - self.method = self.command - self.querystring = parse_qs(self.path.split("?", 1)[-1]) - - def __str__(self): - return 'HTTPrettyRequest(headers={0}, body="{1}")'.format( - self.headers, - self.body, - ) - - -class EmptyRequestHeaders(dict): - pass - - -class HTTPrettyRequestEmpty(object): - body = '' - headers = EmptyRequestHeaders() - - -class FakeSockFile(StringIO): - pass - - -class FakeSSLSocket(object): - def __init__(self, sock, *args, **kw): - self._httpretty_sock = sock - - def __getattr__(self, attr): - if attr == '_httpretty_sock': - return super(FakeSSLSocket, self).__getattribute__(attr) - - return getattr(self._httpretty_sock, attr) - - -class fakesock(object): - class socket(object): - _entry = None - debuglevel = 0 - _sent_data = [] - - def __init__(self, family, type, protocol=6): - self.setsockopt(family, type, protocol) - self.truesock = old_socket(family, type, protocol) - self._closed = True - self.fd = FakeSockFile() - self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT - self._sock = self - self.is_http = False - - def getpeercert(self, *a, **kw): - now = datetime.now() - shift = now + timedelta(days=30 * 12) - return { - 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), - 'subjectAltName': ( - ('DNS', '*%s' % self._host), - ('DNS', self._host), - ('DNS', '*'), - ), - 'subject': ( - ( - ('organizationName', u'*.%s' % self._host), - ), - ( - ('organizationalUnitName', - u'Domain Control Validated'), - ), - ( - ('commonName', u'*.%s' % self._host), - ), - ), - } - - def ssl(self, sock, *args, **kw): - return sock - - def setsockopt(self, family, type, protocol): - self.family = family - self.protocol = protocol - self.type = type - - def connect(self, address): - self._address = (self._host, self._port) = address - self._closed = False - self.is_http = self._port in POTENTIAL_HTTP_PORTS - if not self.is_http: - self.truesock.connect(self._address) - - def close(self): - if not self._closed: - self.truesock.close() - self._closed = True - - def makefile(self, mode='r', bufsize=-1): - self._mode = mode - self._bufsize = bufsize - - if self._entry: - self._entry.fill_filekind(self.fd, self._request) - - return self.fd - - def _true_sendall(self, data, *args, **kw): - if self.is_http: - self.truesock.connect(self._address) - - self.truesock.sendall(data, *args, **kw) - - _d = True - while _d: - try: - _d = self.truesock.recv(16) - self.truesock.settimeout(0.0) - self.fd.write(_d) - - except socket.error: - break - - self.fd.seek(0) - - def sendall(self, data, *args, **kw): - - self._sent_data.append(data) - hostnames = [getattr(i.info, 'hostname', None) for i in HTTPretty._entries.keys()] - self.fd.seek(0) - try: - requestline, _ = data.split(b'\r\n', 1) - method, path, version = parse_requestline(requestline) - is_parsing_headers = True - except ValueError: - is_parsing_headers = False - - if not is_parsing_headers: - 
if len(self._sent_data) > 1: - headers, body = map(utf8, self._sent_data[-2:]) - - method, path, version = parse_requestline(headers) - split_url = urlsplit(path) - - info = URIInfo(hostname=self._host, port=self._port, - path=split_url.path, - query=split_url.query) - - # If we are sending more data to a dynamic response entry, - # we need to call the method again. - if self._entry and self._entry.dynamic_response: - self._entry.body(info, method, body, headers) - - try: - return HTTPretty.historify_request(headers, body, False) - - except Exception as e: - logging.error(traceback.format_exc(e)) - return self._true_sendall(data, *args, **kw) - - # path might come with - s = urlsplit(path) - POTENTIAL_HTTP_PORTS.append(int(s.port or 80)) - headers, body = map(utf8, data.split(b'\r\n\r\n', 1)) - - request = HTTPretty.historify_request(headers, body) - - info = URIInfo(hostname=self._host, port=self._port, - path=s.path, - query=s.query, - last_request=request) - - entries = [] - - for matcher, value in HTTPretty._entries.items(): - if matcher.matches(info): - entries = value - break - - if not entries: - self._true_sendall(data) - return - - self._entry = matcher.get_next_entry(method) - self._request = (info, body, headers) - - def debug(*a, **kw): - frame = inspect.stack()[0][0] - lines = map(utf8, traceback.format_stack(frame)) - - message = [ - "HTTPretty intercepted and unexpected socket method call.", - ("Please open an issue at " - "'https://github.com/gabrielfalcao/HTTPretty/issues'"), - "And paste the following traceback:\n", - "".join(decode_utf8(lines)), - ] - raise RuntimeError("\n".join(message)) - - def settimeout(self, new_timeout): - self.timeout = new_timeout - - sendto = send = recvfrom_into = recv_into = recvfrom = recv = debug - - -def fake_wrap_socket(s, *args, **kw): - return s - - -def create_fake_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): - s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - s.settimeout(timeout) - if source_address: - s.bind(source_address) - s.connect(address) - return s - - -def fake_gethostbyname(host): - return host - - -def fake_gethostname(): - return 'localhost' - - -def fake_getaddrinfo( - host, port, family=None, socktype=None, proto=None, flags=None): - return [(2, 1, 6, '', (host, port))] - - -STATUSES = { - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Non-Authoritative Information", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request a Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Requested Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 
424: "Method Failure", - 425: "Unordered Collection", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Unavailable For Legal Reasons", - 451: "Redirect", - 494: "Request Header Too Large", - 495: "Cert Error", - 496: "No Cert", - 497: "HTTP to HTTPS", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} - - -class Entry(Py3kObject): - def __init__(self, method, uri, body, - adding_headers=None, - forcing_headers=None, - status=200, - streaming=False, - **headers): - - self.method = method - self.uri = uri - - if callable(body): - self.dynamic_response = True - else: - self.dynamic_response = False - - self.body = body - self.streaming = streaming - - if self.dynamic_response or self.streaming: - self.body_length = 0 - else: - self.body_length = len(self.body or '') - - self.adding_headers = adding_headers or {} - self.forcing_headers = forcing_headers or {} - self.status = int(status) - - for k, v in headers.items(): - name = "-".join(k.split("_")).capitalize() - self.adding_headers[name] = v - - self.validate() - - def validate(self): - content_length_keys = 'Content-Length', 'content-length' - for key in content_length_keys: - got = self.adding_headers.get( - key, self.forcing_headers.get(key, None)) - - if got is None: - continue - - try: - igot = int(got) - except ValueError: - warnings.warn( - 'HTTPretty got to register the Content-Length header ' \ - 'with "%r" which is not a number' % got, - ) - - if igot > self.body_length: - raise HTTPrettyError( - 'HTTPretty got inconsistent parameters. The header ' \ - 'Content-Length you registered expects size "%d" but ' \ - 'the body you registered for that has actually length ' \ - '"%d".' 
% ( - igot, self.body_length, - ) - ) - - def __str__(self): - return r'' % ( - self.method, self.uri, self.status) - - def normalize_headers(self, headers): - new = {} - for k in headers: - new_k = '-'.join([s.lower() for s in k.split('-')]) - new[new_k] = headers[k] - - return new - - def fill_filekind(self, fk, request): - now = datetime.utcnow() - - headers = { - 'status': self.status, - 'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'), - 'server': 'Python/HTTPretty', - 'connection': 'close', - } - - if self.forcing_headers: - headers = self.forcing_headers - - if self.dynamic_response: - req_info, req_body, req_headers = request - response = self.body(req_info, self.method, req_body, req_headers) - if isinstance(response, basestring): - body = response - else: - body, new_headers = response - headers.update(new_headers) - else: - body = self.body - - if self.adding_headers: - headers.update(self.normalize_headers(self.adding_headers)) - - headers = self.normalize_headers(headers) - - status = headers.get('status', self.status) - string_list = [ - 'HTTP/1.1 %d %s' % (status, STATUSES[status]), - ] - - if 'date' in headers: - string_list.append('date: %s' % headers.pop('date')) - - if not self.forcing_headers: - content_type = headers.pop('content-type', - 'text/plain; charset=utf-8') - - body_length = self.body_length - if self.dynamic_response: - body_length = len(body) - content_length = headers.pop('content-length', body_length) - - string_list.append('content-type: %s' % content_type) - if not self.streaming: - string_list.append('content-length: %s' % content_length) - - string_list.append('server: %s' % headers.pop('server')) - - for k, v in headers.items(): - string_list.append( - '{0}: {1}'.format(k, v), - ) - - for item in string_list: - fk.write(utf8(item) + b'\n') - - fk.write(b'\r\n') - - if self.streaming: - self.body, body = itertools.tee(body) - for chunk in body: - fk.write(utf8(chunk)) - else: - fk.write(utf8(body)) - - fk.seek(0) - - -def url_fix(s, charset='utf-8'): - scheme, netloc, path, querystring, fragment = urlsplit(s) - path = quote(path, b'/%') - querystring = quote_plus(querystring, b':&=') - return urlunsplit((scheme, netloc, path, querystring, fragment)) - - -class URIInfo(Py3kObject): - def __init__(self, - username='', - password='', - hostname='', - port=80, - path='/', - query='', - fragment='', - scheme='', - last_request=None): - - self.username = username or '' - self.password = password or '' - self.hostname = hostname or '' - - if port: - port = int(port) - - elif scheme == 'https': - port = 443 - - self.port = port or 80 - self.path = path or '' - self.query = query or '' - self.scheme = scheme or (self.port is 80 and "http" or "https") - self.fragment = fragment or '' - self.last_request = last_request - - def __str__(self): - attrs = ( - 'username', - 'password', - 'hostname', - 'port', - 'path', - ) - fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs]) - return r'' % fmt - - def __hash__(self): - return hash(text_type(self)) - - def __eq__(self, other): - self_tuple = ( - self.port, - decode_utf8(self.hostname), - url_fix(decode_utf8(self.path)), - ) - other_tuple = ( - other.port, - decode_utf8(other.hostname), - url_fix(decode_utf8(other.path)), - ) - return self_tuple == other_tuple - - def full_url(self): - credentials = "" - if self.password: - credentials = "{0}:{1}@".format( - self.username, self.password) - - result = "{scheme}://{credentials}{host}{path}".format( - scheme=self.scheme, - credentials=credentials, - 
host=decode_utf8(self.hostname), - path=decode_utf8(self.path) - ) - return result - - @classmethod - def from_uri(cls, uri, entry): - result = urlsplit(uri) - POTENTIAL_HTTP_PORTS.append(int(result.port or 80)) - return cls(result.username, - result.password, - result.hostname, - result.port, - result.path, - result.query, - result.fragment, - result.scheme, - entry) - - -class URIMatcher(object): - regex = None - info = None - - def __init__(self, uri, entries): - if type(uri).__name__ == 'SRE_Pattern': - self.regex = uri - else: - self.info = URIInfo.from_uri(uri, entries) - - self.entries = entries - - #hash of current_entry pointers, per method. - self.current_entries = {} - - def matches(self, info): - if self.info: - return self.info == info - else: - return self.regex.search(info.full_url()) - - def __str__(self): - wrap = 'URLMatcher({0})' - if self.info: - return wrap.format(text_type(self.info)) - else: - return wrap.format(self.regex.pattern) - - def get_next_entry(self, method='GET'): - """Cycle through available responses, but only once. - Any subsequent requests will receive the last response""" - - if method not in self.current_entries: - self.current_entries[method] = 0 - - #restrict selection to entries that match the requested method - entries_for_method = [e for e in self.entries if e.method == method] - - if self.current_entries[method] >= len(entries_for_method): - self.current_entries[method] = -1 - - if not self.entries or not entries_for_method: - raise ValueError('I have no entries for method %s: %s' - % (method, self)) - - entry = entries_for_method[self.current_entries[method]] - if self.current_entries[method] != -1: - self.current_entries[method] += 1 - return entry - - def __hash__(self): - return hash(text_type(self)) - - def __eq__(self, other): - return text_type(self) == text_type(other) - - -class HTTPretty(Py3kObject): - u"""The URI registration class""" - _entries = {} - latest_requests = [] - GET = b'GET' - PUT = b'PUT' - POST = b'POST' - DELETE = b'DELETE' - HEAD = b'HEAD' - PATCH = b'PATCH' - METHODS = (GET, PUT, POST, DELETE, HEAD, PATCH) - last_request = HTTPrettyRequestEmpty() - _is_enabled = False - - @classmethod - def reset(cls): - cls._entries.clear() - cls.latest_requests = [] - cls.last_request = HTTPrettyRequestEmpty() - - @classmethod - def historify_request(cls, headers, body='', append=True): - request = HTTPrettyRequest(headers, body) - cls.last_request = request - if append: - cls.latest_requests.append(request) - else: - cls.latest_requests[-1] = request - return request - - @classmethod - def register_uri(cls, method, uri, body='HTTPretty :)', - adding_headers=None, - forcing_headers=None, - status=200, - responses=None, **headers): - - if isinstance(responses, list) and len(responses) > 0: - for response in responses: - response.uri = uri - response.method = method - entries_for_this_uri = responses - else: - headers['body'] = body - headers['adding_headers'] = adding_headers - headers['forcing_headers'] = forcing_headers - headers['status'] = status - - entries_for_this_uri = [ - cls.Response(method=method, uri=uri, **headers), - ] - - matcher = URIMatcher(uri, entries_for_this_uri) - if matcher in cls._entries: - matcher.entries.extend(cls._entries[matcher]) - del cls._entries[matcher] - - cls._entries[matcher] = entries_for_this_uri - - def __str__(self): - return u'' % len(self._entries) - - @classmethod - def Response(cls, body, method=None, uri=None, adding_headers=None, forcing_headers=None, - status=200, streaming=False, 
**headers): - - headers['body'] = body - headers['adding_headers'] = adding_headers - headers['forcing_headers'] = forcing_headers - headers['status'] = int(status) - headers['streaming'] = streaming - return Entry(method, uri, **headers) - - @classmethod - def disable(cls): - cls._is_enabled = False - socket.socket = old_socket - socket.SocketType = old_socket - socket._socketobject = old_socket - - socket.create_connection = old_create_connection - socket.gethostname = old_gethostname - socket.gethostbyname = old_gethostbyname - socket.getaddrinfo = old_getaddrinfo - socket.inet_aton = old_gethostbyname - - socket.__dict__['socket'] = old_socket - socket.__dict__['_socketobject'] = old_socket - socket.__dict__['SocketType'] = old_socket - - socket.__dict__['create_connection'] = old_create_connection - socket.__dict__['gethostname'] = old_gethostname - socket.__dict__['gethostbyname'] = old_gethostbyname - socket.__dict__['getaddrinfo'] = old_getaddrinfo - socket.__dict__['inet_aton'] = old_gethostbyname - - if socks: - socks.socksocket = old_socksocket - socks.__dict__['socksocket'] = old_socksocket - - if ssl: - ssl.wrap_socket = old_ssl_wrap_socket - ssl.SSLSocket = old_sslsocket - ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket - ssl.__dict__['SSLSocket'] = old_sslsocket - - if not PY3: - ssl.sslwrap_simple = old_sslwrap_simple - ssl.__dict__['sslwrap_simple'] = old_sslwrap_simple - - @classmethod - def is_enabled(cls): - return cls._is_enabled - - @classmethod - def enable(cls): - cls._is_enabled = True - socket.socket = fakesock.socket - socket._socketobject = fakesock.socket - socket.SocketType = fakesock.socket - - socket.create_connection = create_fake_connection - socket.gethostname = fake_gethostname - socket.gethostbyname = fake_gethostbyname - socket.getaddrinfo = fake_getaddrinfo - socket.inet_aton = fake_gethostbyname - - socket.__dict__['socket'] = fakesock.socket - socket.__dict__['_socketobject'] = fakesock.socket - socket.__dict__['SocketType'] = fakesock.socket - - socket.__dict__['create_connection'] = create_fake_connection - socket.__dict__['gethostname'] = fake_gethostname - socket.__dict__['gethostbyname'] = fake_gethostbyname - socket.__dict__['inet_aton'] = fake_gethostbyname - socket.__dict__['getaddrinfo'] = fake_getaddrinfo - - if socks: - socks.socksocket = fakesock.socket - socks.__dict__['socksocket'] = fakesock.socket - - if ssl: - ssl.wrap_socket = fake_wrap_socket - ssl.SSLSocket = FakeSSLSocket - - ssl.__dict__['wrap_socket'] = fake_wrap_socket - ssl.__dict__['SSLSocket'] = FakeSSLSocket - - if not PY3: - ssl.sslwrap_simple = fake_wrap_socket - ssl.__dict__['sslwrap_simple'] = fake_wrap_socket - - -def httprettified(test): - "A decorator tests that use HTTPretty" - def decorate_class(klass): - for attr in dir(klass): - if not attr.startswith('test_'): - continue - - attr_value = getattr(klass, attr) - if not hasattr(attr_value, "__call__"): - continue - - setattr(klass, attr, decorate_callable(attr_value)) - return klass - - def decorate_callable(test): - @functools.wraps(test) - def wrapper(*args, **kw): - HTTPretty.reset() - HTTPretty.enable() - try: - return test(*args, **kw) - finally: - HTTPretty.disable() - return wrapper - - if isinstance(test, ClassTypes): - return decorate_class(test) - return decorate_callable(test) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index e5a2bed65..974b2dc49 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1,10 +1,10 @@ -from urlparse import parse_qs +from urlparse import parse_qs, 
urlparse from jinja2 import Template from .models import s3_backend from moto.core.utils import headers_to_dict -from .utils import bucket_name_from_hostname +from .utils import bucket_name_from_url def all_buckets(): @@ -14,11 +14,23 @@ def all_buckets(): return template.render(buckets=all_buckets) -def bucket_response(uri, method, body, headers): - hostname = uri.hostname - querystring = parse_qs(uri.query) +def bucket_response(request, full_url, headers): + headers = headers_to_dict(headers) + response = _bucket_response(request, full_url, headers) + if isinstance(response, basestring): + return 200, headers, response - bucket_name = bucket_name_from_hostname(hostname) + else: + status_code, headers, response_content = response + return status_code, headers, response_content + + +def _bucket_response(request, full_url, headers): + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query) + method = request.method + + bucket_name = bucket_name_from_url(full_url) if not bucket_name: # If no bucket specified, list all buckets return all_buckets() @@ -38,7 +50,7 @@ def bucket_response(uri, method, body, headers): result_folders=result_folders ) else: - return "", dict(status=404) + return 404, headers, "" elif method == 'PUT': new_bucket = s3_backend.create_bucket(bucket_name) template = Template(S3_BUCKET_CREATE_RESPONSE) @@ -48,37 +60,53 @@ def bucket_response(uri, method, body, headers): if removed_bucket is None: # Non-existant bucket template = Template(S3_DELETE_NON_EXISTING_BUCKET) - return template.render(bucket_name=bucket_name), dict(status=404) + return 404, headers, template.render(bucket_name=bucket_name) elif removed_bucket: # Bucket exists template = Template(S3_DELETE_BUCKET_SUCCESS) - return template.render(bucket=removed_bucket), dict(status=204) + return 204, headers, template.render(bucket=removed_bucket) else: # Tried to delete a bucket that still has keys template = Template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) - return template.render(bucket=removed_bucket), dict(status=409) + return 409, headers, template.render(bucket=removed_bucket) else: raise NotImplementedError("Method {} has not been impelemented in the S3 backend yet".format(method)) -def key_response(uri_info, method, body, headers): - - key_name = uri_info.path.lstrip('/') - hostname = uri_info.hostname +def key_response(request, full_url, headers): headers = headers_to_dict(headers) - bucket_name = bucket_name_from_hostname(hostname) + response = _key_response(request, full_url, headers) + if isinstance(response, basestring): + return 200, headers, response + else: + status_code, headers, response_content = response + return status_code, headers, response_content + + +def _key_response(request, full_url, headers): + parsed_url = urlparse(full_url) + method = request.method + + key_name = parsed_url.path.lstrip('/') + bucket_name = bucket_name_from_url(full_url) + if hasattr(request, 'body'): + # Boto + body = request.body + else: + # Flask server + body = request.data if method == 'GET': key = s3_backend.get_key(bucket_name, key_name) if key: return key.value else: - return "", dict(status=404) + return 404, headers, "" if method == 'PUT': - if 'x-amz-copy-source' in headers: + if 'x-amz-copy-source' in request.headers: # Copy key - src_bucket, src_key = headers.get("x-amz-copy-source").split("/") + src_bucket, src_key = request.headers.get("x-amz-copy-source").split("/") s3_backend.copy_key(src_bucket, src_key, bucket_name, key_name) template = Template(S3_OBJECT_COPY_RESPONSE) return 
template.render(key=src_key) @@ -92,20 +120,23 @@ def key_response(uri_info, method, body, headers): # empty string as part of closing the connection. new_key = s3_backend.set_key(bucket_name, key_name, body) template = Template(S3_OBJECT_RESPONSE) - return template.render(key=new_key), new_key.response_dict + headers.update(new_key.response_dict) + return 200, headers, template.render(key=new_key) key = s3_backend.get_key(bucket_name, key_name) if key: - return "", key.response_dict + headers.update(key.response_dict) + return 200, headers, "" elif method == 'HEAD': key = s3_backend.get_key(bucket_name, key_name) if key: - return S3_OBJECT_RESPONSE, key.response_dict + headers.update(key.response_dict) + return 200, headers, S3_OBJECT_RESPONSE else: - return "", dict(status=404) + return 404, headers, "" elif method == 'DELETE': removed_key = s3_backend.delete_key(bucket_name, key_name) template = Template(S3_DELETE_OBJECT_SUCCESS) - return template.render(bucket=removed_key), dict(status=204) + return 204, headers, template.render(bucket=removed_key) else: raise NotImplementedError("Method {} has not been impelemented in the S3 backend yet".format(method)) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index d9e5671e9..765303743 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -5,20 +5,19 @@ import urlparse bucket_name_regex = re.compile("(.+).s3.amazonaws.com") -def bucket_name_from_hostname(hostname): - if 'amazonaws.com' in hostname: - bucket_result = bucket_name_regex.search(hostname) +def bucket_name_from_url(url): + domain = urlparse.urlparse(url).netloc + + # If 'www' prefixed, strip it. + domain = domain.lstrip("www.") + + if 'amazonaws.com' in domain: + bucket_result = bucket_name_regex.search(domain) if bucket_result: return bucket_result.groups()[0] else: - # In server mode. Use left-most part of subdomain for bucket name - split_url = urlparse.urlparse(hostname) - - # If 'www' prefixed, strip it. - clean_hostname = split_url.netloc.lstrip("www.") - - if '.' in clean_hostname: - return clean_hostname.split(".")[0] + if '.' in domain: + return domain.split(".")[0] else: # No subdomain found. 
return None diff --git a/requirements.txt b/requirements.txt index b3731770e..62f6f0a27 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ coverage freezegun -#httpretty mock nose https://github.com/spulec/python-coveralls/tarball/796d9dba34b759664e42ba39e6414209a0f319ad diff --git a/setup.py b/setup.py index fab0a59d1..71244de60 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,11 @@ setup( packages=find_packages(), install_requires=[ "boto", - "Jinja2", "flask", + "httpretty", + "Jinja2", + ], + dependency_links=[ + "https://github.com/gabrielfalcao/HTTPretty/tarball/2347df40a3a3cd00e73f0353f5ea2670ad3405c1", ], ) diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py new file mode 100644 index 000000000..5b03d61fd --- /dev/null +++ b/tests/test_s3/test_s3_utils.py @@ -0,0 +1,14 @@ +from sure import expect +from moto.s3.utils import bucket_name_from_url + + +def test_base_url(): + expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) + + +def test_localhost_bucket(): + expect(bucket_name_from_url('https://foobar.localhost:5000/abc')).should.equal("foobar") + + +def test_localhost_without_bucket(): + expect(bucket_name_from_url('https://www.localhost:5000/def')).should.equal(None) From 0fc2a638dd2f263d780fe8bc5f2756f5f2e7c2c8 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 3 May 2013 19:53:56 -0400 Subject: [PATCH 20/85] fix dependency link --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 71244de60..3232bc5ff 100644 --- a/setup.py +++ b/setup.py @@ -19,10 +19,10 @@ setup( install_requires=[ "boto", "flask", - "httpretty", + "httpretty==0.6.0a", "Jinja2", ], dependency_links=[ - "https://github.com/gabrielfalcao/HTTPretty/tarball/2347df40a3a3cd00e73f0353f5ea2670ad3405c1", + "https://github.com/gabrielfalcao/HTTPretty/tarball/2347df40a3a3cd00e73f0353f5ea2670ad3405c1#egg=httpretty-0.6.0a", ], ) From 43450771737980c56b0010223cde432531cdaaac Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 3 May 2013 20:14:33 -0400 Subject: [PATCH 21/85] coverage back at 100% --- moto/core/responses.py | 10 +++------- moto/core/utils.py | 36 +++--------------------------------- moto/s3/responses.py | 24 ++++-------------------- 3 files changed, 10 insertions(+), 60 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index a25f5f26a..6f8b365fb 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -3,7 +3,7 @@ import json from urlparse import parse_qs, urlparse -from moto.core.utils import headers_to_dict, camelcase_to_underscores, method_names_from_class +from moto.core.utils import camelcase_to_underscores, method_names_from_class class BaseResponse(object): @@ -20,7 +20,7 @@ class BaseResponse(object): if not querystring: querystring = parse_qs(self.body) if not querystring: - querystring = headers_to_dict(headers) + querystring = headers self.uri = full_url self.path = urlparse(full_url).path @@ -59,11 +59,7 @@ def metadata_response(request, full_url, headers): parsed_url = urlparse(full_url) tomorrow = datetime.datetime.now() + datetime.timedelta(days=1) path = parsed_url.path.lstrip("/latest/meta-data/") - if path == '': - result = "iam/" - elif path == 'iam/': - result = 'security-credentials/' - elif path == 'iam/security-credentials/': + if path == 'iam/security-credentials/': result = 'default-role' elif path == 'iam/security-credentials/default-role': result = json.dumps(dict( diff --git a/moto/core/utils.py b/moto/core/utils.py index 
13aca14b0..35a1e1292 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -1,36 +1,10 @@ import inspect import random import re -from urlparse import parse_qs from flask import request -def headers_to_dict(headers): - if isinstance(headers, dict): - # If already dict, return - return headers - - result = {} - for index, header in enumerate(headers.split("\r\n")): - if not header: - continue - if index: - # Parsing headers - key, value = header.split(":", 1) - result[key.strip()] = value.strip() - else: - # Parsing method and path - path_and_querystring = header.split(" /")[1] - if '?' in path_and_querystring: - querystring = path_and_querystring.split("?")[1] - else: - querystring = path_and_querystring - queryset_dict = parse_qs(querystring) - result.update(queryset_dict) - return result - - def camelcase_to_underscores(argument): ''' Converts a camelcase param like theNewAttribute to the equivalent python underscore variable like the_new_attribute''' @@ -92,10 +66,6 @@ class convert_flask_to_httpretty_response(object): def __call__(self, args=None, **kwargs): headers = dict(request.headers) result = self.callback(request, request.url, headers) - if isinstance(result, basestring): - # result is just the response - return result - else: - # result is a status, headers, response tuple - status, headers, response = result - return response, status, headers + # result is a status, headers, response tuple + status, headers, response = result + return response, status, headers diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 974b2dc49..018719aa5 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -3,7 +3,6 @@ from urlparse import parse_qs, urlparse from jinja2 import Template from .models import s3_backend -from moto.core.utils import headers_to_dict from .utils import bucket_name_from_url @@ -15,7 +14,6 @@ def all_buckets(): def bucket_response(request, full_url, headers): - headers = headers_to_dict(headers) response = _bucket_response(request, full_url, headers) if isinstance(response, basestring): return 200, headers, response @@ -74,8 +72,6 @@ def _bucket_response(request, full_url, headers): def key_response(request, full_url, headers): - headers = headers_to_dict(headers) - response = _key_response(request, full_url, headers) if isinstance(response, basestring): return 200, headers, response @@ -110,22 +106,10 @@ def _key_response(request, full_url, headers): s3_backend.copy_key(src_bucket, src_key, bucket_name, key_name) template = Template(S3_OBJECT_COPY_RESPONSE) return template.render(key=src_key) - content_length = int(headers.get('Content-Length', 0)) - if body or (body == '' and content_length == 0): - # We want to write the key in once of two circumstances. - # - Anytime we are given a truthy body value - # - We are given an empty body value and the content length is zero. - # The reason we do not set the key to an empty string if the - # content length is not zero is because we are sometimes sent an - # empty string as part of closing the connection. 
- new_key = s3_backend.set_key(bucket_name, key_name, body) - template = Template(S3_OBJECT_RESPONSE) - headers.update(new_key.response_dict) - return 200, headers, template.render(key=new_key) - key = s3_backend.get_key(bucket_name, key_name) - if key: - headers.update(key.response_dict) - return 200, headers, "" + new_key = s3_backend.set_key(bucket_name, key_name, body) + template = Template(S3_OBJECT_RESPONSE) + headers.update(new_key.response_dict) + return 200, headers, template.render(key=new_key) elif method == 'HEAD': key = s3_backend.get_key(bucket_name, key_name) if key: From caf73557cdbce9d8028e2792a1aae5b3f620219b Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 6 May 2013 23:33:59 -0400 Subject: [PATCH 22/85] Fix issue with large S3 values. Closes #11. --- moto/s3/models.py | 11 +++++++++++ moto/s3/responses.py | 13 ++++++++++++- tests/test_s3/test_s3.py | 11 +++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 7912edfe9..0a84ba731 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -11,6 +11,10 @@ class FakeKey(object): self.value = value self.last_modified = datetime.datetime.now() + def append_to_value(self, value): + self.value += value + self.last_modified = datetime.datetime.now() + @property def etag(self): value_md5 = md5.new() @@ -81,6 +85,13 @@ class S3Backend(BaseBackend): return new_key + def append_to_key(self, bucket_name, key_name, value): + key_name = clean_key_name(key_name) + + key = self.get_key(bucket_name, key_name) + key.append_to_value(value) + return key + def get_key(self, bucket_name, key_name): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 018719aa5..1d69384da 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -106,7 +106,18 @@ def _key_response(request, full_url, headers): s3_backend.copy_key(src_bucket, src_key, bucket_name, key_name) template = Template(S3_OBJECT_COPY_RESPONSE) return template.render(key=src_key) - new_key = s3_backend.set_key(bucket_name, key_name, body) + streaming_request = hasattr(request, 'streaming') and request.streaming + closing_connection = headers.get('connection') == 'close' + if closing_connection and streaming_request: + # Closing the connection of a streaming request. 
No more data + new_key = s3_backend.get_key(bucket_name, key_name) + elif streaming_request: + # Streaming request, more data + new_key = s3_backend.append_to_key(bucket_name, key_name, body) + else: + # Initial data + new_key = s3_backend.set_key(bucket_name, key_name, body) + request.streaming = True template = Template(S3_OBJECT_RESPONSE) headers.update(new_key.response_dict) return 200, headers, template.render(key=new_key) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index a68e511e3..264b8bb08 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -78,6 +78,17 @@ def test_empty_key_set_on_existing_key(): bucket.get_key("the-key").get_contents_as_string().should.equal('') +@mock_s3 +def test_large_key_save(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar" * 100000) + + bucket.get_key("the-key").get_contents_as_string().should.equal('foobar' * 100000) + + @mock_s3 def test_copy_key(): conn = boto.connect_s3('the_key', 'the_secret') From 6ceb36161c497058c7357d73d698b595847f610f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 6 May 2013 23:36:27 -0400 Subject: [PATCH 23/85] 0.2.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 3232bc5ff..edbd948d6 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.1.5', + version='0.2.0', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 4c8244b7556c921efbde4da32a73f9868ea38c19 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 7 May 2013 00:03:05 -0400 Subject: [PATCH 24/85] Clean up code for listing s3 keys. Fix #14. 
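The listing cleanup below swaps key_name.lstrip(prefix) for key_name.replace(prefix, "", 1) and passes the request's delimiter through instead of hard-coding '/'. The lstrip/replace switch is the subtle part: str.lstrip strips a set of characters, not a literal prefix. A minimal sketch of the difference (key names here are made up for illustration, not taken from the test suite):

```python
# Illustration only, not part of the patch: str.lstrip treats its
# argument as a set of characters rather than a prefix, which is why
# the listing code moves to str.replace(prefix, "", 1).
prefix = "toplevel/x/"
key_name = "toplevel/x/x-ray/key"

print(key_name.lstrip(prefix))          # '-ray/key'  -- the leading 'x' of 'x-ray' is eaten too
print(key_name.replace(prefix, "", 1))  # 'x-ray/key' -- only the literal prefix is removed
```

The delimiter change is exercised by the two new assertions at the bottom of the test diff, which list under prefix 'toplevel/x' once with no delimiter and once with '/'.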
--- moto/s3/models.py | 11 ++++++----- moto/s3/responses.py | 2 +- tests/test_s3/test_s3.py | 8 ++++++++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 0a84ba731..69ef827ff 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -104,17 +104,18 @@ class S3Backend(BaseBackend): if prefix: for key_name, key in bucket.keys.iteritems(): if key_name.startswith(prefix): - if delimiter and '/' in key_name.lstrip(prefix): + key_without_prefix = key_name.replace(prefix, "", 1) + if delimiter and delimiter in key_without_prefix: # If delimiter, we need to split out folder_results - key_without_prefix = "{}/".format(key_name.lstrip(prefix).split("/")[0]) - folder_results.add("{}{}".format(prefix, key_without_prefix)) + key_without_delimiter = key_without_prefix.split(delimiter)[0] + folder_results.add("{}{}{}".format(prefix, key_without_delimiter, delimiter)) else: key_results.add(key) else: for key_name, key in bucket.keys.iteritems(): - if delimiter and '/' in key_name: + if delimiter and delimiter in key_name: # If delimiter, we need to split out folder_results - folder_results.add(key_name.split("/")[0]) + folder_results.add(key_name.split(delimiter)[0]) else: key_results.add(key) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 1d69384da..766c06e9f 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -37,7 +37,7 @@ def _bucket_response(request, full_url, headers): bucket = s3_backend.get_bucket(bucket_name) if bucket: prefix = querystring.get('prefix', [None])[0] - delimiter = querystring.get('delimiter') + delimiter = querystring.get('delimiter', [None])[0] result_keys, result_folders = s3_backend.prefix_query(bucket, prefix, delimiter) template = Template(S3_BUCKET_GET_RESPONSE) return template.render( diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 264b8bb08..54da1b9ac 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -227,3 +227,11 @@ def test_bucket_key_listing_order(): delimiter = '/' keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)] keys.should.equal(['toplevel']) + + delimiter = None + keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] + keys.should.equal([u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key']) + + delimiter = '/' + keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] + keys.should.equal([u'toplevel/x/']) From 755fe6563b5f2bedb23144a2a49e5ef27bdf9c7d Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 7 May 2013 00:19:04 -0400 Subject: [PATCH 25/85] Fix missing dynamodb key status code to fix has_item. 
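The interesting part is on the boto side: with the old error response a missing item surfaced as a generic DynamoDBResponseError, which is what kept Table.has_item from working; a 404 lets boto raise its typed DynamoDBKeyNotFoundError instead, and has_item turns that into False. A rough, self-contained sketch of the behaviour the updated tests pin down (table name and keys are illustrative):

```python
import boto
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
from moto import mock_dynamodb


@mock_dynamodb
def check_has_item():
    conn = boto.connect_dynamodb()
    schema = conn.create_schema(hash_key_name='forum_name',
                                hash_key_proto_value=str)
    table = conn.create_table(name='messages', schema=schema,
                              read_units=10, write_units=10)

    # A miss now comes back as the typed exception...
    try:
        table.get_item(hash_key='tester')
    except DynamoDBKeyNotFoundError:
        pass

    # ...which is exactly what Table.has_item relies on to return False.
    assert not table.has_item('tester')

check_has_item()
```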
Closes #20 --- moto/dynamodb/responses.py | 2 +- tests/test_dynamodb/test_dynamodb_table_with_range_key.py | 8 ++++++-- .../test_dynamodb_table_without_range_key.py | 5 +++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index dece06542..8f3172112 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -195,7 +195,7 @@ class DynamoHandler(BaseResponse): return dynamo_json_dump(item_dict) else: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' - return self.error(er) + return self.error(er, status=404) def batch_get_item(self): table_batches = self.body['RequestItems'] diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index bff2be93b..e357c5928 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -6,6 +6,7 @@ from moto import mock_dynamodb from moto.dynamodb import dynamodb_backend from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError from boto.exception import DynamoDBResponseError @@ -101,6 +102,8 @@ def test_item_add_and_describe_and_update(): ) item.put() + table.has_item("LOLCat Forum", "Check this out!").should.equal(True) + returned_item = table.get_item( hash_key='LOLCat Forum', range_key='Check this out!', @@ -150,7 +153,8 @@ def test_get_missing_item(): table.get_item.when.called_with( hash_key='tester', range_key='other', - ).should.throw(DynamoDBResponseError) + ).should.throw(DynamoDBKeyNotFoundError) + table.has_item("foobar").should.equal(False) @mock_dynamodb @@ -163,7 +167,7 @@ def test_get_item_with_undeclared_table(): 'HashKeyElement': {'S': 'tester'}, 'RangeKeyElement': {'S': 'test-range'}, }, - ).should.throw(DynamoDBResponseError) + ).should.throw(DynamoDBKeyNotFoundError) @mock_dynamodb diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index c1ffdf2ce..a3d68b113 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -6,6 +6,7 @@ from moto import mock_dynamodb from moto.dynamodb import dynamodb_backend from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError from boto.exception import DynamoDBResponseError @@ -137,7 +138,7 @@ def test_get_missing_item(): table.get_item.when.called_with( hash_key='tester', - ).should.throw(DynamoDBResponseError) + ).should.throw(DynamoDBKeyNotFoundError) @mock_dynamodb @@ -149,7 +150,7 @@ def test_get_item_with_undeclared_table(): key={ 'HashKeyElement': {'S': 'tester'}, }, - ).should.throw(DynamoDBResponseError) + ).should.throw(DynamoDBKeyNotFoundError) @mock_dynamodb From 549cb23b7f952a2fefc0cb00690ce63f9db1bfe4 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 16 May 2013 22:24:26 -0400 Subject: [PATCH 26/85] Better error messaging for dynamodb table gets for range key tables without range keys used. 
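For callers this turns what used to be a confusing failure into a proper validation error when the range key is simply missing from the request. A rough sketch mirroring the new test_get_item_without_range_key (schema and key values are illustrative):

```python
import boto
from boto.dynamodb.exceptions import DynamoDBValidationError
from moto import mock_dynamodb


@mock_dynamodb
def check_missing_range_key():
    conn = boto.connect_dynamodb()
    schema = conn.create_schema(
        hash_key_name='forum_name', hash_key_proto_value=str,
        range_key_name='subject', range_key_proto_value=str,
    )
    table = conn.create_table(name='messages', schema=schema,
                              read_units=10, write_units=10)

    # The table has a range key, but the lookup only supplies the hash key,
    # so the backend now answers with a 400 ValidationException.
    try:
        table.get_item(hash_key='LOLCat Forum')
    except DynamoDBValidationError:
        print('rejected as a validation error, not a missing key')

check_missing_range_key()
```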
cc #28 --- moto/dynamodb/models.py | 12 ++++++-- moto/dynamodb/responses.py | 7 ++++- .../test_dynamodb_table_with_range_key.py | 28 +++++++++++++++++-- 3 files changed, 41 insertions(+), 6 deletions(-) diff --git a/moto/dynamodb/models.py b/moto/dynamodb/models.py index 84330e279..66612caa8 100644 --- a/moto/dynamodb/models.py +++ b/moto/dynamodb/models.py @@ -101,6 +101,10 @@ class Table(object): self.created_at = datetime.datetime.now() self.items = defaultdict(dict) + @property + def has_range_key(self): + return self.range_key_attr is not None + @property def describe(self): results = { @@ -122,7 +126,7 @@ class Table(object): "TableSizeBytes": 0, } } - if self.range_key_attr: + if self.has_range_key: results["Table"]["KeySchema"]["RangeKeyElement"] = { "AttributeName": self.range_key_attr, "AttributeType": self.range_key_type @@ -132,7 +136,7 @@ class Table(object): def __len__(self): count = 0 for key, value in self.items.iteritems(): - if self.range_key_attr: + if self.has_range_key: count += len(value) else: count += 1 @@ -143,7 +147,7 @@ class Table(object): def put_item(self, item_attrs): hash_value = DynamoType(item_attrs.get(self.hash_key_attr)) - if self.range_key_attr: + if self.has_range_key: range_value = DynamoType(item_attrs.get(self.range_key_attr)) else: range_value = None @@ -157,6 +161,8 @@ class Table(object): return item def get_item(self, hash_key, range_key): + if self.has_range_key and not range_key: + raise ValueError("Table has a range key, but no range key was passed into get_item") try: if range_key: return self.items[hash_key][range_key] diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index 8f3172112..b2cc29e8c 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -188,12 +188,17 @@ class DynamoHandler(BaseResponse): hash_key = key['HashKeyElement'] range_key = key.get('RangeKeyElement') attrs_to_get = self.body.get('AttributesToGet') - item = dynamodb_backend.get_item(name, hash_key, range_key) + try: + item = dynamodb_backend.get_item(name, hash_key, range_key) + except ValueError: + er = 'com.amazon.coral.validate#ValidationException' + return self.error(er, status=400) if item: item_dict = item.describe_attrs(attrs_to_get) item_dict['ConsumedCapacityUnits'] = 0.5 return dynamo_json_dump(item_dict) else: + # Item not found er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, status=404) diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index e357c5928..83cc81c10 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -6,7 +6,7 @@ from moto import mock_dynamodb from moto.dynamodb import dynamodb_backend from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError from boto.exception import DynamoDBResponseError @@ -154,7 +154,7 @@ def test_get_missing_item(): hash_key='tester', range_key='other', ).should.throw(DynamoDBKeyNotFoundError) - table.has_item("foobar").should.equal(False) + table.has_item("foobar", "more").should.equal(False) @mock_dynamodb @@ -170,6 +170,30 @@ def test_get_item_with_undeclared_table(): ).should.throw(DynamoDBKeyNotFoundError) +@mock_dynamodb +def test_get_item_without_range_key(): + conn = boto.connect_dynamodb() + message_table_schema = conn.create_schema( + 
hash_key_name="test_hash", + hash_key_proto_value=int, + range_key_name="test_range", + range_key_proto_value=int, + ) + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + + hash_key = 3241526475 + range_key = 1234567890987 + new_item = table.new_item(hash_key=hash_key, range_key=range_key) + new_item.put() + + table.get_item.when.called_with(hash_key=hash_key).should.throw(DynamoDBValidationError) + + @mock_dynamodb def test_delete_item(): conn = boto.connect_dynamodb() From fc02faa5bf2a08d8154f0ebf87c8cda2681794d0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 16 May 2013 22:26:45 -0400 Subject: [PATCH 27/85] 0.2.1 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index edbd948d6..ebd048ab5 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.0', + version='0.2.1', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 3880be5ea9944dcfaf62794158e356e4ee3651bb Mon Sep 17 00:00:00 2001 From: Dan Berglund Date: Tue, 14 May 2013 19:47:24 +0200 Subject: [PATCH 28/85] Added support for metadata on files, and support for POST:ing files to S3 --- moto/s3/models.py | 11 +++++++++++ moto/s3/responses.py | 33 ++++++++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 69ef827ff..15d5fbe67 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -10,6 +10,13 @@ class FakeKey(object): self.name = name self.value = value self.last_modified = datetime.datetime.now() + self._metadata = {} + + def set_metadata(self, key, metadata): + self._metadata[key] = metadata + + def get_metadata(self, key): + return self._metadata[key] def append_to_value(self, value): self.value += value @@ -31,6 +38,10 @@ class FakeKey(object): # https://github.com/boto/boto/issues/466 RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' return self.last_modified.strftime(RFC1123) + + @property + def metadata(self): + return self._metadata @property def response_dict(self): diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 766c06e9f..092379b82 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1,4 +1,5 @@ from urlparse import parse_qs, urlparse +import re from jinja2 import Template @@ -67,6 +68,22 @@ def _bucket_response(request, full_url, headers): # Tried to delete a bucket that still has keys template = Template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR) return 409, headers, template.render(bucket=removed_bucket) + elif method == 'POST': + #POST to bucket-url should create file from form + key = request.form['key'] + f = request.form['file'] + new_key = s3_backend.set_key(bucket_name, key, "") + #TODO Set actual file + + #Metadata + meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) + for form_id in request.form: + result = meta_regex.match(form_id) + if result: + meta_key = result.group(0).lower() + metadata = request.form[form_id] + new_key.set_metadata(meta_key, metadata) + return 200, headers, "" else: raise NotImplementedError("Method {} has not been impelemented in the S3 backend yet".format(method)) @@ -84,8 +101,8 @@ def _key_response(request, full_url, headers): parsed_url = urlparse(full_url) method = request.method - key_name = parsed_url.path.lstrip('/') bucket_name = bucket_name_from_url(full_url) + key_name = 
parsed_url.path.split(bucket_name + '/')[-1] if hasattr(request, 'body'): # Boto body = request.body @@ -96,7 +113,8 @@ def _key_response(request, full_url, headers): if method == 'GET': key = s3_backend.get_key(bucket_name, key_name) if key: - return key.value + headers.update(key.metadata) + return 200, headers, key.value else: return 404, headers, "" if method == 'PUT': @@ -118,6 +136,15 @@ def _key_response(request, full_url, headers): # Initial data new_key = s3_backend.set_key(bucket_name, key_name, body) request.streaming = True + + #Metadata + meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) + for header in request.headers: + result = meta_regex.match(header[0]) + if result: + meta_key = result.group(0).lower() + metadata = header[1] + new_key.set_metadata(meta_key, metadata) template = Template(S3_OBJECT_RESPONSE) headers.update(new_key.response_dict) return 200, headers, template.render(key=new_key) @@ -125,7 +152,7 @@ def _key_response(request, full_url, headers): key = s3_backend.get_key(bucket_name, key_name) if key: headers.update(key.response_dict) - return 200, headers, S3_OBJECT_RESPONSE + return 200, headers, "" else: return 404, headers, "" elif method == 'DELETE': From d8e9301c54b9e68c9ee7cd9a16e484e4ab111162 Mon Sep 17 00:00:00 2001 From: Dan Berglund Date: Wed, 15 May 2013 08:55:25 +0200 Subject: [PATCH 29/85] Added metadata to HEAD-response, boto uses this when only metadata is fetched --- moto/s3/responses.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 092379b82..23cbb5410 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -151,6 +151,7 @@ def _key_response(request, full_url, headers): elif method == 'HEAD': key = s3_backend.get_key(bucket_name, key_name) if key: + headers.update(key.metadata) headers.update(key.response_dict) return 200, headers, "" else: From 7de4399b93a3b258c9567a4b7bd18fd1d665cfae Mon Sep 17 00:00:00 2001 From: Dan Berglund Date: Fri, 17 May 2013 11:43:09 +0200 Subject: [PATCH 30/85] Added tests and made current tests pass --- moto/s3/responses.py | 35 +++++++++++++++++++++++------------ tests/test_s3/test_s3.py | 37 ++++++++++++++++++++++++++++++++++++- 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 23cbb5410..2062ccee9 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -70,18 +70,28 @@ def _bucket_response(request, full_url, headers): return 409, headers, template.render(bucket=removed_bucket) elif method == 'POST': #POST to bucket-url should create file from form - key = request.form['key'] - f = request.form['file'] - new_key = s3_backend.set_key(bucket_name, key, "") - #TODO Set actual file + if hasattr(request, 'form'): + #Not HTTPretty + form = request.form + else: + #HTTPretty, build new form object + form = {} + for kv in request.body.split('&'): + k, v = kv.split('=') + form[k] = v + + key = form['key'] + f = form['file'] + + new_key = s3_backend.set_key(bucket_name, key, f) #Metadata meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) - for form_id in request.form: + for form_id in form: result = meta_regex.match(form_id) if result: meta_key = result.group(0).lower() - metadata = request.form[form_id] + metadata = form[form_id] new_key.set_metadata(meta_key, metadata) return 200, headers, "" else: @@ -101,8 +111,8 @@ def _key_response(request, full_url, headers): parsed_url = urlparse(full_url) method = request.method + key_name = 
parsed_url.path.lstrip('/') bucket_name = bucket_name_from_url(full_url) - key_name = parsed_url.path.split(bucket_name + '/')[-1] if hasattr(request, 'body'): # Boto body = request.body @@ -140,11 +150,12 @@ def _key_response(request, full_url, headers): #Metadata meta_regex = re.compile('^x-amz-meta-([a-zA-Z0-9\-_]+)$', flags=re.IGNORECASE) for header in request.headers: - result = meta_regex.match(header[0]) - if result: - meta_key = result.group(0).lower() - metadata = header[1] - new_key.set_metadata(meta_key, metadata) + if isinstance(header, basestring): + result = meta_regex.match(header) + if result: + meta_key = result.group(0).lower() + metadata = request.headers[header] + new_key.set_metadata(meta_key, metadata) template = Template(S3_OBJECT_RESPONSE) headers.update(new_key.response_dict) return 200, headers, template.render(key=new_key) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 54da1b9ac..1107c4189 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -101,7 +101,17 @@ def test_copy_key(): bucket.get_key("the-key").get_contents_as_string().should.equal("some value") bucket.get_key("new-key").get_contents_as_string().should.equal("some value") + +@mock_s3 +def test_set_metadata(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = 'the-key' + key.set_metadata('md', 'Metadatastring') + key.set_contents_from_string("Testval") + bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') @freeze_time("2012-01-01 12:00:00") @mock_s3 @@ -163,9 +173,34 @@ def test_get_all_buckets(): buckets.should.have.length_of(2) +@mock_s3 +def test_post_to_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://foobar.s3.amazonaws.com/", { + 'key': 'the-key', + 'file': 'nothing' + }) + + bucket.get_key('the-key').get_contents_as_string().should.equal('nothing') + +@mock_s3 +def test_post_with_metadata_to_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://foobar.s3.amazonaws.com/", { + 'key': 'the-key', + 'file': 'nothing', + 'x-amz-meta-test': 'metadata' + }) + + bucket.get_key('the-key').get_metadata('test').should.equal('metadata') + @mock_s3 def test_bucket_method_not_implemented(): - requests.post.when.called_with("https://foobar.s3.amazonaws.com/").should.throw(NotImplementedError) + requests.patch.when.called_with("https://foobar.s3.amazonaws.com/").should.throw(NotImplementedError) @mock_s3 From 504aabc9686427c76bdab3a5334ad087e017f8d5 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 17 May 2013 09:49:16 -0400 Subject: [PATCH 31/85] Add Dan Berglund to authors --- AUTHORS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS.md b/AUTHORS.md index 8ce4ae01c..57606ee0b 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -4,3 +4,4 @@ Moto is written by Steve Pulec with contributions from: * [Zach Smith](https://github.com/zmsmith) * [Dilshod Tadjibaev](https://github.com/antimora) +* [Dan Berglund](https://github.com/cheif) From d42a27b3b336eedd10075a653fdefd916f86b2af Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 17 May 2013 09:49:31 -0400 Subject: [PATCH 32/85] 0.2.2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ebd048ab5..4e2d49924 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( 
name='moto', - version='0.2.1', + version='0.2.2', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 9f19662d1c3227638c7f06e72ed28a0ec59802a2 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 17 May 2013 19:35:53 -0400 Subject: [PATCH 33/85] allow passing user data to run_instances --- moto/ec2/models.py | 49 ++++++++++++++++++++------------ moto/ec2/responses/instances.py | 3 +- tests/test_ec2/test_instances.py | 15 ++++++++++ 3 files changed, 48 insertions(+), 19 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index c254e1716..f69919aec 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -16,10 +16,29 @@ from .utils import ( class Instance(BotoInstance): - def __init__(self): - self._state_name = None - self._state_code = None + def __init__(self, image_id, user_data): super(Instance, self).__init__() + self.id = random_instance_id() + self.image_id = image_id + self._state_name = "pending" + self._state_code = 0 + self.user_data = user_data + + def start(self): + self._state_name = "pending" + self._state_code = 0 + + def stop(self): + self._state_name = "stopping" + self._state_code = 64 + + def terminate(self): + self._state_name = "shutting-down" + self._state_code = 32 + + def reboot(self): + self._state_name = "pending" + self._state_code = 0 class InstanceBackend(object): @@ -33,15 +52,14 @@ class InstanceBackend(object): if instance.id == instance_id: return instance - def add_instances(self, image_id, count): + def add_instances(self, image_id, count, user_data): new_reservation = Reservation() new_reservation.id = random_reservation_id() for index in range(count): - new_instance = Instance() - new_instance.id = random_instance_id() - new_instance.image_id = image_id - new_instance._state_name = "pending" - new_instance._state_code = 0 + new_instance = Instance( + image_id, + user_data, + ) new_reservation.instances.append(new_instance) self.reservations[new_reservation.id] = new_reservation return new_reservation @@ -50,8 +68,7 @@ class InstanceBackend(object): started_instances = [] for instance in self.all_instances(): if instance.id in instance_ids: - instance._state_name = "pending" - instance._state_code = 0 + instance.start() started_instances.append(instance) return started_instances @@ -60,8 +77,7 @@ class InstanceBackend(object): stopped_instances = [] for instance in self.all_instances(): if instance.id in instance_ids: - instance._state_name = "stopping" - instance._state_code = 64 + instance.stop() stopped_instances.append(instance) return stopped_instances @@ -70,8 +86,7 @@ class InstanceBackend(object): terminated_instances = [] for instance in self.all_instances(): if instance.id in instance_ids: - instance._state_name = "shutting-down" - instance._state_code = 32 + instance.terminate() terminated_instances.append(instance) return terminated_instances @@ -80,9 +95,7 @@ class InstanceBackend(object): rebooted_instances = [] for instance in self.all_instances(): if instance.id in instance_ids: - # TODO double check instances go to pending when reboot - instance._state_name = "pending" - instance._state_code = 0 + instance.reboot() rebooted_instances.append(instance) return rebooted_instances diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 7170a0928..d752bf0f6 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -13,7 +13,8 @@ class InstanceResponse(object): def run_instances(self): min_count = 
int(self.querystring.get('MinCount', ['1'])[0]) image_id = self.querystring.get('ImageId')[0] - new_reservation = ec2_backend.add_instances(image_id, min_count) + user_data = self.querystring.get('UserData') + new_reservation = ec2_backend.add_instances(image_id, min_count, user_data) template = Template(EC2_RUN_INSTANCES) return template.render(reservation=new_reservation) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 618b4d1ff..47e8b4b16 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1,3 +1,5 @@ +import base64 + import boto from boto.ec2.instance import Reservation, InstanceAttribute import sure # flake8: noqa @@ -98,3 +100,16 @@ def test_instance_attribute_user_data(): instance_attribute = instance.get_attribute("userData") instance_attribute.should.be.a(InstanceAttribute) instance_attribute.get("userData").should.equal("this is my user data") + + +@mock_ec2 +def test_user_data_with_run_instance(): + user_data = "some user data" + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', user_data=user_data) + instance = reservation.instances[0] + + instance_attribute = instance.get_attribute("userData") + instance_attribute.should.be.a(InstanceAttribute) + decoded_user_data = base64.decodestring(instance_attribute.get("userData")) + decoded_user_data.should.equal("some user data") From 3bc975188f912b353d4236dd6b095859a07fdefb Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 17 May 2013 19:41:39 -0400 Subject: [PATCH 34/85] more s3 tests for posting --- moto/s3/models.py | 7 ++----- tests/test_s3/test_s3.py | 12 ++++++++---- tests/test_s3/test_server.py | 15 +++++++++++++++ 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 15d5fbe67..ef39fea69 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -11,12 +11,9 @@ class FakeKey(object): self.value = value self.last_modified = datetime.datetime.now() self._metadata = {} - + def set_metadata(self, key, metadata): self._metadata[key] = metadata - - def get_metadata(self, key): - return self._metadata[key] def append_to_value(self, value): self.value += value @@ -38,7 +35,7 @@ class FakeKey(object): # https://github.com/boto/boto/issues/466 RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' return self.last_modified.strftime(RFC1123) - + @property def metadata(self): return self._metadata diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 1107c4189..5215d6409 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -101,7 +101,8 @@ def test_copy_key(): bucket.get_key("the-key").get_contents_as_string().should.equal("some value") bucket.get_key("new-key").get_contents_as_string().should.equal("some value") - + + @mock_s3 def test_set_metadata(): conn = boto.connect_s3('the_key', 'the_secret') @@ -113,6 +114,7 @@ def test_set_metadata(): bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') + @freeze_time("2012-01-01 12:00:00") @mock_s3 def test_last_modified(): @@ -177,19 +179,20 @@ def test_get_all_buckets(): def test_post_to_bucket(): conn = boto.connect_s3('the_key', 'the_secret') bucket = conn.create_bucket("foobar") - + requests.post("https://foobar.s3.amazonaws.com/", { 'key': 'the-key', 'file': 'nothing' }) bucket.get_key('the-key').get_contents_as_string().should.equal('nothing') - + + @mock_s3 def test_post_with_metadata_to_bucket(): conn = boto.connect_s3('the_key', 'the_secret') bucket = 
conn.create_bucket("foobar") - + requests.post("https://foobar.s3.amazonaws.com/", { 'key': 'the-key', 'file': 'nothing', @@ -198,6 +201,7 @@ def test_post_with_metadata_to_bucket(): bucket.get_key('the-key').get_metadata('test').should.equal('metadata') + @mock_s3 def test_bucket_method_not_implemented(): requests.patch.when.called_with("https://foobar.s3.amazonaws.com/").should.throw(NotImplementedError) diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 0bfeb6efa..0ee507eae 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -33,3 +33,18 @@ def test_s3_server_bucket_create(): res = test_client.get('/bar', 'http://foobar.localhost:5000/') res.status_code.should.equal(200) res.data.should.equal("test value") + + +def test_s3_server_post_to_bucket(): + test_client = server.app.test_client() + res = test_client.put('/', 'http://foobar.localhost:5000/') + res.status_code.should.equal(200) + + test_client.post('/', "https://foobar.localhost:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/the-key', 'http://foobar.localhost:5000/') + res.status_code.should.equal(200) + res.data.should.equal("nothing") From 124bc04598fb744b3c01a8e8d21526e243db60e4 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 18 May 2013 19:35:26 -0400 Subject: [PATCH 35/85] 0.2.3 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4e2d49924..64da00124 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.2', + version='0.2.3', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 212d9c7abed0e682b210ffb2cc978f40a1991fa2 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 24 May 2013 17:22:34 -0400 Subject: [PATCH 36/85] core sts endpoints completed --- README.md | 2 ++ moto/__init__.py | 1 + moto/core/utils.py | 9 +++++ moto/s3/models.py | 6 ++-- moto/server.py | 1 + moto/sts/__init__.py | 2 ++ moto/sts/models.py | 39 ++++++++++++++++++++ moto/sts/responses.py | 67 +++++++++++++++++++++++++++++++++++ moto/sts/urls.py | 9 +++++ tests/test_sts/test_server.py | 16 +++++++++ tests/test_sts/test_sts.py | 52 +++++++++++++++++++++++++++ 11 files changed, 201 insertions(+), 3 deletions(-) create mode 100644 moto/sts/__init__.py create mode 100644 moto/sts/models.py create mode 100644 moto/sts/responses.py create mode 100644 moto/sts/urls.py create mode 100644 tests/test_sts/test_server.py create mode 100644 tests/test_sts/test_sts.py diff --git a/README.md b/README.md index a22e8b839..ed8e14424 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,8 @@ It gets even better! Moto isn't just S3. 
Here's the status of the other AWS serv |---------------------------------------------------------------------------| | SQS | @mock_sqs | core endpoints done | |---------------------------------------------------------------------------| +| STS | @mock_sts | core endpoints done | +|---------------------------------------------------------------------------| ``` ### Another Example diff --git a/moto/__init__.py b/moto/__init__.py index 0548f9653..49f121a3c 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -6,3 +6,4 @@ from .ec2 import mock_ec2 from .s3 import mock_s3 from .ses import mock_ses from .sqs import mock_sqs +from .sts import mock_sts diff --git a/moto/core/utils.py b/moto/core/utils.py index 35a1e1292..53418edbf 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -69,3 +69,12 @@ class convert_flask_to_httpretty_response(object): # result is a status, headers, response tuple status, headers, response = result return response, status, headers + + +def iso_8601_datetime(datetime): + return datetime.strftime("%Y-%m-%dT%H:%M:%SZ") + + +def rfc_1123_datetime(datetime): + RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' + return datetime.strftime(RFC1123) diff --git a/moto/s3/models.py b/moto/s3/models.py index ef39fea69..524d547e9 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -2,6 +2,7 @@ import datetime import md5 from moto.core import BaseBackend +from moto.core.utils import iso_8601_datetime, rfc_1123_datetime from .utils import clean_key_name @@ -27,14 +28,13 @@ class FakeKey(object): @property def last_modified_ISO8601(self): - return self.last_modified.strftime("%Y-%m-%dT%H:%M:%SZ") + return iso_8601_datetime(self.last_modified) @property def last_modified_RFC1123(self): # Different datetime formats depending on how the key is obtained # https://github.com/boto/boto/issues/466 - RFC1123 = '%a, %d %b %Y %H:%M:%S GMT' - return self.last_modified.strftime(RFC1123) + return rfc_1123_datetime(self.last_modified) @property def metadata(self): diff --git a/moto/server.py b/moto/server.py index e0d7c60fb..18980bffe 100644 --- a/moto/server.py +++ b/moto/server.py @@ -8,6 +8,7 @@ from moto.ec2 import ec2_backend # flake8: noqa from moto.s3 import s3_backend # flake8: noqa from moto.ses import ses_backend # flake8: noqa from moto.sqs import sqs_backend # flake8: noqa +from moto.sts import sts_backend # flake8: noqa from moto.core.utils import convert_flask_to_httpretty_response diff --git a/moto/sts/__init__.py b/moto/sts/__init__.py new file mode 100644 index 000000000..f1ca24c7f --- /dev/null +++ b/moto/sts/__init__.py @@ -0,0 +1,2 @@ +from .models import sts_backend +mock_sts = sts_backend.decorator diff --git a/moto/sts/models.py b/moto/sts/models.py new file mode 100644 index 000000000..3a9e64e0c --- /dev/null +++ b/moto/sts/models.py @@ -0,0 +1,39 @@ +import datetime +from moto.core import BaseBackend +from moto.core.utils import iso_8601_datetime + + +class Token(object): + def __init__(self, duration): + now = datetime.datetime.now() + self.expiration = now + datetime.timedelta(seconds=duration) + + @property + def expiration_ISO8601(self): + return iso_8601_datetime(self.expiration) + + +class AssumedRole(object): + def __init__(self, role_session_name, role_arn, policy, duration, external_id): + self.session_name = role_session_name + self.arn = role_arn + self.policy = policy + now = datetime.datetime.now() + self.expiration = now + datetime.timedelta(seconds=duration) + self.external_id = external_id + + @property + def expiration_ISO8601(self): + return 
iso_8601_datetime(self.expiration) + + +class STSBackend(BaseBackend): + def get_session_token(self, duration): + token = Token(duration=duration) + return token + + def assume_role(self, **kwargs): + role = AssumedRole(**kwargs) + return role + +sts_backend = STSBackend() diff --git a/moto/sts/responses.py b/moto/sts/responses.py new file mode 100644 index 000000000..e97c9ec51 --- /dev/null +++ b/moto/sts/responses.py @@ -0,0 +1,67 @@ +from jinja2 import Template + +from moto.core.responses import BaseResponse +from .models import sts_backend + + +class TokenResponse(BaseResponse): + + def get_session_token(self): + duration = int(self.querystring.get('DurationSeconds', [43200])[0]) + token = sts_backend.get_session_token(duration=duration) + template = Template(GET_SESSION_TOKEN_RESPONSE) + return template.render(token=token) + + def assume_role(self): + role_session_name = self.querystring.get('RoleSessionName')[0] + role_arn = self.querystring.get('RoleArn')[0] + + policy = self.querystring.get('Policy', [None])[0] + duration = int(self.querystring.get('DurationSeconds', [3600])[0]) + external_id = self.querystring.get('ExternalId', [None])[0] + + role = sts_backend.assume_role( + role_session_name=role_session_name, + role_arn=role_arn, + policy=policy, + duration=duration, + external_id=external_id, + ) + template = Template(ASSUME_ROLE_RESPONSE) + return template.render(role=role) + + +GET_SESSION_TOKEN_RESPONSE = """ + + + AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE + wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY + {{ token.expiration_ISO8601 }} + AKIAIOSFODNN7EXAMPLE + + + + 58c5dbae-abef-11e0-8cfe-09039844ac7d + +""" + + +ASSUME_ROLE_RESPONSE = """ + + + BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE + aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY + {{ role.expiration_ISO8601 }} + AKIAIOSFODNN7EXAMPLE + + + {{ role.arn }} + ARO123EXAMPLE123:{{ role.session_name }} + + 6 + + + c6104cbe-af31-11e0-8154-cbc7ccf896c7 + +""" diff --git a/moto/sts/urls.py b/moto/sts/urls.py new file mode 100644 index 000000000..ab69fe8c2 --- /dev/null +++ b/moto/sts/urls.py @@ -0,0 +1,9 @@ +from .responses import TokenResponse + +url_bases = [ + "https?://sts.amazonaws.com" +] + +url_paths = { + '{0}/$': TokenResponse().dispatch, +} diff --git a/tests/test_sts/test_server.py b/tests/test_sts/test_server.py new file mode 100644 index 000000000..0e7687c7e --- /dev/null +++ b/tests/test_sts/test_server.py @@ -0,0 +1,16 @@ +import sure # flake8: noqa + +import moto.server as server + +''' +Test the different server responses +''' +server.configure_urls("sts") + + +def test_sts_get_session_token(): + test_client = server.app.test_client() + res = test_client.get('/?Action=GetSessionToken') + res.status_code.should.equal(200) + res.data.should.contain("SessionToken") + res.data.should.contain("AccessKeyId") diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py new file mode 100644 index 000000000..0d05b613e --- /dev/null +++ b/tests/test_sts/test_sts.py @@ -0,0 +1,52 @@ +import json + +import boto +from boto.exception import BotoServerError +from freezegun import freeze_time +import sure # flake8: noqa + +from moto 
import mock_sts + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts +def test_get_session_token(): + conn = boto.connect_sts() + token = conn.get_session_token(duration=123) + + token.expiration.should.equal('2012-01-01T12:02:03Z') + token.session_token.should.equal("AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts +def test_assume_role(): + conn = boto.connect_sts() + + policy = json.dumps({ + "Statement": [ + { + "Sid": "Stmt13690092345534", + "Action": [ + "S3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::foobar-tester" + ] + }, + ] + }) + s3_role = "arn:aws:iam::123456789012:role/test-role" + role = conn.assume_role(s3_role, "session-name", policy, duration_seconds=123) + + credentials = role.credentials + credentials.expiration.should.equal('2012-01-01T12:02:03Z') + credentials.session_token.should.equal("BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + credentials.secret_key.should.equal("aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + + role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") + role.user.assume_role_id.should.contain("session-name") From d47c481912ff155ca8ac12116524056008ef27b0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 25 May 2013 17:13:40 -0400 Subject: [PATCH 37/85] 0.2.4 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 64da00124..b780866b5 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.3', + version='0.2.4', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From a2de647880f81b2be3ccd035c8b386b61b10146d Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 4 Jun 2013 17:30:32 -0400 Subject: [PATCH 38/85] change httpretty to real version --- setup.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/setup.py b/setup.py index b780866b5..35cc70f85 100644 --- a/setup.py +++ b/setup.py @@ -19,10 +19,7 @@ setup( install_requires=[ "boto", "flask", - "httpretty==0.6.0a", + "httpretty>=0.6.1", "Jinja2", ], - dependency_links=[ - "https://github.com/gabrielfalcao/HTTPretty/tarball/2347df40a3a3cd00e73f0353f5ea2670ad3405c1#egg=httpretty-0.6.0a", - ], ) From dd3cfe5ab627daa157c1ced1e28c381c8e9a2a43 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 4 Jun 2013 17:30:48 -0400 Subject: [PATCH 39/85] 0.2.5 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 35cc70f85..a95a6820b 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.4', + version='0.2.5', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 87c86bb902f03e952bce4b550de8205bb68753a8 Mon Sep 17 00:00:00 2001 From: Steve Pulec 
Date: Tue, 25 Jun 2013 12:36:21 -0400 Subject: [PATCH 40/85] Need to reset app view functions to prevent Flask assertion error on resetting view functions. --- moto/server.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/server.py b/moto/server.py index 18980bffe..5746a7108 100644 --- a/moto/server.py +++ b/moto/server.py @@ -26,6 +26,8 @@ class RegexConverter(BaseConverter): def configure_urls(service): backend = globals()["{}_backend".format(service)] from werkzeug.routing import Map + # Reset view functions to reset the app + app.view_functions = {} app.url_map = Map() app.url_map.converters['regex'] = RegexConverter for url_path, handler in backend.flask_paths.iteritems(): From eb70174ed57c3151aa69f3240f6985e1a020649c Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 25 Jun 2013 12:42:24 -0400 Subject: [PATCH 41/85] Add port option. --- README.md | 8 ++++++++ moto/server.py | 10 +++++++--- tests/test_core/test_server.py | 10 ++++++++-- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index ed8e14424..243d8c203 100644 --- a/README.md +++ b/README.md @@ -156,6 +156,14 @@ $ moto_server ec2 * Running on http://127.0.0.1:5000/ ``` +You can also pass the port as the second argument: + +```console +$ moto_server ec2 3000 + * Running on http://127.0.0.1:3000/ +``` + + Then go to [localhost](http://localhost:5000/?Action=DescribeInstances) to see a list of running instances (it will be empty since you haven't added any yet). ## Install diff --git a/moto/server.py b/moto/server.py index 5746a7108..70f5b03db 100644 --- a/moto/server.py +++ b/moto/server.py @@ -35,14 +35,18 @@ def configure_urls(service): def main(args=sys.argv): - if len(args) != 2: - print("Usage: moto_server ") + if len(args) not in range(2, 4): + print("Usage: moto_server [port]") sys.exit(1) service_name = args[1] configure_urls(service_name) + try: + port = int(args[2]) + except IndexError: + port = None app.testing = True - app.run() + app.run(port=port) if __name__ == '__main__': main() diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index e86098c4f..6cf87b1d4 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -6,7 +6,7 @@ from moto.server import main def test_wrong_arguments(): try: - main(["name", "test1", "test2"]) + main(["name", "test1", "test2", "test3"]) assert False, ("main() when called with the incorrect number of args" " should raise a system exit") except SystemExit: @@ -16,4 +16,10 @@ def test_wrong_arguments(): @patch('moto.server.app.run') def test_right_arguments(app_run): main(["name", "s3"]) - app_run.assert_called_once_with() + app_run.assert_called_once_with(port=None) + + +@patch('moto.server.app.run') +def test_port_argument(app_run): + main(["name", "s3", 8080]) + app_run.assert_called_once_with(port=8080) From 7ef6a1c44eba65a98f833614cc48c7b30836dd17 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 25 Jun 2013 12:45:24 -0400 Subject: [PATCH 42/85] clean backend finding --- moto/server.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/server.py b/moto/server.py index 70f5b03db..a46222488 100644 --- a/moto/server.py +++ b/moto/server.py @@ -24,7 +24,8 @@ class RegexConverter(BaseConverter): def configure_urls(service): - backend = globals()["{}_backend".format(service)] + module = sys.modules[__name__] + backend = getattr(module, "{}_backend".format(service)) from werkzeug.routing import Map # Reset view functions to reset the app app.view_functions = {} From 
d7bfe5dea7d65b7b01460ba4a2824d393a978eaa Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 25 Jun 2013 13:34:11 -0400 Subject: [PATCH 43/85] 0.2.6 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index a95a6820b..ad5254ab6 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.5', + version='0.2.6', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From c6d6a0273dfde7ec8c4d9416f7fe4a7c2495a5ae Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 27 Jun 2013 00:01:33 -0400 Subject: [PATCH 44/85] Fix for iam credentials in boto version < 2.9 --- moto/core/responses.py | 25 +++++++++++++++++-------- tests/test_ec2/test_instances.py | 4 ++-- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/moto/core/responses.py b/moto/core/responses.py index 6f8b365fb..7e896e961 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -55,17 +55,26 @@ def metadata_response(request, full_url, headers): http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html """ - parsed_url = urlparse(full_url) tomorrow = datetime.datetime.now() + datetime.timedelta(days=1) + credentials = dict( + AccessKeyId="test-key", + SecretAccessKey="test-secret-key", + Token="test-session-token", + Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") + ) + path = parsed_url.path.lstrip("/latest/meta-data/") - if path == 'iam/security-credentials/': + if path == '': + result = 'iam' + elif path == 'iam': + result = json.dumps({ + 'security-credentials': { + 'default-role': credentials + } + }) + elif path == 'iam/security-credentials/': result = 'default-role' elif path == 'iam/security-credentials/default-role': - result = json.dumps(dict( - AccessKeyId="test-key", - SecretAccessKey="test-secret-key", - Token="test-session-token", - Expiration=tomorrow.strftime("%Y-%m-%dT%H:%M:%SZ") - )) + result = json.dumps(credentials) return 200, headers, result diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 47e8b4b16..11c2cc00e 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -9,7 +9,7 @@ from moto import mock_ec2 ################ Test Readme ############### def add_servers(ami_id, count): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.connect_ec2() for index in range(count): conn.run_instances(ami_id) @@ -18,7 +18,7 @@ def add_servers(ami_id, count): def test_add_servers(): add_servers('ami-1234abcd', 2) - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.connect_ec2() reservations = conn.get_all_instances() assert len(reservations) == 2 instance1 = reservations[0].instances[0] From 02dbcc9d48962970aa42267e2c632bd8b5d95004 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 27 Jun 2013 00:04:05 -0400 Subject: [PATCH 45/85] matrix build on boto version for travis --- .travis.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 0b82672cd..6196ed47a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,13 @@ language: python python: - 2.7 +matrix: + - BOTO_VERSION=2.9 + - BOTO_VERSION=2.8 install: - pip install . 
- pip install -r requirements.txt script: - make test after_success: - - coveralls \ No newline at end of file + - coveralls From fd8a9e4a590c4f8a9b261395a7b31af63cf15165 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 27 Jun 2013 00:04:55 -0400 Subject: [PATCH 46/85] 0.2.7 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ad5254ab6..2786c8c3e 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.6', + version='0.2.7', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From a0a71d0f442a9b5b5c15b69c2f5ca95f65937417 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 27 Jun 2013 00:10:19 -0400 Subject: [PATCH 47/85] matrix build on boto version for travis --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 6196ed47a..eef61251f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,9 @@ python: matrix: - BOTO_VERSION=2.9 - BOTO_VERSION=2.8 + - BOTO_VERSION=2.7 install: + - pip install boto==$BOTO_VERSION - pip install . - pip install -r requirements.txt script: From 70bb7d47e06d92795946d3c1c94929ee7755f0ef Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 27 Jun 2013 09:35:46 -0400 Subject: [PATCH 48/85] bump for travis --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index fb83906d6..d997ae4b5 100644 --- a/Makefile +++ b/Makefile @@ -7,3 +7,4 @@ init: test: rm -f .coverage @nosetests --with-coverage ./tests/ + From b8957e0c6e5dfa746636934052cb2d5d022fe7cc Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 8 Jul 2013 12:46:36 -0400 Subject: [PATCH 49/85] Stop using deprecated md5, switch to hashlib. 
Closes #36 --- moto/s3/models.py | 4 ++-- moto/sqs/models.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 524d547e9..62de695fa 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1,5 +1,5 @@ import datetime -import md5 +import hashlib from moto.core import BaseBackend from moto.core.utils import iso_8601_datetime, rfc_1123_datetime @@ -22,7 +22,7 @@ class FakeKey(object): @property def etag(self): - value_md5 = md5.new() + value_md5 = hashlib.md5() value_md5.update(self.value) return '"{0}"'.format(value_md5.hexdigest()) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 3f3b6b493..5c6d04fe7 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -1,4 +1,4 @@ -import md5 +import hashlib import time from moto.core import BaseBackend @@ -15,7 +15,7 @@ class Message(object): @property def md5(self): - body_md5 = md5.new() + body_md5 = hashlib.md5() body_md5.update(self.body) return body_md5.hexdigest() From fda5f83f8765d5f8e23b5d9eca7d2107614259bd Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 8 Jul 2013 19:25:47 -0400 Subject: [PATCH 50/85] Properly implement ec2.get_all_instances filtering by instance id --- moto/ec2/models.py | 17 +++++++++++++++++ moto/ec2/responses/instances.py | 7 ++++++- tests/test_ec2/test_instances.py | 20 ++++++++++++++++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f69919aec..368d27bb6 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -1,3 +1,4 @@ +import copy from collections import defaultdict from boto.ec2.instance import Instance as BotoInstance, Reservation @@ -117,6 +118,22 @@ class InstanceBackend(object): instances.append(instance) return instances + def get_reservations_by_instance_ids(self, instance_ids): + """ Go through all of the reservations and filter to only return those + associated with the given instance_ids. 
+ """ + reservations = [] + for reservation in self.reservations.values(): + reservation_instance_ids = [instance.id for instance in reservation.instances] + matching_reservation = any(instance_id in reservation_instance_ids for instance_id in instance_ids) + if matching_reservation: + # We need to make a copy of the reservation because we have to modify the + # instances to limit to those requested + reservation_copy = copy.deepcopy(reservation) + reservation_copy.instances = [instance for instance in reservation_copy.instances if instance.id in instance_ids] + reservations.append(reservation_copy) + return reservations + def all_reservations(self): return self.reservations.values() diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index d752bf0f6..d0f044e50 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -7,8 +7,13 @@ from moto.ec2.utils import instance_ids_from_querystring class InstanceResponse(object): def describe_instances(self): + instance_ids = instance_ids_from_querystring(self.querystring) template = Template(EC2_DESCRIBE_INSTANCES) - return template.render(reservations=ec2_backend.all_reservations()) + if instance_ids: + reservations = ec2_backend.get_reservations_by_instance_ids(instance_ids) + else: + reservations = ec2_backend.all_reservations() + return template.render(reservations=reservations) def run_instances(self): min_count = int(self.querystring.get('MinCount', ['1'])[0]) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 11c2cc00e..941c2c42a 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -50,6 +50,26 @@ def test_instance_launch_and_terminate(): instance.state.should.equal('shutting-down') +@mock_ec2 +def test_get_instances_by_id(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instance1, instance2 = reservation.instances + + reservations = conn.get_all_instances(instance_ids=[instance1.id]) + reservations.should.have.length_of(1) + reservation = reservations[0] + reservation.instances.should.have.length_of(1) + reservation.instances[0].id.should.equal(instance1.id) + + reservations = conn.get_all_instances(instance_ids=[instance1.id, instance2.id]) + reservations.should.have.length_of(1) + reservation = reservations[0] + reservation.instances.should.have.length_of(2) + instance_ids = [instance.id for instance in reservation.instances] + instance_ids.should.equal([instance1.id, instance2.id]) + + @mock_ec2 def test_instance_start_and_stop(): conn = boto.connect_ec2('the_key', 'the_secret') From dda5b2f14525818d39bc206f95688001012e9ab5 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 8 Jul 2013 19:40:24 -0400 Subject: [PATCH 51/85] Add tests for instance metadata --- tests/test_core/test_instance_metadata.py | 36 +++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 tests/test_core/test_instance_metadata.py diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py new file mode 100644 index 000000000..e11e67b94 --- /dev/null +++ b/tests/test_core/test_instance_metadata.py @@ -0,0 +1,36 @@ +import requests + +from moto import mock_ec2 + + +@mock_ec2 +def test_latest_meta_data(): + res = requests.get("http://169.254.169.254/latest/meta-data/") + res.content.should.equal("iam") + + +@mock_ec2 +def test_meta_data_iam(): + res = requests.get("http://169.254.169.254/latest/meta-data/iam") + json_response = res.json() + 
default_role = json_response['security-credentials']['default-role'] + default_role.should.contain('AccessKeyId') + default_role.should.contain('SecretAccessKey') + default_role.should.contain('Token') + default_role.should.contain('Expiration') + + +@mock_ec2 +def test_meta_data_security_credentials(): + res = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/") + res.content.should.equal("default-role") + + +@mock_ec2 +def test_meta_data_default_role(): + res = requests.get("http://169.254.169.254/latest/meta-data/iam/security-credentials/default-role") + json_response = res.json() + json_response.should.contain('AccessKeyId') + json_response.should.contain('SecretAccessKey') + json_response.should.contain('Token') + json_response.should.contain('Expiration') From 76ea9172da56816ed6faa9bad6babc18fc060f3a Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 8 Jul 2013 20:29:13 -0400 Subject: [PATCH 52/85] Clean up ec2 instance state --- moto/ec2/models.py | 25 +++++++++++++++---------- moto/ec2/responses/instances.py | 20 ++++++++++---------- 2 files changed, 25 insertions(+), 20 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 368d27bb6..978f5cf80 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -16,30 +16,35 @@ from .utils import ( ) +class InstanceState(object): + def __init__(self, name='pending', code=0): + self.name = name + self.code = code + + class Instance(BotoInstance): def __init__(self, image_id, user_data): super(Instance, self).__init__() self.id = random_instance_id() self.image_id = image_id - self._state_name = "pending" - self._state_code = 0 + self._state = InstanceState() self.user_data = user_data def start(self): - self._state_name = "pending" - self._state_code = 0 + self._state.name = "pending" + self._state.code = 0 def stop(self): - self._state_name = "stopping" - self._state_code = 64 + self._state.name = "stopping" + self._state.code = 64 def terminate(self): - self._state_name = "shutting-down" - self._state_code = 32 + self._state.name = "shutting-down" + self._state.code = 32 def reboot(self): - self._state_name = "pending" - self._state_code = 0 + self._state.name = "pending" + self._state.code = 0 class InstanceBackend(object): diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index d0f044e50..ef7f53fb8 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -86,8 +86,8 @@ EC2_RUN_INSTANCES = """ + +39070fe4-6f6d-4565-aecd-7850607e4555""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 258927f05..841444b8d 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -1,4 +1,5 @@ import random +import re def random_id(prefix=''): @@ -53,7 +54,7 @@ def resource_ids_from_querystring(querystring_dict): prefix = 'ResourceId' response_values = {} for key, value in querystring_dict.iteritems(): - if prefix in key: + if key.startswith(prefix): resource_index = key.replace(prefix + ".", "") tag_key = querystring_dict.get("Tag.{}.Key".format(resource_index))[0] @@ -65,3 +66,42 @@ def resource_ids_from_querystring(querystring_dict): response_values[value[0]] = (tag_key, tag_value) return response_values + + +def filters_from_querystring(querystring_dict): + response_values = {} + for key, value in querystring_dict.iteritems(): + match = re.search("Filter.(\d).Name", key) + if match: + filter_index = match.groups()[0] + value_prefix = "Filter.{}.Value".format(filter_index) + filter_values = [filter_value[0] for filter_key, filter_value in 
querystring_dict.iteritems() if filter_key.startswith(value_prefix)]
+        response_values[value[0]] = filter_values
+    return response_values
+
+
+filter_dict_attribute_mapping = {
+    'instance-state-name': 'state'
+}
+
+
+def passes_filter_dict(instance, filter_dict):
+    for filter_name, filter_values in filter_dict.iteritems():
+        if filter_name in filter_dict_attribute_mapping:
+            instance_attr = filter_dict_attribute_mapping[filter_name]
+        else:
+            raise NotImplementedError("Filter dicts have not been implemented in Moto for '%s' yet. Feel free to open an issue at https://github.com/spulec/moto/issues", filter_name)
+        instance_value = getattr(instance, instance_attr)
+        if instance_value not in filter_values:
+            return False
+    return True
+
+
+def filter_reservations(reservations, filter_dict):
+    for reservation in reservations:
+        new_instances = []
+        for instance in reservation.instances:
+            if passes_filter_dict(instance, filter_dict):
+                new_instances.append(instance)
+        reservation.instances = new_instances
+    return reservations
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 941c2c42a..7176566b5 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -2,6 +2,7 @@ import base64
 
 import boto
 from boto.ec2.instance import Reservation, InstanceAttribute
+from boto.exception import EC2ResponseError
 import sure  # flake8: noqa
 
 from moto import mock_ec2
@@ -69,6 +70,34 @@ def test_get_instances_by_id():
     instance_ids = [instance.id for instance in reservation.instances]
     instance_ids.should.equal([instance1.id, instance2.id])
+    # Call get_all_instances with a bad id should raise an error
+    conn.get_all_instances.when.called_with(instance_ids=[instance1.id, "i-1234abcd"]).should.throw(
+        EC2ResponseError,
+        "The instance ID 'i-1234abcd' does not exist"
+    )
+
+
+@mock_ec2
+def test_get_instances_filtering_by_state():
+    conn = boto.connect_ec2()
+    reservation = conn.run_instances('ami-1234abcd', min_count=3)
+    instance1, instance2, instance3 = reservation.instances
+
+    conn.terminate_instances([instance1.id])
+
+    reservations = conn.get_all_instances(filters={'instance-state-name': 'pending'})
+    reservations.should.have.length_of(1)
+    # Since we terminated instance1, only instance2 and instance3 should be returned
+    instance_ids = [instance.id for instance in reservations[0].instances]
+    set(instance_ids).should.equal(set([instance2.id, instance3.id]))
+
+    reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'pending'})
+    reservations.should.have.length_of(1)
+    instance_ids = [instance.id for instance in reservations[0].instances]
+    instance_ids.should.equal([instance2.id])
+
+    conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
+
 
 @mock_ec2
 def test_instance_start_and_stop():

From e8a18f84ff479d17f2d8584f6f97c99f2a3406ed Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 8 Jul 2013 21:18:19 -0400
Subject: [PATCH 54/85] Try fixing travis matrix

---
 .travis.yml | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index eef61251f..c7af21821 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,10 +1,11 @@
 language: python
 python:
   - 2.7
-matrix:
-  - BOTO_VERSION=2.9
-  - BOTO_VERSION=2.8
-  - BOTO_VERSION=2.7
+env:
+  matrix:
+    - BOTO_VERSION=2.9
+    - BOTO_VERSION=2.8
+    - BOTO_VERSION=2.7
 install:
   - pip install boto==$BOTO_VERSION
   - pip install .
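The DescribeInstances filter support added to moto/ec2/utils.py above is driven by the raw EC2 querystring. Below is a minimal sketch of how those helpers are expected to be exercised; it is illustrative only and not part of the patch series, it uses Python 2 to match the iteritems() calls, and the Filter.1.Name / Filter.1.Value.1 keys are an assumption inferred from the regex in filters_from_querystring rather than something spelled out in the patches.

    # Illustrative sketch only; assumes moto at this point in its history, with
    # filters_from_querystring available in moto.ec2.utils as added above.
    from moto.ec2.utils import filters_from_querystring

    # A boto call such as conn.get_all_instances(filters={'instance-state-name': 'pending'})
    # is assumed to reach the backend as a dict mapping query parameters to value lists:
    querystring = {
        'Action': ['DescribeInstances'],
        'Filter.1.Name': ['instance-state-name'],
        'Filter.1.Value.1': ['pending'],
    }
    print(filters_from_querystring(querystring))
    # {'instance-state-name': ['pending']}

filter_reservations() then keeps only the instances whose attribute (looked up through filter_dict_attribute_mapping, so 'instance-state-name' maps to the instance's state) matches one of the requested values, and unknown filter names raise NotImplementedError.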
From 46f9430bdd454faba1427dbc8f59a53098789050 Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 8 Jul 2013 22:20:55 -0400
Subject: [PATCH 55/85] Fix bug with modifying original reservations

---
 moto/ec2/models.py | 16 ++++++++++------
 moto/ec2/responses/instances.py | 2 +-
 tests/test_ec2/test_instances.py | 4 ++++
 3 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/moto/ec2/models.py b/moto/ec2/models.py
index 6a48c453e..1dac5e3bf 100644
--- a/moto/ec2/models.py
+++ b/moto/ec2/models.py
@@ -129,23 +129,27 @@ class InstanceBackend(object):
         associated with the given instance_ids.
         """
         reservations = []
-        for reservation in self.reservations.values():
+        for reservation in self.all_reservations(make_copy=True):
             reservation_instance_ids = [instance.id for instance in reservation.instances]
             matching_reservation = any(instance_id in reservation_instance_ids for instance_id in instance_ids)
             if matching_reservation:
                 # We need to make a copy of the reservation because we have to modify the
                 # instances to limit to those requested
-                reservation_copy = copy.deepcopy(reservation)
-                reservation_copy.instances = [instance for instance in reservation_copy.instances if instance.id in instance_ids]
-                reservations.append(reservation_copy)
+                reservation.instances = [instance for instance in reservation.instances if instance.id in instance_ids]
+                reservations.append(reservation)
         found_instance_ids = [instance.id for reservation in reservations for instance in reservation.instances]
         if len(found_instance_ids) != len(instance_ids):
             invalid_id = list(set(instance_ids).difference(set(found_instance_ids)))[0]
             raise InvalidIdError(invalid_id)
         return reservations
 
-    def all_reservations(self):
-        return self.reservations.values()
+    def all_reservations(self, make_copy=False):
+        if make_copy:
+            # Return copies so that other functions can modify them without changing
+            # the originals
+            return [copy.deepcopy(reservation) for reservation in self.reservations.values()]
+        else:
+            return [reservation for reservation in self.reservations.values()]
 
 
 class TagBackend(object):
diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py
index 388fb8446..1b6097a7d 100644
--- a/moto/ec2/responses/instances.py
+++ b/moto/ec2/responses/instances.py
@@ -16,7 +16,7 @@ class InstanceResponse(object):
             template = Template(EC2_INVALID_INSTANCE_ID)
             return template.render(instance_id=exc.instance_id), dict(status=400)
         else:
-            reservations = ec2_backend.all_reservations()
+            reservations = ec2_backend.all_reservations(make_copy=True)
             filter_dict = filters_from_querystring(self.querystring)
             reservations = filter_reservations(reservations, filter_dict)
 
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 7176566b5..691e7908f 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -96,6 +96,10 @@ def test_get_instances_filtering_by_state():
     instance_ids = [instance.id for instance in reservations[0].instances]
     instance_ids.should.equal([instance2.id])
 
+    # get_all_instances should still return all 3
+    reservations = conn.get_all_instances()
+    reservations[0].instances.should.have.length_of(3)
+
     conn.get_all_instances.when.called_with(filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
 

From af31744dbd6890ae13c62df8ef5ef34888ac0438 Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 8 Jul 2013 22:25:25 -0400
Subject: [PATCH 56/85] If ec2 filtering excludes all instances for a
 reservation, remove the reservation from the results.
--- moto/ec2/utils.py | 7 +++++-- tests/test_ec2/test_instances.py | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 841444b8d..7dfa3ea03 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -98,10 +98,13 @@ def passes_filter_dict(instance, filter_dict): def filter_reservations(reservations, filter_dict): + result = [] for reservation in reservations: new_instances = [] for instance in reservation.instances: if passes_filter_dict(instance, filter_dict): new_instances.append(instance) - reservation.instances = new_instances - return reservations + if new_instances: + reservation.instances = new_instances + result.append(reservation) + return result diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 691e7908f..5244afd4d 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -96,6 +96,9 @@ def test_get_instances_filtering_by_state(): instance_ids = [instance.id for instance in reservations[0].instances] instance_ids.should.equal([instance2.id]) + reservations = conn.get_all_instances([instance2.id], filters={'instance-state-name': 'terminating'}) + list(reservations).should.equal([]) + # get_all_instances should still return all 3 reservations = conn.get_all_instances() reservations[0].instances.should.have.length_of(3) From 421a5e60af700ad347d4a1b1df426671440a73ad Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 17 Jul 2013 12:48:21 -0400 Subject: [PATCH 57/85] Fix EC2 response tags. Closes #37. --- moto/ec2/models.py | 23 +++++++++++++++-------- moto/ec2/responses/instances.py | 11 ++++++++++- tests/test_ec2/test_tags.py | 16 ++++++++++++++++ 3 files changed, 41 insertions(+), 9 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 1dac5e3bf..0ea10555c 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -47,6 +47,10 @@ class Instance(BotoInstance): self._state.name = "pending" self._state.code = 0 + def get_tags(self): + tags = ec2_backend.describe_tags(self.id) + return tags + class InstanceBackend(object): @@ -165,18 +169,21 @@ class TagBackend(object): def delete_tag(self, resource_id, key): return self.tags[resource_id].pop(key) - def describe_tags(self): + def describe_tags(self, filter_resource_ids=None): results = [] for resource_id, tags in self.tags.iteritems(): ami = 'ami' in resource_id for key, value in tags.iteritems(): - result = { - 'resource_id': resource_id, - 'key': key, - 'value': value, - 'resource_type': 'image' if ami else 'instance', - } - results.append(result) + if not filter_resource_ids or resource_id in filter_resource_ids: + # If we're not filtering, or we are filtering and this + # resource id is in the filter list, add this tag + result = { + 'resource_id': resource_id, + 'key': key, + 'value': value, + 'resource_type': 'image' if ami else 'instance', + } + results.append(result) return results diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index 1b6097a7d..68be9dafd 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -184,7 +184,16 @@ EC2_DESCRIBE_INSTANCES = """ Date: Mon, 22 Jul 2013 22:50:58 -0400 Subject: [PATCH 58/85] Basic ELB support --- moto/__init__.py | 1 + moto/elb/__init__.py | 2 + moto/elb/models.py | 80 +++++++++++++++ moto/elb/responses.py | 179 ++++++++++++++++++++++++++++++++++ moto/elb/urls.py | 9 ++ tests/test_elb/test_elb.py | 121 +++++++++++++++++++++++ tests/test_elb/test_server.py | 0 7 files changed, 392 
insertions(+) create mode 100644 moto/elb/__init__.py create mode 100644 moto/elb/models.py create mode 100644 moto/elb/responses.py create mode 100644 moto/elb/urls.py create mode 100644 tests/test_elb/test_elb.py create mode 100644 tests/test_elb/test_server.py diff --git a/moto/__init__.py b/moto/__init__.py index 49f121a3c..302156efe 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,6 +3,7 @@ logging.getLogger('boto').setLevel(logging.CRITICAL) from .dynamodb import mock_dynamodb from .ec2 import mock_ec2 +from .elb import mock_elb from .s3 import mock_s3 from .ses import mock_ses from .sqs import mock_sqs diff --git a/moto/elb/__init__.py b/moto/elb/__init__.py new file mode 100644 index 000000000..fcadac99e --- /dev/null +++ b/moto/elb/__init__.py @@ -0,0 +1,2 @@ +from .models import elb_backend +mock_elb = elb_backend.decorator diff --git a/moto/elb/models.py b/moto/elb/models.py new file mode 100644 index 000000000..aff7f082b --- /dev/null +++ b/moto/elb/models.py @@ -0,0 +1,80 @@ +from moto.core import BaseBackend + + +class FakeHealthCheck(object): + def __init__(self, timeout, healthy_threshold, unhealthy_threshold, + interval, target): + self.timeout = timeout + self.healthy_threshold = healthy_threshold + self.unhealthy_threshold = unhealthy_threshold + self.interval = interval + self.target = target + + +class FakeListener(object): + def __init__(self, load_balancer_port, instance_port, protocol): + self.load_balancer_port = load_balancer_port + self.instance_port = instance_port + self.protocol = protocol.upper() + + +class FakeLoadBalancer(object): + def __init__(self, name, zones, ports): + self.name = name + self.health_check = None + self.instance_ids = [] + self.zones = zones + self.listeners = [] + for protocol, lb_port, instance_port in ports: + listener = FakeListener( + protocol=protocol, + load_balancer_port=lb_port, + instance_port=instance_port, + ) + self.listeners.append(listener) + + +class ELBBackend(BaseBackend): + + def __init__(self): + self.load_balancers = {} + + def create_load_balancer(self, name, zones, ports): + new_load_balancer = FakeLoadBalancer(name=name, zones=zones, ports=ports) + self.load_balancers[name] = new_load_balancer + return new_load_balancer + + def describe_load_balancers(self, names): + balancers = self.load_balancers.values() + if names: + return [balancer for balancer in balancers if balancer.name in names] + else: + return balancers + + def delete_load_balancer(self, load_balancer_name): + self.load_balancers.pop(load_balancer_name, None) + + def get_load_balancer(self, load_balancer_name): + return self.load_balancers.get(load_balancer_name) + + def configure_health_check(self, load_balancer_name, timeout, + healthy_threshold, unhealthy_threshold, interval, + target): + check = FakeHealthCheck(timeout, healthy_threshold, unhealthy_threshold, + interval, target) + load_balancer = self.get_load_balancer(load_balancer_name) + load_balancer.health_check = check + return check + + def register_instances(self, load_balancer_name, instance_ids): + load_balancer = self.get_load_balancer(load_balancer_name) + load_balancer.instance_ids.extend(instance_ids) + return load_balancer + + def deregister_instances(self, load_balancer_name, instance_ids): + load_balancer = self.get_load_balancer(load_balancer_name) + new_instance_ids = [instance_id for instance_id in load_balancer.instance_ids if instance_id not in instance_ids] + load_balancer.instance_ids = new_instance_ids + return load_balancer + +elb_backend = ELBBackend() diff 
--git a/moto/elb/responses.py b/moto/elb/responses.py new file mode 100644 index 000000000..4fcf055df --- /dev/null +++ b/moto/elb/responses.py @@ -0,0 +1,179 @@ +from jinja2 import Template + +from moto.core.responses import BaseResponse +from .models import elb_backend + + +class ELBResponse(BaseResponse): + + def create_load_balancer(self): + """ + u'Scheme': [u'internet-facing'], + """ + load_balancer_name = self.querystring.get('LoadBalancerName')[0] + availability_zones = [value[0] for key, value in self.querystring.items() if "AvailabilityZones.member" in key] + ports = [] + port_index = 1 + while True: + try: + protocol = self.querystring['Listeners.member.{}.Protocol'.format(port_index)][0] + except KeyError: + break + lb_port = self.querystring['Listeners.member.{}.LoadBalancerPort'.format(port_index)][0] + instance_port = self.querystring['Listeners.member.{}.InstancePort'.format(port_index)][0] + ports.append([protocol, lb_port, instance_port]) + port_index += 1 + elb_backend.create_load_balancer( + name=load_balancer_name, + zones=availability_zones, + ports=ports, + ) + template = Template(CREATE_LOAD_BALANCER_TEMPLATE) + return template.render() + + def describe_load_balancers(self): + names = [value[0] for key, value in self.querystring.items() if "LoadBalancerNames.member" in key] + load_balancers = elb_backend.describe_load_balancers(names) + template = Template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) + return template.render(load_balancers=load_balancers) + + def delete_load_balancer(self): + load_balancer_name = self.querystring.get('LoadBalancerName')[0] + elb_backend.delete_load_balancer(load_balancer_name) + template = Template(DELETE_LOAD_BALANCER_TEMPLATE) + return template.render() + + def configure_health_check(self): + check = elb_backend.configure_health_check( + load_balancer_name=self.querystring.get('LoadBalancerName')[0], + timeout=self.querystring.get('HealthCheck.Timeout')[0], + healthy_threshold=self.querystring.get('HealthCheck.HealthyThreshold')[0], + unhealthy_threshold=self.querystring.get('HealthCheck.UnhealthyThreshold')[0], + interval=self.querystring.get('HealthCheck.Interval')[0], + target=self.querystring.get('HealthCheck.Target')[0], + ) + template = Template(CONFIGURE_HEALTH_CHECK_TEMPLATE) + return template.render(check=check) + + def register_instances_with_load_balancer(self): + load_balancer_name = self.querystring.get('LoadBalancerName')[0] + instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key] + template = Template(REGISTER_INSTANCES_TEMPLATE) + load_balancer = elb_backend.register_instances(load_balancer_name, instance_ids) + return template.render(load_balancer=load_balancer) + + def deregister_instances_from_load_balancer(self): + load_balancer_name = self.querystring.get('LoadBalancerName')[0] + instance_ids = [value[0] for key, value in self.querystring.items() if "Instances.member" in key] + template = Template(DEREGISTER_INSTANCES_TEMPLATE) + load_balancer = elb_backend.deregister_instances(load_balancer_name, instance_ids) + return template.render(load_balancer=load_balancer) + +CREATE_LOAD_BALANCER_TEMPLATE = """ + tests.us-east-1.elb.amazonaws.com +""" + +DELETE_LOAD_BALANCER_TEMPLATE = """ +""" + +DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for load_balancer in load_balancers %} + + + + {{ load_balancer.name }} + 2013-01-01T00:00:00.19000Z + + {% if load_balancer.health_check %} + {{ load_balancer.health_check.interval }} + {{ load_balancer.health_check.target }} + {{ 
load_balancer.health_check.healthy_threshold }} + {{ load_balancer.health_check.timeout }} + {{ load_balancer.health_check.unhealthy_threshold }} + {% endif %} + + vpc-56e10e3d + + {% for listener in load_balancer.listeners %} + + + AWSConsolePolicy-1 + + + {{ listener.protocol }} + {{ listener.load_balancer_port }} + {{ listener.protocol }} + {{ listener.instance_port }} + + + {% endfor %} + + + {% for instance_id in load_balancer.instance_ids %} + + {{ instance_id }} + + {% endfor %} + + + + + + + AWSConsolePolicy-1 + 30 + + + + + {% for zone in load_balancer.zones %} + {{ zone }} + {% endfor %} + + tests.us-east-1.elb.amazonaws.com + Z3ZONEID + internet-facing + tests.us-east-1.elb.amazonaws.com + + + + + {% endfor %} + + + + f9880f01-7852-629d-a6c3-3ae2-666a409287e6dc0c + +""" + +CONFIGURE_HEALTH_CHECK_TEMPLATE = """ + + {{ check.interval }} + {{ check.target }} + {{ check.healthy_threshold }} + {{ check.timeout }} + {{ check.unhealthy_threshold }} + +""" + +REGISTER_INSTANCES_TEMPLATE = """ + + {% for instance_id in load_balancer.instance_ids %} + + {{ instance_id }} + + {% endfor %} + +""" + +DEREGISTER_INSTANCES_TEMPLATE = """ + + {% for instance_id in load_balancer.instance_ids %} + + {{ instance_id }} + + {% endfor %} + +""" diff --git a/moto/elb/urls.py b/moto/elb/urls.py new file mode 100644 index 000000000..e41ed2921 --- /dev/null +++ b/moto/elb/urls.py @@ -0,0 +1,9 @@ +from .responses import ELBResponse + +url_bases = [ + "https?://elasticloadbalancing.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ELBResponse().dispatch, +} diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py new file mode 100644 index 000000000..11ddc0ced --- /dev/null +++ b/tests/test_elb/test_elb.py @@ -0,0 +1,121 @@ +import boto +from boto.ec2.elb import HealthCheck +import sure # flake8: noqa + +from moto import mock_elb, mock_ec2 + + +@mock_elb +def test_create_load_balancer(): + conn = boto.connect_elb() + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', zones, ports) + + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + balancer.name.should.equal("my-lb") + set(balancer.availability_zones).should.equal(set(['us-east-1a', 'us-east-1b'])) + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(80) + listener1.instance_port.should.equal(8080) + listener1.protocol.should.equal("HTTP") + listener2 = balancer.listeners[1] + listener2.load_balancer_port.should.equal(443) + listener2.instance_port.should.equal(8443) + listener2.protocol.should.equal("TCP") + + +@mock_elb +def test_get_load_balancers_by_name(): + conn = boto.connect_elb() + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb1', zones, ports) + lb = conn.create_load_balancer('my-lb2', zones, ports) + lb = conn.create_load_balancer('my-lb3', zones, ports) + + conn.get_all_load_balancers().should.have.length_of(3) + conn.get_all_load_balancers(load_balancer_names=['my-lb1']).should.have.length_of(1) + conn.get_all_load_balancers(load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) + + +@mock_elb +def test_delete_load_balancer(): + conn = boto.connect_elb() + + zones = ['us-east-1a'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', zones, ports) + + balancers = conn.get_all_load_balancers() + balancers.should.have.length_of(1) + + conn.delete_load_balancer("my-lb") + 
balancers = conn.get_all_load_balancers() + balancers.should.have.length_of(0) + + +@mock_elb +def test_create_health_check(): + conn = boto.connect_elb() + + hc = HealthCheck( + interval=20, + healthy_threshold=3, + unhealthy_threshold=5, + target='HTTP:8080/health', + timeout=23, + ) + + lb = conn.create_load_balancer('my-lb', [], []) + lb.configure_health_check(hc) + + balancer = conn.get_all_load_balancers()[0] + health_check = balancer.health_check + health_check.interval.should.equal(20) + health_check.healthy_threshold.should.equal(3) + health_check.unhealthy_threshold.should.equal(5) + health_check.target.should.equal('HTTP:8080/health') + health_check.timeout.should.equal(23) + + +@mock_ec2 +@mock_elb +def test_register_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + lb = conn.create_load_balancer('my-lb', [], []) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + instance_ids = [instance.id for instance in balancer.instances] + set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2 +@mock_elb +def test_deregister_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + lb = conn.create_load_balancer('my-lb', [], []) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + balancer.instances.should.have.length_of(2) + balancer.deregister_instances([instance_id1]) + + balancer.instances.should.have.length_of(1) + balancer.instances[0].id.should.equal(instance_id2) diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py new file mode 100644 index 000000000..e69de29bb From befcadc7fcd1444012e697d130ed09c15c456965 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 22 Jul 2013 22:56:02 -0400 Subject: [PATCH 59/85] Add ELB server test --- moto/server.py | 1 + tests/test_elb/test_server.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/moto/server.py b/moto/server.py index a46222488..a00f23937 100644 --- a/moto/server.py +++ b/moto/server.py @@ -5,6 +5,7 @@ from werkzeug.routing import BaseConverter from moto.dynamodb import dynamodb_backend # flake8: noqa from moto.ec2 import ec2_backend # flake8: noqa +from moto.elb import elb_backend # flake8: noqa from moto.s3 import s3_backend # flake8: noqa from moto.ses import ses_backend # flake8: noqa from moto.sqs import sqs_backend # flake8: noqa diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py index e69de29bb..c739abb55 100644 --- a/tests/test_elb/test_server.py +++ b/tests/test_elb/test_server.py @@ -0,0 +1,16 @@ +import sure # flake8: noqa + +import moto.server as server + +''' +Test the different server responses +''' +server.configure_urls("elb") + + +def test_elb_describe_instances(): + test_client = server.app.test_client() + res = test_client.get('/?Action=DescribeLoadBalancers') + + res.data.should.contain('DescribeLoadBalancersResponse') + res.data.should.contain('LoadBalancerName') From 2d51e38eab67502373daa7bac4054bd7a29770a5 Mon Sep 17 00:00:00 2001 From: Lincoln de Sousa Date: Fri, 26 Jul 2013 14:42:32 -0400 Subject: [PATCH 60/85] Using argparse instead of using sys.argv directly --- Makefile | 
2 +- moto/server.py | 37 ++++++++++++++++++++++++---------- tests/test_core/test_server.py | 8 ++++---- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index d997ae4b5..7521a6d88 100644 --- a/Makefile +++ b/Makefile @@ -6,5 +6,5 @@ init: test: rm -f .coverage - @nosetests --with-coverage ./tests/ + @nosetests -sv --with-coverage ./tests/ diff --git a/moto/server.py b/moto/server.py index a00f23937..d49f6b157 100644 --- a/moto/server.py +++ b/moto/server.py @@ -1,4 +1,5 @@ import sys +import argparse from flask import Flask from werkzeug.routing import BaseConverter @@ -36,19 +37,33 @@ def configure_urls(service): app.route(url_path, methods=HTTP_METHODS)(convert_flask_to_httpretty_response(handler)) -def main(args=sys.argv): - if len(args) not in range(2, 4): - print("Usage: moto_server [port]") - sys.exit(1) - service_name = args[1] - configure_urls(service_name) - try: - port = int(args[2]) - except IndexError: - port = None +def main(argv=sys.argv): + # Yes, I'm using those imports in the beginning of the file to create a + # dynamic list of available services to be shown in the help text when the + # user tries to interact with moto_server. + available_services = [ + x.split('_')[0] for x in globals() if x.endswith('_backend')] + + parser = argparse.ArgumentParser() + parser.add_argument( + 'service', type=str, + choices=available_services, + help='Choose which mechanism you want to run') + parser.add_argument( + '-H', '--host', type=str, + help='Which host to bind', + default='0.0.0.0') + parser.add_argument( + '-p', '--port', type=int, + help='Port number to use for connection', + default=5000) + + args = parser.parse_args(argv) + + configure_urls(args.service) app.testing = True - app.run(port=port) + app.run(host=args.host, port=args.port) if __name__ == '__main__': main() diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index 6cf87b1d4..5ea77c853 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -15,11 +15,11 @@ def test_wrong_arguments(): @patch('moto.server.app.run') def test_right_arguments(app_run): - main(["name", "s3"]) - app_run.assert_called_once_with(port=None) + main(["s3"]) + app_run.assert_called_once_with(host='0.0.0.0', port=5000) @patch('moto.server.app.run') def test_port_argument(app_run): - main(["name", "s3", 8080]) - app_run.assert_called_once_with(port=8080) + main(["s3", "--port", "8080"]) + app_run.assert_called_once_with(host='0.0.0.0', port=8080) From 8ad458cd725c7e66f1a69b283d4152d6edb1cd93 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 26 Jul 2013 15:03:16 -0400 Subject: [PATCH 61/85] Fix sys.argv bug --- moto/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/server.py b/moto/server.py index d49f6b157..645ce8fe4 100644 --- a/moto/server.py +++ b/moto/server.py @@ -37,7 +37,7 @@ def configure_urls(service): app.route(url_path, methods=HTTP_METHODS)(convert_flask_to_httpretty_response(handler)) -def main(argv=sys.argv): +def main(argv=sys.argv[1:]): # Yes, I'm using those imports in the beginning of the file to create a # dynamic list of available services to be shown in the help text when the # user tries to interact with moto_server. 
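A note on the two server changes above: argparse's parse_args() expects to receive only the arguments, not the program name, so passing the full sys.argv made argparse try to match the script path itself against the 'service' choices. Slicing to sys.argv[1:] restores the intended behaviour. A minimal standalone sketch of that parsing behaviour (illustrative only, not moto's actual entry point, and assuming the installed console script is named moto_server):

    import argparse
    import sys

    parser = argparse.ArgumentParser()
    # 'service' must be one of the registered backends, e.g. 's3' or 'ec2'
    parser.add_argument('service', choices=['s3', 'ec2'])
    parser.add_argument('-H', '--host', default='0.0.0.0')
    parser.add_argument('-p', '--port', type=int, default=5000)

    # Correct: strip the program name before handing argv to argparse,
    # e.g. invoked as: moto_server s3 --port 8080
    args = parser.parse_args(sys.argv[1:])
    print(args)

With the earlier main(argv=sys.argv) signature, sys.argv[0] (the moto_server script path) would have been consumed as the positional service argument and rejected against the choices list, which is the bug fixed in the follow-up patch.
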
From 168b049b4df02e21d3335775e710c975adae4a35 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 26 Jul 2013 15:04:34 -0400 Subject: [PATCH 62/85] Add @clarete to authors --- AUTHORS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS.md b/AUTHORS.md index 57606ee0b..3a615ea97 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -5,3 +5,4 @@ Moto is written by Steve Pulec with contributions from: * [Zach Smith](https://github.com/zmsmith) * [Dilshod Tadjibaev](https://github.com/antimora) * [Dan Berglund](https://github.com/cheif) +* [Lincoln de Sousa](https://github.com/clarete) From 674a85ba0bfbb9023d751249a0ee53bf4670dafe Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Fri, 26 Jul 2013 15:14:34 -0400 Subject: [PATCH 63/85] Cleanup backend finding --- moto/backends.py | 17 +++++++++++++++++ moto/server.py | 18 +++--------------- 2 files changed, 20 insertions(+), 15 deletions(-) create mode 100644 moto/backends.py diff --git a/moto/backends.py b/moto/backends.py new file mode 100644 index 000000000..6898ad169 --- /dev/null +++ b/moto/backends.py @@ -0,0 +1,17 @@ +from moto.dynamodb import dynamodb_backend +from moto.ec2 import ec2_backend +from moto.elb import elb_backend +from moto.s3 import s3_backend +from moto.ses import ses_backend +from moto.sqs import sqs_backend +from moto.sts import sts_backend + +BACKENDS = { + 'dynamodb': dynamodb_backend, + 'ec2': ec2_backend, + 'elb': elb_backend, + 's3': s3_backend, + 'ses': ses_backend, + 'sqs': sqs_backend, + 'sts': sts_backend, +} diff --git a/moto/server.py b/moto/server.py index 645ce8fe4..9ef135359 100644 --- a/moto/server.py +++ b/moto/server.py @@ -4,14 +4,7 @@ import argparse from flask import Flask from werkzeug.routing import BaseConverter -from moto.dynamodb import dynamodb_backend # flake8: noqa -from moto.ec2 import ec2_backend # flake8: noqa -from moto.elb import elb_backend # flake8: noqa -from moto.s3 import s3_backend # flake8: noqa -from moto.ses import ses_backend # flake8: noqa -from moto.sqs import sqs_backend # flake8: noqa -from moto.sts import sts_backend # flake8: noqa - +from moto.backends import BACKENDS from moto.core.utils import convert_flask_to_httpretty_response app = Flask(__name__) @@ -26,8 +19,7 @@ class RegexConverter(BaseConverter): def configure_urls(service): - module = sys.modules[__name__] - backend = getattr(module, "{}_backend".format(service)) + backend = BACKENDS[service] from werkzeug.routing import Map # Reset view functions to reset the app app.view_functions = {} @@ -38,11 +30,7 @@ def configure_urls(service): def main(argv=sys.argv[1:]): - # Yes, I'm using those imports in the beginning of the file to create a - # dynamic list of available services to be shown in the help text when the - # user tries to interact with moto_server. 
- available_services = [ - x.split('_')[0] for x in globals() if x.endswith('_backend')] + available_services = BACKENDS.keys() parser = argparse.ArgumentParser() parser.add_argument( From d57157e749f6e5db6e4cc82c83a5c116f7857beb Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 27 Jul 2013 16:24:38 -0400 Subject: [PATCH 64/85] Merge in autoscaling --- moto/__init__.py | 1 + moto/autoscaling/__init__.py | 2 + moto/autoscaling/models.py | 225 ++++++++++++ moto/autoscaling/responses.py | 322 ++++++++++++++++++ moto/autoscaling/urls.py | 9 + moto/backends.py | 2 + tests/__init__.py | 0 tests/helpers.py | 19 ++ tests/test_autoscaling/test_autoscaling.py | 290 ++++++++++++++++ .../test_launch_configurations.py | 93 +++++ tests/test_autoscaling/test_policies.py | 186 ++++++++++ tests/test_autoscaling/test_server.py | 16 + 12 files changed, 1165 insertions(+) create mode 100644 moto/autoscaling/__init__.py create mode 100644 moto/autoscaling/models.py create mode 100644 moto/autoscaling/responses.py create mode 100644 moto/autoscaling/urls.py create mode 100644 tests/__init__.py create mode 100644 tests/helpers.py create mode 100644 tests/test_autoscaling/test_autoscaling.py create mode 100644 tests/test_autoscaling/test_launch_configurations.py create mode 100644 tests/test_autoscaling/test_policies.py create mode 100644 tests/test_autoscaling/test_server.py diff --git a/moto/__init__.py b/moto/__init__.py index 302156efe..8113260a7 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -1,6 +1,7 @@ import logging logging.getLogger('boto').setLevel(logging.CRITICAL) +from .autoscaling import mock_autoscaling from .dynamodb import mock_dynamodb from .ec2 import mock_ec2 from .elb import mock_elb diff --git a/moto/autoscaling/__init__.py b/moto/autoscaling/__init__.py new file mode 100644 index 000000000..2c25ca388 --- /dev/null +++ b/moto/autoscaling/__init__.py @@ -0,0 +1,2 @@ +from .models import autoscaling_backend +mock_autoscaling = autoscaling_backend.decorator diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py new file mode 100644 index 000000000..3a7f69401 --- /dev/null +++ b/moto/autoscaling/models.py @@ -0,0 +1,225 @@ +from moto.core import BaseBackend +from moto.ec2 import ec2_backend + +# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown +DEFAULT_COOLDOWN = 300 + + +class FakeScalingPolicy(object): + def __init__(self, name, adjustment_type, as_name, scaling_adjustment, + cooldown): + self.name = name + self.adjustment_type = adjustment_type + self.as_name = as_name + self.scaling_adjustment = scaling_adjustment + if cooldown is not None: + self.cooldown = cooldown + else: + self.cooldown = DEFAULT_COOLDOWN + + def execute(self): + if self.adjustment_type == 'ExactCapacity': + autoscaling_backend.set_desired_capacity(self.as_name, self.scaling_adjustment) + elif self.adjustment_type == 'ChangeInCapacity': + autoscaling_backend.change_capacity(self.as_name, self.scaling_adjustment) + elif self.adjustment_type == 'PercentChangeInCapacity': + autoscaling_backend.change_capacity_percent(self.as_name, self.scaling_adjustment) + + +class FakeLaunchConfiguration(object): + def __init__(self, name, image_id, key_name, security_groups, user_data, + instance_type, instance_monitoring, instance_profile_name, + spot_price): + self.name = name + self.image_id = image_id + self.key_name = key_name + self.security_groups = security_groups + self.user_data = user_data + self.instance_type = instance_type + self.instance_monitoring = 
instance_monitoring + self.instance_profile_name = instance_profile_name + self.spot_price = spot_price + + @property + def instance_monitoring_enabled(self): + if self.instance_monitoring: + return 'true' + return 'false' + + +class FakeAutoScalingGroup(object): + def __init__(self, name, availability_zones, desired_capacity, max_size, + min_size, launch_config_name, vpc_zone_identifier): + self.name = name + self.availability_zones = availability_zones + self.max_size = max_size + self.min_size = min_size + + self.launch_config = autoscaling_backend.launch_configurations[launch_config_name] + self.launch_config_name = launch_config_name + self.vpc_zone_identifier = vpc_zone_identifier + + self.instances = [] + self.set_desired_capacity(desired_capacity) + + def update(self, availability_zones, desired_capacity, max_size, min_size, + launch_config_name, vpc_zone_identifier): + self.availability_zones = availability_zones + self.max_size = max_size + self.min_size = min_size + + self.launch_config = autoscaling_backend.launch_configurations[launch_config_name] + self.launch_config_name = launch_config_name + self.vpc_zone_identifier = vpc_zone_identifier + + self.set_desired_capacity(desired_capacity) + + def set_desired_capacity(self, new_capacity): + if new_capacity is None: + self.desired_capacity = self.min_size + else: + self.desired_capacity = new_capacity + + curr_instance_count = len(self.instances) + + if self.desired_capacity == curr_instance_count: + return + + if self.desired_capacity > curr_instance_count: + # Need more instances + count_needed = self.desired_capacity - curr_instance_count + reservation = ec2_backend.add_instances( + self.launch_config.image_id, + count_needed, + self.launch_config.user_data + ) + for instance in reservation.instances: + instance.autoscaling_group = self + self.instances.extend(reservation.instances) + else: + # Need to remove some instances + count_to_remove = curr_instance_count - self.desired_capacity + instances_to_remove = self.instances[:count_to_remove] + instance_ids_to_remove = [instance.id for instance in instances_to_remove] + ec2_backend.terminate_instances(instance_ids_to_remove) + self.instances = self.instances[count_to_remove:] + + +class AutoScalingBackend(BaseBackend): + + def __init__(self): + self.autoscaling_groups = {} + self.launch_configurations = {} + self.policies = {} + + def create_launch_configuration(self, name, image_id, key_name, + security_groups, user_data, instance_type, + instance_monitoring, instance_profile_name, + spot_price): + launch_configuration = FakeLaunchConfiguration( + name=name, + image_id=image_id, + key_name=key_name, + security_groups=security_groups, + user_data=user_data, + instance_type=instance_type, + instance_monitoring=instance_monitoring, + instance_profile_name=instance_profile_name, + spot_price=spot_price, + ) + self.launch_configurations[name] = launch_configuration + return launch_configuration + + def describe_launch_configurations(self, names): + configurations = self.launch_configurations.values() + if names: + return [configuration for configuration in configurations if configuration.name in names] + else: + return configurations + + def delete_launch_configuration(self, launch_configuration_name): + self.launch_configurations.pop(launch_configuration_name, None) + + def create_autoscaling_group(self, name, availability_zones, + desired_capacity, max_size, min_size, + launch_config_name, vpc_zone_identifier): + group = FakeAutoScalingGroup( + name=name, + 
availability_zones=availability_zones, + desired_capacity=desired_capacity, + max_size=max_size, + min_size=min_size, + launch_config_name=launch_config_name, + vpc_zone_identifier=vpc_zone_identifier, + ) + self.autoscaling_groups[name] = group + return group + + def update_autoscaling_group(self, name, availability_zones, + desired_capacity, max_size, min_size, + launch_config_name, vpc_zone_identifier): + group = self.autoscaling_groups[name] + group.update(availability_zones, desired_capacity, max_size, + min_size, launch_config_name, vpc_zone_identifier) + return group + + def describe_autoscaling_groups(self, names): + groups = self.autoscaling_groups.values() + if names: + return [group for group in groups if group.name in names] + else: + return groups + + def delete_autoscaling_group(self, group_name): + self.autoscaling_groups.pop(group_name, None) + + def describe_autoscaling_instances(self): + instances = [] + for group in self.autoscaling_groups.values(): + instances.extend(group.instances) + return instances + + def set_desired_capacity(self, group_name, desired_capacity): + group = self.autoscaling_groups[group_name] + group.set_desired_capacity(desired_capacity) + + def change_capacity(self, group_name, scaling_adjustment): + group = self.autoscaling_groups[group_name] + desired_capacity = group.desired_capacity + scaling_adjustment + self.set_desired_capacity(group_name, desired_capacity) + + def change_capacity_percent(self, group_name, scaling_adjustment): + """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html + If PercentChangeInCapacity returns a value between 0 and 1, + Auto Scaling will round it off to 1. If the PercentChangeInCapacity + returns a value greater than 1, Auto Scaling will round it off to the + lower value. 
For example, if PercentChangeInCapacity returns 12.5, + then Auto Scaling will round it off to 12.""" + group = self.autoscaling_groups[group_name] + percent_change = 1 + (scaling_adjustment / 100.0) + desired_capacity = group.desired_capacity * percent_change + if group.desired_capacity < desired_capacity < group.desired_capacity + 1: + desired_capacity = group.desired_capacity + 1 + else: + desired_capacity = int(desired_capacity) + self.set_desired_capacity(group_name, desired_capacity) + + def create_autoscaling_policy(self, name, adjustment_type, as_name, + scaling_adjustment, cooldown): + policy = FakeScalingPolicy(name, adjustment_type, as_name, + scaling_adjustment, cooldown) + + self.policies[name] = policy + return policy + + def describe_policies(self): + return self.policies.values() + + def delete_policy(self, group_name): + self.policies.pop(group_name, None) + + def execute_policy(self, group_name): + policy = self.policies[group_name] + policy.execute() + +autoscaling_backend = AutoScalingBackend() diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py new file mode 100644 index 000000000..aa5d5f1a4 --- /dev/null +++ b/moto/autoscaling/responses.py @@ -0,0 +1,322 @@ +from jinja2 import Template + +from moto.core.responses import BaseResponse +from .models import autoscaling_backend + + +class AutoScalingResponse(BaseResponse): + + def _get_param(self, param_name): + return self.querystring.get(param_name, [None])[0] + + def _get_int_param(self, param_name): + value = self._get_param(param_name) + if value is not None: + return int(value) + + def _get_multi_param(self, param_prefix): + return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)] + + def create_launch_configuration(self): + instance_monitoring_string = self._get_param('InstanceMonitoring.Enabled') + if instance_monitoring_string == 'true': + instance_monitoring = True + else: + instance_monitoring = False + autoscaling_backend.create_launch_configuration( + name=self._get_param('LaunchConfigurationName'), + image_id=self._get_param('ImageId'), + key_name=self._get_param('KeyName'), + security_groups=self._get_multi_param('SecurityGroups.member.'), + user_data=self._get_param('UserData'), + instance_type=self._get_param('InstanceType'), + instance_monitoring=instance_monitoring, + instance_profile_name=self._get_param('IamInstanceProfile'), + spot_price=self._get_param('SpotPrice'), + ) + template = Template(CREATE_LAUNCH_CONFIGURATION_TEMPLATE) + return template.render() + + def describe_launch_configurations(self): + names = self._get_multi_param('LaunchConfigurationNames') + launch_configurations = autoscaling_backend.describe_launch_configurations(names) + template = Template(DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE) + return template.render(launch_configurations=launch_configurations) + + def delete_launch_configuration(self): + launch_configurations_name = self.querystring.get('LaunchConfigurationName')[0] + autoscaling_backend.delete_launch_configuration(launch_configurations_name) + template = Template(DELETE_LAUNCH_CONFIGURATION_TEMPLATE) + return template.render() + + def create_auto_scaling_group(self): + autoscaling_backend.create_autoscaling_group( + name=self._get_param('AutoScalingGroupName'), + availability_zones=self._get_multi_param('AvailabilityZones.member'), + desired_capacity=self._get_int_param('DesiredCapacity'), + max_size=self._get_int_param('MaxSize'), + min_size=self._get_int_param('MinSize'), + 
launch_config_name=self._get_param('LaunchConfigurationName'), + vpc_zone_identifier=self._get_param('VPCZoneIdentifier'), + ) + template = Template(CREATE_AUTOSCALING_GROUP_TEMPLATE) + return template.render() + + def describe_auto_scaling_groups(self): + names = self._get_multi_param("AutoScalingGroupNames") + groups = autoscaling_backend.describe_autoscaling_groups(names) + template = Template(DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE) + return template.render(groups=groups) + + def update_auto_scaling_group(self): + autoscaling_backend.update_autoscaling_group( + name=self._get_param('AutoScalingGroupName'), + availability_zones=self._get_multi_param('AvailabilityZones.member'), + desired_capacity=self._get_int_param('DesiredCapacity'), + max_size=self._get_int_param('MaxSize'), + min_size=self._get_int_param('MinSize'), + launch_config_name=self._get_param('LaunchConfigurationName'), + vpc_zone_identifier=self._get_param('VPCZoneIdentifier'), + ) + template = Template(UPDATE_AUTOSCALING_GROUP_TEMPLATE) + return template.render() + + def delete_auto_scaling_group(self): + group_name = self._get_param('AutoScalingGroupName') + autoscaling_backend.delete_autoscaling_group(group_name) + template = Template(DELETE_AUTOSCALING_GROUP_TEMPLATE) + return template.render() + + def set_desired_capacity(self): + group_name = self._get_param('AutoScalingGroupName') + desired_capacity = self._get_int_param('DesiredCapacity') + autoscaling_backend.set_desired_capacity(group_name, desired_capacity) + template = Template(SET_DESIRED_CAPACITY_TEMPLATE) + return template.render() + + def describe_auto_scaling_instances(self): + instances = autoscaling_backend.describe_autoscaling_instances() + template = Template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) + return template.render(instances=instances) + + def put_scaling_policy(self): + policy = autoscaling_backend.create_autoscaling_policy( + name=self._get_param('PolicyName'), + adjustment_type=self._get_param('AdjustmentType'), + as_name=self._get_param('AutoScalingGroupName'), + scaling_adjustment=self._get_int_param('ScalingAdjustment'), + cooldown=self._get_int_param('Cooldown'), + ) + template = Template(CREATE_SCALING_POLICY_TEMPLATE) + return template.render(policy=policy) + + def describe_policies(self): + policies = autoscaling_backend.describe_policies() + template = Template(DESCRIBE_SCALING_POLICIES_TEMPLATE) + return template.render(policies=policies) + + def delete_policy(self): + group_name = self._get_param('PolicyName') + autoscaling_backend.delete_policy(group_name) + template = Template(DELETE_POLICY_TEMPLATE) + return template.render() + + def execute_policy(self): + group_name = self._get_param('PolicyName') + autoscaling_backend.execute_policy(group_name) + template = Template(EXECUTE_POLICY_TEMPLATE) + return template.render() + + +CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ + + 7c6e177f-f082-11e1-ac58-3714bEXAMPLE + +""" + +DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """ + + + {% for launch_configuration in launch_configurations %} + + + {% for security_group in launch_configuration.security_groups %} + {{ security_group }} + {% endfor %} + + 2013-01-21T23:04:42.200Z + + {% if launch_configuration.instance_profile_name %} + {{ launch_configuration.instance_profile_name }} + {% endif %} + {{ launch_configuration.name }} + {% if launch_configuration.user_data %} + {{ launch_configuration.user_data }} + {% else %} + + {% endif %} + m1.small + arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration: + 
9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + + {{ launch_configuration.image_id }} + {% if launch_configuration.key_name %} + {{ launch_configuration.key_name }} + {% else %} + + {% endif %} + + + {{ launch_configuration.instance_monitoring_enabled }} + + {% if launch_configuration.spot_price %} + {{ launch_configuration.spot_price }} + {% endif %} + + {% endfor %} + + + + d05a22f8-b690-11e2-bf8e-2113fEXAMPLE + +""" + +DELETE_LAUNCH_CONFIGURATION_TEMPLATE = """ + + 7347261f-97df-11e2-8756-35eEXAMPLE + +""" + +CREATE_AUTOSCALING_GROUP_TEMPLATE = """ + +8d798a29-f083-11e1-bdfb-cb223EXAMPLE + +""" + +DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ + + + {% for group in groups %} + + + + {{ group.name }} + ELB + 2013-05-06T17:47:15.107Z + + {{ group.launch_config_name }} + + {{ group.desired_capacity }} + + {% for availability_zone in group.availability_zones %} + {{ availability_zone }} + {% endfor %} + + + my-test-asg-loadbalancer + + {{ group.min_size }} + {% if group.vpc_zone_identifier %} + {{ group.vpc_zone_identifier }} + {% else %} + + {% endif %} + 120 + 300 + arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb + :autoScalingGroupName/my-test-asg-lbs + + Default + + {{ group.max_size }} + + {% endfor %} + + + + 0f02a07d-b677-11e2-9eb0-dd50EXAMPLE + +""" + +UPDATE_AUTOSCALING_GROUP_TEMPLATE = """ + + adafead0-ab8a-11e2-ba13-ab0ccEXAMPLE + +""" + +DELETE_AUTOSCALING_GROUP_TEMPLATE = """ + + 70a76d42-9665-11e2-9fdf-211deEXAMPLE + +""" + +DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ + + + {% for instance in instances %} + + HEALTHY + {{ instance.autoscaling_group.name }} + us-east-1e + {{ instance.id }} + {{ instance.autoscaling_group.launch_config_name }} + InService + + {% endfor %} + + + + df992dc3-b72f-11e2-81e1-750aa6EXAMPLE + +""" + +CREATE_SCALING_POLICY_TEMPLATE = """ + + arn:aws:autoscaling:us-east-1:803981987763:scalingPolicy:b0dcf5e8 +-02e6-4e31-9719-0675d0dc31ae:autoScalingGroupName/my-test-asg:policyName/my-scal +eout-policy + + + 3cfc6fef-c08b-11e2-a697-2922EXAMPLE + +""" + +DESCRIBE_SCALING_POLICIES_TEMPLATE = """ + + + {% for policy in policies %} + + arn:aws:autoscaling:us-east-1:803981987763:scalingPolicy:c322 +761b-3172-4d56-9a21-0ed9d6161d67:autoScalingGroupName/my-test-asg:policyName/MyScaleDownPolicy + {{ policy.adjustment_type }} + {{ policy.scaling_adjustment }} + {{ policy.name }} + {{ policy.as_name }} + {{ policy.cooldown }} + + + {% endfor %} + + + + ec3bffad-b739-11e2-b38d-15fbEXAMPLE + +""" + +SET_DESIRED_CAPACITY_TEMPLATE = """ + + 9fb7e2db-6998-11e2-a985-57c82EXAMPLE + +""" + +EXECUTE_POLICY_TEMPLATE = """ + + 70a76d42-9665-11e2-9fdf-211deEXAMPLE + +""" + +DELETE_POLICY_TEMPLATE = """ + + 70a76d42-9665-11e2-9fdf-211deEXAMPLE + +""" diff --git a/moto/autoscaling/urls.py b/moto/autoscaling/urls.py new file mode 100644 index 000000000..affa69c96 --- /dev/null +++ b/moto/autoscaling/urls.py @@ -0,0 +1,9 @@ +from .responses import AutoScalingResponse + +url_bases = [ + "https?://autoscaling.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': AutoScalingResponse().dispatch, +} diff --git a/moto/backends.py b/moto/backends.py index 6898ad169..5a1776455 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -1,3 +1,4 @@ +from moto.autoscaling import autoscaling_backend from moto.dynamodb import dynamodb_backend from moto.ec2 import ec2_backend from moto.elb import elb_backend @@ -7,6 +8,7 @@ from moto.sqs import sqs_backend from moto.sts import sts_backend BACKENDS = { + 'autoscaling': 
autoscaling_backend, 'dynamodb': dynamodb_backend, 'ec2': ec2_backend, 'elb': elb_backend, diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/helpers.py b/tests/helpers.py new file mode 100644 index 000000000..199f74fcb --- /dev/null +++ b/tests/helpers.py @@ -0,0 +1,19 @@ +import boto +from nose.plugins.skip import SkipTest + + +def version_tuple(v): + return tuple(map(int, (v.split(".")))) + + +class requires_boto_gte(object): + """Decorator for requiring boto version greater than or equal to 'version'""" + def __init__(self, version): + self.version = version + + def __call__(self, test): + boto_version = version_tuple(boto.__version__) + required = version_tuple(self.version) + if boto_version >= required: + return test() + raise SkipTest diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py new file mode 100644 index 000000000..ede0b6720 --- /dev/null +++ b/tests/test_autoscaling/test_autoscaling.py @@ -0,0 +1,290 @@ +import boto +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.group import AutoScalingGroup +from nose.plugins.attrib import attr +import sure # flake8: noqa +from unittest import skipIf + +from moto import mock_autoscaling, mock_ec2 +from tests.helpers import requires_boto_gte + + +@mock_autoscaling +def test_create_autoscaling_group(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + availability_zones=['us-east-1c', 'us-east-1b'], + desired_capacity=2, + max_size=2, + min_size=2, + launch_config=config, + vpc_zone_identifier='subnet-1234abcd', + ) + conn.create_auto_scaling_group(group) + + group = conn.get_all_groups()[0] + group.name.should.equal('tester_group') + set(group.availability_zones).should.equal(set(['us-east-1c', 'us-east-1b'])) + group.desired_capacity.should.equal(2) + group.max_size.should.equal(2) + group.min_size.should.equal(2) + group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.launch_config_name.should.equal('tester') + + +@mock_autoscaling +def test_create_autoscaling_groups_defaults(): + """ Test with the minimum inputs and check that all of the proper defaults + are assigned for the other attributes """ + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + max_size=2, + min_size=2, + launch_config=config, + ) + conn.create_auto_scaling_group(group) + + group = conn.get_all_groups()[0] + group.name.should.equal('tester_group') + group.max_size.should.equal(2) + group.min_size.should.equal(2) + group.launch_config_name.should.equal('tester') + + # Defaults + list(group.availability_zones).should.equal([]) + group.desired_capacity.should.equal(2) + group.vpc_zone_identifier.should.equal('') + + +@mock_autoscaling +def test_autoscaling_group_describe_filter(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + max_size=2, + min_size=2, + launch_config=config, + ) + conn.create_auto_scaling_group(group) + group.name = 
'tester_group2' + conn.create_auto_scaling_group(group) + group.name = 'tester_group3' + conn.create_auto_scaling_group(group) + + conn.get_all_groups(names=['tester_group', 'tester_group2']).should.have.length_of(2) + conn.get_all_groups().should.have.length_of(3) + + +@mock_autoscaling +def test_autoscaling_update(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + availability_zones=['us-east-1c', 'us-east-1b'], + desired_capacity=2, + max_size=2, + min_size=2, + launch_config=config, + vpc_zone_identifier='subnet-1234abcd', + ) + conn.create_auto_scaling_group(group) + + group = conn.get_all_groups()[0] + group.vpc_zone_identifier.should.equal('subnet-1234abcd') + + group.vpc_zone_identifier = 'subnet-5678efgh' + group.update() + + group = conn.get_all_groups()[0] + group.vpc_zone_identifier.should.equal('subnet-5678efgh') + + +@mock_autoscaling +def test_autoscaling_group_delete(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + max_size=2, + min_size=2, + launch_config=config, + ) + conn.create_auto_scaling_group(group) + + conn.get_all_groups().should.have.length_of(1) + + conn.delete_auto_scaling_group('tester_group') + conn.get_all_groups().should.have.length_of(0) + + +@mock_ec2 +@mock_autoscaling +def test_autoscaling_group_describe_instances(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + max_size=2, + min_size=2, + launch_config=config, + ) + conn.create_auto_scaling_group(group) + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(2) + instances[0].launch_config_name.should.equal('tester') + autoscale_instance_ids = [instance.instance_id for instance in instances] + + ec2_conn = boto.connect_ec2() + reservations = ec2_conn.get_all_instances() + instances = reservations[0].instances + instances.should.have.length_of(2) + instance_ids = [instance.id for instance in instances] + set(autoscale_instance_ids).should.equal(set(instance_ids)) + + +@requires_boto_gte("2.8") +@mock_autoscaling +def test_set_desired_capacity_up(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + availability_zones=['us-east-1c', 'us-east-1b'], + desired_capacity=2, + max_size=2, + min_size=2, + launch_config=config, + vpc_zone_identifier='subnet-1234abcd', + ) + conn.create_auto_scaling_group(group) + + group = conn.get_all_groups()[0] + group.desired_capacity.should.equal(2) + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(2) + + conn.set_desired_capacity("tester_group", 3) + group = conn.get_all_groups()[0] + group.desired_capacity.should.equal(3) + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(3) + + +@requires_boto_gte("2.8") +@mock_autoscaling +def test_set_desired_capacity_down(): + conn = boto.connect_autoscale() + 
config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + availability_zones=['us-east-1c', 'us-east-1b'], + desired_capacity=2, + max_size=2, + min_size=2, + launch_config=config, + vpc_zone_identifier='subnet-1234abcd', + ) + conn.create_auto_scaling_group(group) + + group = conn.get_all_groups()[0] + group.desired_capacity.should.equal(2) + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(2) + + conn.set_desired_capacity("tester_group", 1) + group = conn.get_all_groups()[0] + group.desired_capacity.should.equal(1) + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(1) + + +@requires_boto_gte("2.8") +@mock_autoscaling +def test_set_desired_capacity_the_same(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + availability_zones=['us-east-1c', 'us-east-1b'], + desired_capacity=2, + max_size=2, + min_size=2, + launch_config=config, + vpc_zone_identifier='subnet-1234abcd', + ) + conn.create_auto_scaling_group(group) + + group = conn.get_all_groups()[0] + group.desired_capacity.should.equal(2) + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(2) + + conn.set_desired_capacity("tester_group", 2) + group = conn.get_all_groups()[0] + group.desired_capacity.should.equal(2) + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(2) diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py new file mode 100644 index 000000000..fda0a51fb --- /dev/null +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -0,0 +1,93 @@ +import boto +from boto.ec2.autoscale.launchconfig import LaunchConfiguration + +import sure # flake8: noqa + +from moto import mock_autoscaling + + +@mock_autoscaling +def test_create_launch_configuration(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + key_name='the_keys', + security_groups=["default", "default2"], + user_data="This is some user_data", + instance_monitoring=True, + instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing', + spot_price=0.1) + conn.create_launch_configuration(config) + + launch_config = conn.get_all_launch_configurations()[0] + launch_config.name.should.equal('tester') + launch_config.image_id.should.equal('ami-abcd1234') + launch_config.instance_type.should.equal('m1.small') + launch_config.key_name.should.equal('the_keys') + set(launch_config.security_groups).should.equal(set(['default', 'default2'])) + launch_config.user_data.should.equal("This is some user_data") + launch_config.instance_monitoring.enabled.should.equal('true') + launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') + launch_config.spot_price.should.equal(0.1) + + +@mock_autoscaling +def test_create_launch_configuration_defaults(): + """ Test with the minimum inputs and check that all of the proper defaults + are assigned for the other attributes """ + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + 
instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + launch_config = conn.get_all_launch_configurations()[0] + launch_config.name.should.equal('tester') + launch_config.image_id.should.equal('ami-abcd1234') + launch_config.instance_type.should.equal('m1.small') + + # Defaults + launch_config.key_name.should.equal('') + list(launch_config.security_groups).should.equal([]) + launch_config.user_data.should.equal("") + launch_config.instance_monitoring.enabled.should.equal('false') + launch_config.instance_profile_name.should.equal(None) + launch_config.spot_price.should.equal(None) + + +@mock_autoscaling +def test_launch_configuration_describe_filter(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + config.name = 'tester2' + conn.create_launch_configuration(config) + config.name = 'tester3' + conn.create_launch_configuration(config) + + conn.get_all_launch_configurations(names=['tester', 'tester2']).should.have.length_of(2) + conn.get_all_launch_configurations().should.have.length_of(3) + + +@mock_autoscaling +def test_launch_configuration_delete(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + conn.get_all_launch_configurations().should.have.length_of(1) + + conn.delete_launch_configuration('tester') + conn.get_all_launch_configurations().should.have.length_of(0) diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py new file mode 100644 index 000000000..7633a38cd --- /dev/null +++ b/tests/test_autoscaling/test_policies.py @@ -0,0 +1,186 @@ +import boto +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.group import AutoScalingGroup +from boto.ec2.autoscale.policy import ScalingPolicy +import sure # flake8: noqa + +from moto import mock_autoscaling, mock_ec2 + + +def setup_autoscale_group(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + conn.create_launch_configuration(config) + + group = AutoScalingGroup( + name='tester_group', + max_size=2, + min_size=2, + launch_config=config, + ) + conn.create_auto_scaling_group(group) + return group + + +@mock_autoscaling +def test_create_policy(): + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ExactCapacity', + as_name='tester_group', + scaling_adjustment=3, + cooldown=60, + ) + conn.create_scaling_policy(policy) + + policy = conn.get_all_policies()[0] + policy.name.should.equal('ScaleUp') + policy.adjustment_type.should.equal('ExactCapacity') + policy.as_name.should.equal('tester_group') + policy.scaling_adjustment.should.equal(3) + policy.cooldown.should.equal(60) + + +@mock_autoscaling +def test_create_policy_default_values(): + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ExactCapacity', + as_name='tester_group', + scaling_adjustment=3, + ) + conn.create_scaling_policy(policy) + + policy = conn.get_all_policies()[0] + policy.name.should.equal('ScaleUp') + + # Defaults + policy.cooldown.should.equal(300) + + +@mock_autoscaling +def test_update_policy(): + group = setup_autoscale_group() + conn = 
boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ExactCapacity', + as_name='tester_group', + scaling_adjustment=3, + ) + conn.create_scaling_policy(policy) + + policy = conn.get_all_policies()[0] + policy.scaling_adjustment.should.equal(3) + + # Now update it by creating another with the same name + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ExactCapacity', + as_name='tester_group', + scaling_adjustment=2, + ) + conn.create_scaling_policy(policy) + policy = conn.get_all_policies()[0] + policy.scaling_adjustment.should.equal(2) + + +@mock_autoscaling +def test_delete_policy(): + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ExactCapacity', + as_name='tester_group', + scaling_adjustment=3, + ) + conn.create_scaling_policy(policy) + + conn.get_all_policies().should.have.length_of(1) + + conn.delete_policy('ScaleUp') + conn.get_all_policies().should.have.length_of(0) + + +@mock_autoscaling +def test_execute_policy_exact_capacity(): + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ExactCapacity', + as_name='tester_group', + scaling_adjustment=3, + ) + conn.create_scaling_policy(policy) + + conn.execute_policy("ScaleUp") + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(3) + + +@mock_autoscaling +def test_execute_policy_positive_change_in_capacity(): + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='ChangeInCapacity', + as_name='tester_group', + scaling_adjustment=3, + ) + conn.create_scaling_policy(policy) + + conn.execute_policy("ScaleUp") + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(5) + + +@mock_autoscaling +def test_execute_policy_percent_change_in_capacity(): + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='PercentChangeInCapacity', + as_name='tester_group', + scaling_adjustment=50, + ) + conn.create_scaling_policy(policy) + + conn.execute_policy("ScaleUp") + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(3) + + +@mock_autoscaling +def test_execute_policy_small_percent_change_in_capacity(): + """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html + If PercentChangeInCapacity returns a value between 0 and 1, + Auto Scaling will round it off to 1.""" + group = setup_autoscale_group() + conn = boto.connect_autoscale() + policy = ScalingPolicy( + name='ScaleUp', + adjustment_type='PercentChangeInCapacity', + as_name='tester_group', + scaling_adjustment=1, + ) + conn.create_scaling_policy(policy) + + conn.execute_policy("ScaleUp") + + instances = list(conn.get_all_autoscaling_instances()) + instances.should.have.length_of(3) diff --git a/tests/test_autoscaling/test_server.py b/tests/test_autoscaling/test_server.py new file mode 100644 index 000000000..61fd2107e --- /dev/null +++ b/tests/test_autoscaling/test_server.py @@ -0,0 +1,16 @@ +import sure # flake8: noqa + +import moto.server as server + +''' +Test the different server responses +''' +server.configure_urls("autoscaling") + + +def test_describe_autoscaling_groups(): + test_client = server.app.test_client() + res = test_client.get('/?Action=DescribeLaunchConfigurations') + + 
res.data.should.contain('') From 46c297aea15fc1f76a71aa4410bca350a9abb979 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 27 Jul 2013 16:29:47 -0400 Subject: [PATCH 65/85] Add autoscaling and ELB to readme --- README.md | 44 ++++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 243d8c203..2562f25d8 100644 --- a/README.md +++ b/README.md @@ -49,26 +49,30 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock It gets even better! Moto isn't just S3. Here's the status of the other AWS services implemented. ```gherkin -|---------------------------------------------------------------------------| -| Service Name | Decorator | Development Status | -|---------------------------------------------------------------------------| -| DynamoDB | @mock_dynamodb | core endpoints done | -|---------------------------------------------------------------------------| -| EC2 | @mock_ec2 | core endpoints done | -| - AMI | | core endpoints done | -| - EBS | | core endpoints done | -| - Instances | | all endpoints done | -| - Security Groups | | core endpoints done | -| - Tags | | all endpoints done | -|---------------------------------------------------------------------------| -| S3 | @mock_s3 | core endpoints done | -|---------------------------------------------------------------------------| -| SES | @mock_ses | core endpoints done | -|---------------------------------------------------------------------------| -| SQS | @mock_sqs | core endpoints done | -|---------------------------------------------------------------------------| -| STS | @mock_sts | core endpoints done | -|---------------------------------------------------------------------------| +|------------------------------------------------------------------------------| +| Service Name | Decorator | Development Status | +|------------------------------------------------------------------------------| +| Autoscaling | @mock_autoscaling| core endpoints done | +|------------------------------------------------------------------------------| +| DynamoDB | @mock_dynamodb | core endpoints done | +|------------------------------------------------------------------------------| +| EC2 | @mock_ec2 | core endpoints done | +| - AMI | | core endpoints done | +| - EBS | | core endpoints done | +| - Instances | | all endpoints done | +| - Security Groups | | core endpoints done | +| - Tags | | all endpoints done | +|------------------------------------------------------------------------------| +| ELB | @mock_elb | core endpoints done | +|------------------------------------------------------------------------------| +| S3 | @mock_s3 | core endpoints done | +|------------------------------------------------------------------------------| +| SES | @mock_ses | core endpoints done | +|------------------------------------------------------------------------------| +| SQS | @mock_sqs | core endpoints done | +|------------------------------------------------------------------------------| +| STS | @mock_sts | core endpoints done | +|------------------------------------------------------------------------------| ``` ### Another Example From dc4c50d13df7b6463412ede0f1ee944557812f45 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sun, 28 Jul 2013 17:01:49 -0400 Subject: [PATCH 66/85] 0.2.8 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2786c8c3e..ad3098cb7 100644 --- a/setup.py +++ b/setup.py @@ 
-4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.7', + version='0.2.8', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 161a5744d106e111b5770938030dd09fb8ac503c Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Aug 2013 17:09:36 -0400 Subject: [PATCH 67/85] Add more autoscaling params --- moto/autoscaling/models.py | 35 +++++++++++++++--- moto/autoscaling/responses.py | 41 ++++++++++++++++++---- tests/test_autoscaling/test_autoscaling.py | 22 ++++++++++-- 3 files changed, 83 insertions(+), 15 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 3a7f69401..da5efb711 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -49,7 +49,9 @@ class FakeLaunchConfiguration(object): class FakeAutoScalingGroup(object): def __init__(self, name, availability_zones, desired_capacity, max_size, - min_size, launch_config_name, vpc_zone_identifier): + min_size, launch_config_name, vpc_zone_identifier, + default_cooldown, health_check_period, health_check_type, + load_balancers, placement_group, termination_policies): self.name = name self.availability_zones = availability_zones self.max_size = max_size @@ -59,11 +61,20 @@ class FakeAutoScalingGroup(object): self.launch_config_name = launch_config_name self.vpc_zone_identifier = vpc_zone_identifier + self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN + self.health_check_period = health_check_period + self.health_check_type = health_check_type if health_check_type else "EC2" + self.load_balancers = load_balancers + self.placement_group = placement_group + self.termination_policies = termination_policies + self.instances = [] self.set_desired_capacity(desired_capacity) def update(self, availability_zones, desired_capacity, max_size, min_size, - launch_config_name, vpc_zone_identifier): + launch_config_name, vpc_zone_identifier, default_cooldown, + health_check_period, health_check_type, load_balancers, + placement_group, termination_policies): self.availability_zones = availability_zones self.max_size = max_size self.min_size = min_size @@ -142,7 +153,10 @@ class AutoScalingBackend(BaseBackend): def create_autoscaling_group(self, name, availability_zones, desired_capacity, max_size, min_size, - launch_config_name, vpc_zone_identifier): + launch_config_name, vpc_zone_identifier, + default_cooldown, health_check_period, + health_check_type, load_balancers, + placement_group, termination_policies): group = FakeAutoScalingGroup( name=name, availability_zones=availability_zones, @@ -151,16 +165,27 @@ class AutoScalingBackend(BaseBackend): min_size=min_size, launch_config_name=launch_config_name, vpc_zone_identifier=vpc_zone_identifier, + default_cooldown=default_cooldown, + health_check_period=health_check_period, + health_check_type=health_check_type, + load_balancers=load_balancers, + placement_group=placement_group, + termination_policies=termination_policies, ) self.autoscaling_groups[name] = group return group def update_autoscaling_group(self, name, availability_zones, desired_capacity, max_size, min_size, - launch_config_name, vpc_zone_identifier): + launch_config_name, vpc_zone_identifier, + default_cooldown, health_check_period, + health_check_type, load_balancers, + placement_group, termination_policies): group = self.autoscaling_groups[name] group.update(availability_zones, desired_capacity, max_size, - min_size, launch_config_name, 
vpc_zone_identifier) + min_size, launch_config_name, vpc_zone_identifier, + default_cooldown, health_check_period, health_check_type, + load_balancers, placement_group, termination_policies) return group def describe_autoscaling_groups(self, names): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index aa5d5f1a4..fe7f46b8f 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -58,6 +58,12 @@ class AutoScalingResponse(BaseResponse): min_size=self._get_int_param('MinSize'), launch_config_name=self._get_param('LaunchConfigurationName'), vpc_zone_identifier=self._get_param('VPCZoneIdentifier'), + default_cooldown=self._get_int_param('DefaultCooldown'), + health_check_period=self._get_int_param('HealthCheckGracePeriod'), + health_check_type=self._get_param('HealthCheckType'), + load_balancers=self._get_multi_param('LoadBalancerNames.member'), + placement_group=self._get_param('PlacementGroup'), + termination_policies=self._get_multi_param('TerminationPolicies.member'), ) template = Template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -77,6 +83,12 @@ class AutoScalingResponse(BaseResponse): min_size=self._get_int_param('MinSize'), launch_config_name=self._get_param('LaunchConfigurationName'), vpc_zone_identifier=self._get_param('VPCZoneIdentifier'), + default_cooldown=self._get_int_param('DefaultCooldown'), + health_check_period=self._get_int_param('HealthCheckGracePeriod'), + health_check_type=self._get_param('HealthCheckType'), + load_balancers=self._get_multi_param('LoadBalancerNames.member'), + placement_group=self._get_param('PlacementGroup'), + termination_policies=self._get_multi_param('TerminationPolicies.member'), ) template = Template(UPDATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -201,7 +213,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {{ group.name }} - ELB + {{ group.health_check_type }} 2013-05-06T17:47:15.107Z {{ group.launch_config_name }} @@ -212,23 +224,38 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ availability_zone }} {% endfor %} - - my-test-asg-loadbalancer - + {% if group.load_balancers %} + + {% for load_balancer in group.load_balancers %} + {{ load_balancer }} + {% endfor %} + + {% else %} + + {% endif %} {{ group.min_size }} {% if group.vpc_zone_identifier %} {{ group.vpc_zone_identifier }} {% else %} {% endif %} - 120 - 300 + {{ group.health_check_period }} + {{ group.default_cooldown }} arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb :autoScalingGroupName/my-test-asg-lbs + {% if group.termination_policies %} - Default + {% for policy in group.termination_policies %} + {{ policy }} + {% endfor %} + {% else %} + + {% endif %} {{ group.max_size }} + {% if group.placement_group %} + {{ group.placement_group }} + {% endif %} {% endfor %} diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index ede0b6720..676833a96 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1,9 +1,7 @@ import boto from boto.ec2.autoscale.launchconfig import LaunchConfiguration from boto.ec2.autoscale.group import AutoScalingGroup -from nose.plugins.attrib import attr -import sure # flake8: noqa -from unittest import skipIf +import sure # noqa from moto import mock_autoscaling, mock_ec2 from tests.helpers import requires_boto_gte @@ -22,11 +20,17 @@ def test_create_autoscaling_group(): group = AutoScalingGroup( name='tester_group', 
availability_zones=['us-east-1c', 'us-east-1b'], + default_cooldown=60, desired_capacity=2, + health_check_period=100, + health_check_type="EC2", max_size=2, min_size=2, launch_config=config, + load_balancers=["test_lb"], + placement_group="test_placement", vpc_zone_identifier='subnet-1234abcd', + termination_policies=["OldestInstance", "NewestInstance"], ) conn.create_auto_scaling_group(group) @@ -38,6 +42,12 @@ def test_create_autoscaling_group(): group.min_size.should.equal(2) group.vpc_zone_identifier.should.equal('subnet-1234abcd') group.launch_config_name.should.equal('tester') + group.default_cooldown.should.equal(60) + group.health_check_period.should.equal(100) + group.health_check_type.should.equal("EC2") + list(group.load_balancers).should.equal(["test_lb"]) + group.placement_group.should.equal("test_placement") + list(group.termination_policies).should.equal(["OldestInstance", "NewestInstance"]) @mock_autoscaling @@ -70,6 +80,12 @@ def test_create_autoscaling_groups_defaults(): list(group.availability_zones).should.equal([]) group.desired_capacity.should.equal(2) group.vpc_zone_identifier.should.equal('') + group.default_cooldown.should.equal(300) + group.health_check_period.should.equal(None) + group.health_check_type.should.equal("EC2") + list(group.load_balancers).should.equal([]) + group.placement_group.should.equal(None) + list(group.termination_policies).should.equal([]) @mock_autoscaling From 59fe1abfdf6ea7285c57ee5298c24e84ffc26bc6 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Aug 2013 17:21:25 -0400 Subject: [PATCH 68/85] Cleanup flake8 --- .../test_launch_configurations.py | 2 +- tests/test_autoscaling/test_policies.py | 20 +++++++++---------- tests/test_autoscaling/test_server.py | 2 +- tests/test_core/test_decorator_calls.py | 4 ++-- tests/test_core/test_server.py | 2 +- tests/test_core/test_url_mapping.py | 2 +- tests/test_dynamodb/test_dynamodb.py | 4 +--- .../test_dynamodb_table_with_range_key.py | 6 ++---- .../test_dynamodb_table_without_range_key.py | 6 ++---- tests/test_dynamodb/test_server.py | 2 +- tests/test_ec2/test_amazon_dev_pay.py | 2 +- tests/test_ec2/test_amis.py | 2 +- .../test_availability_zones_and_regions.py | 2 +- tests/test_ec2/test_customer_gateways.py | 2 +- tests/test_ec2/test_dhcp_options.py | 2 +- tests/test_ec2/test_elastic_block_store.py | 2 +- tests/test_ec2/test_elastic_ip_addresses.py | 2 +- .../test_elastic_network_interfaces.py | 2 +- tests/test_ec2/test_general.py | 2 +- tests/test_ec2/test_instances.py | 2 +- tests/test_ec2/test_internet_gateways.py | 2 +- tests/test_ec2/test_ip_addresses.py | 2 +- tests/test_ec2/test_key_pairs.py | 2 +- tests/test_ec2/test_monitoring.py | 2 +- tests/test_ec2/test_network_acls.py | 2 +- tests/test_ec2/test_placement_groups.py | 2 +- tests/test_ec2/test_reserved_instances.py | 2 +- tests/test_ec2/test_route_tables.py | 2 +- tests/test_ec2/test_security_groups.py | 2 +- tests/test_ec2/test_server.py | 2 +- tests/test_ec2/test_spot_instances.py | 2 +- tests/test_ec2/test_subnets.py | 2 +- tests/test_ec2/test_tags.py | 4 ++-- .../test_ec2/test_virtual_private_gateways.py | 2 +- tests/test_ec2/test_vm_export.py | 2 +- tests/test_ec2/test_vm_import.py | 2 +- tests/test_ec2/test_vpcs.py | 2 +- tests/test_ec2/test_vpn_connections.py | 2 +- tests/test_ec2/test_windows.py | 2 +- tests/test_elb/test_elb.py | 12 +++++------ tests/test_elb/test_server.py | 2 +- tests/test_s3/test_s3.py | 3 +-- tests/test_s3/test_server.py | 2 +- tests/test_ses/test_server.py | 2 +- tests/test_ses/test_ses.py | 19 
++++++++++++------ tests/test_sqs/test_server.py | 2 +- tests/test_sqs/test_sqs.py | 2 +- tests/test_sts/test_server.py | 2 +- tests/test_sts/test_sts.py | 3 +-- 49 files changed, 79 insertions(+), 80 deletions(-) diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index fda0a51fb..446275098 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -1,7 +1,7 @@ import boto from boto.ec2.autoscale.launchconfig import LaunchConfiguration -import sure # flake8: noqa +import sure # noqa from moto import mock_autoscaling diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 7633a38cd..2f067151a 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -2,9 +2,9 @@ import boto from boto.ec2.autoscale.launchconfig import LaunchConfiguration from boto.ec2.autoscale.group import AutoScalingGroup from boto.ec2.autoscale.policy import ScalingPolicy -import sure # flake8: noqa +import sure # noqa -from moto import mock_autoscaling, mock_ec2 +from moto import mock_autoscaling def setup_autoscale_group(): @@ -28,7 +28,7 @@ def setup_autoscale_group(): @mock_autoscaling def test_create_policy(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -49,7 +49,7 @@ def test_create_policy(): @mock_autoscaling def test_create_policy_default_values(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -68,7 +68,7 @@ def test_create_policy_default_values(): @mock_autoscaling def test_update_policy(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -95,7 +95,7 @@ def test_update_policy(): @mock_autoscaling def test_delete_policy(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -113,7 +113,7 @@ def test_delete_policy(): @mock_autoscaling def test_execute_policy_exact_capacity(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -131,7 +131,7 @@ def test_execute_policy_exact_capacity(): @mock_autoscaling def test_execute_policy_positive_change_in_capacity(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -149,7 +149,7 @@ def test_execute_policy_positive_change_in_capacity(): @mock_autoscaling def test_execute_policy_percent_change_in_capacity(): - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', @@ -170,7 +170,7 @@ def test_execute_policy_small_percent_change_in_capacity(): """ http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html If PercentChangeInCapacity returns a value between 0 and 1, Auto Scaling will round it off to 1.""" - group = setup_autoscale_group() + setup_autoscale_group() conn = boto.connect_autoscale() policy = ScalingPolicy( name='ScaleUp', diff --git a/tests/test_autoscaling/test_server.py b/tests/test_autoscaling/test_server.py index 61fd2107e..d3ca05cd5 100644 --- a/tests/test_autoscaling/test_server.py +++ b/tests/test_autoscaling/test_server.py 
@@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index bdf1d8a86..f9bdbe1ac 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -1,6 +1,6 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 @@ -11,7 +11,7 @@ Test the different ways that the decorator can be used @mock_ec2 def test_basic_connect(): - conn = boto.connect_ec2() + boto.connect_ec2() @mock_ec2 diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index 5ea77c853..3d29132f2 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -1,5 +1,5 @@ from mock import patch -import sure # flake8: noqa +import sure # noqa from moto.server import main diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py index af688da9e..7218398e2 100644 --- a/tests/test_core/test_url_mapping.py +++ b/tests/test_core/test_url_mapping.py @@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa from moto.core.utils import convert_regex_to_flask_path diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index 5a2f36321..ac95fb88b 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -1,12 +1,10 @@ import boto -import sure # flake8: noqa -from freezegun import freeze_time +import sure # noqa import requests from moto import mock_dynamodb from moto.dynamodb import dynamodb_backend -from boto.dynamodb import condition from boto.exception import DynamoDBResponseError diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index 83cc81c10..0b5e56b20 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -1,9 +1,8 @@ import boto -import sure # flake8: noqa +import sure # noqa from freezegun import freeze_time from moto import mock_dynamodb -from moto.dynamodb import dynamodb_backend from boto.dynamodb import condition from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError @@ -501,5 +500,4 @@ def test_batch_read(): item.put() items = table.batch_get_item([('the-key', '123'), ('another-key', '789')]) - count = len([item for item in items]) - count.should.equal(2) + items.should.have.length_ofl(2) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index a3d68b113..a3546bbce 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -1,9 +1,8 @@ import boto -import sure # flake8: noqa +import sure # noqa from freezegun import freeze_time from moto import mock_dynamodb -from moto.dynamodb import dynamodb_backend from boto.dynamodb import condition from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError @@ -412,5 +411,4 @@ def test_batch_read(): item.put() items = table.batch_get_item([('the-key1'), ('another-key')]) - count = len([item for item in items]) - count.should.equal(2) + items.should.have.length_of(2) diff --git a/tests/test_dynamodb/test_server.py b/tests/test_dynamodb/test_server.py index 8b90989ae..74ca13b49 100644 --- a/tests/test_dynamodb/test_server.py +++ 
b/tests/test_dynamodb/test_server.py @@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_ec2/test_amazon_dev_pay.py b/tests/test_ec2/test_amazon_dev_pay.py index 2d7566abe..e91f1f4f3 100644 --- a/tests/test_ec2/test_amazon_dev_pay.py +++ b/tests/test_ec2/test_amazon_dev_pay.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index aaf63409c..6cd3812e5 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -1,7 +1,7 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index e83e090a5..3ac21bbc3 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index 1b5c37a8e..ef6645e40 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index 37dcc2de4..4806db2b8 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index f6afa13c3..c8c6ef54f 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,6 +1,6 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index a7a0579c5..5aba36b92 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index b0ee46b06..b158086fa 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index f8314a848..c969eb818 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -1,6 +1,6 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 5244afd4d..073ad7e4b 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -3,7 +3,7 @@ import base64 import boto from boto.ec2.instance import Reservation, InstanceAttribute from boto.exception import EC2ResponseError -import sure # flake8: 
noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index ad039bed7..67f0067ea 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_ip_addresses.py b/tests/test_ec2/test_ip_addresses.py index eecd3117b..1a6c5e84c 100644 --- a/tests/test_ec2/test_ip_addresses.py +++ b/tests/test_ec2/test_ip_addresses.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 8d8d7b212..7a961051e 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_monitoring.py b/tests/test_ec2/test_monitoring.py index b534a508f..84f513f21 100644 --- a/tests/test_ec2/test_monitoring.py +++ b/tests/test_ec2/test_monitoring.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 190f6f380..92f34e54f 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_placement_groups.py b/tests/test_ec2/test_placement_groups.py index adc913ccb..2be996840 100644 --- a/tests/test_ec2/test_placement_groups.py +++ b/tests/test_ec2/test_placement_groups.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_reserved_instances.py b/tests/test_ec2/test_reserved_instances.py index 8a99f5c23..fdb80bc63 100644 --- a/tests/test_ec2/test_reserved_instances.py +++ b/tests/test_ec2/test_reserved_instances.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 57e85a4f7..3d376961f 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 4cdb9a4eb..ce8de872b 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -1,6 +1,6 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_server.py b/tests/test_ec2/test_server.py index 1370bd71b..2d82c1c02 100644 --- a/tests/test_ec2/test_server.py +++ b/tests/test_ec2/test_server.py @@ -1,5 +1,5 @@ import re -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 603e4481b..0a08e243d 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_subnets.py 
b/tests/test_ec2/test_subnets.py index e6ebebc6b..f12d35de5 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,6 +1,6 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 2db250f39..1138b5e9f 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -1,7 +1,7 @@ import itertools import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 @@ -31,7 +31,7 @@ def test_instance_launch_and_retrieve_all_instances(): instance.add_tag("a key", "some value") chain = itertools.chain.from_iterable - existing_instances = list(chain([reservation.instances for reservation in conn.get_all_instances()])) + existing_instances = list(chain([res.instances for res in conn.get_all_instances()])) existing_instances.should.have.length_of(1) existing_instance = existing_instances[0] existing_instance.tags["a key"].should.equal("some value") diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py index 24e85b98d..b9fa80a65 100644 --- a/tests/test_ec2/test_virtual_private_gateways.py +++ b/tests/test_ec2/test_virtual_private_gateways.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_vm_export.py b/tests/test_ec2/test_vm_export.py index 6050a8c40..478372b78 100644 --- a/tests/test_ec2/test_vm_export.py +++ b/tests/test_ec2/test_vm_export.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_vm_import.py b/tests/test_ec2/test_vm_import.py index ef3e5e1a5..0c310fc5b 100644 --- a/tests/test_ec2/test_vm_import.py +++ b/tests/test_ec2/test_vm_import.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index cdf28ee55..3a4570ecd 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -1,6 +1,6 @@ import boto from boto.exception import EC2ResponseError -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py index 589d11952..4ce1e398e 100644 --- a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_ec2/test_windows.py b/tests/test_ec2/test_windows.py index 960538465..92f3be6a4 100644 --- a/tests/test_ec2/test_windows.py +++ b/tests/test_ec2/test_windows.py @@ -1,5 +1,5 @@ import boto -import sure # flake8: noqa +import sure # noqa from moto import mock_ec2 diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 11ddc0ced..e21b04b65 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -1,6 +1,6 @@ import boto from boto.ec2.elb import HealthCheck -import sure # flake8: noqa +import sure # noqa from moto import mock_elb, mock_ec2 @@ -11,7 +11,7 @@ def test_create_load_balancer(): zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', zones, ports) + conn.create_load_balancer('my-lb', zones, ports) balancers = conn.get_all_load_balancers() balancer = 
balancers[0] @@ -33,9 +33,9 @@ def test_get_load_balancers_by_name(): zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb1', zones, ports) - lb = conn.create_load_balancer('my-lb2', zones, ports) - lb = conn.create_load_balancer('my-lb3', zones, ports) + conn.create_load_balancer('my-lb1', zones, ports) + conn.create_load_balancer('my-lb2', zones, ports) + conn.create_load_balancer('my-lb3', zones, ports) conn.get_all_load_balancers().should.have.length_of(3) conn.get_all_load_balancers(load_balancer_names=['my-lb1']).should.have.length_of(1) @@ -48,7 +48,7 @@ def test_delete_load_balancer(): zones = ['us-east-1a'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', zones, ports) + conn.create_load_balancer('my-lb', zones, ports) balancers = conn.get_all_load_balancers() balancers.should.have.length_of(1) diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py index c739abb55..695e08410 100644 --- a/tests/test_elb/test_server.py +++ b/tests/test_elb/test_server.py @@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 5215d6409..6b33f5760 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,4 +1,3 @@ -import datetime import urllib2 import boto @@ -7,7 +6,7 @@ from boto.s3.key import Key from freezegun import freeze_time import requests -import sure # flake8: noqa +import sure # noqa from moto import mock_s3 diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 0ee507eae..d2f38cb07 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_ses/test_server.py b/tests/test_ses/test_server.py index 9ec047427..876fa1240 100644 --- a/tests/test_ses/test_server.py +++ b/tests/test_ses/test_server.py @@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index cde6b76b6..4c0440c40 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -3,7 +3,7 @@ import email import boto from boto.exception import BotoServerError -import sure # flake8: noqa +import sure # noqa from moto import mock_ses @@ -44,7 +44,8 @@ def test_delete_identity(): def test_send_email(): conn = boto.connect_ses('the_key', 'the_secret') - conn.send_email.when.called_with("test@example.com", "test subject", + conn.send_email.when.called_with( + "test@example.com", "test subject", "test body", "test_to@example.com").should.throw(BotoServerError) conn.verify_email_identity("test@example.com") @@ -74,12 +75,18 @@ def test_send_raw_email(): part.add_header('Content-Disposition', 'attachment; filename=test.txt') message.attach(part) - conn.send_raw_email.when.called_with(source=message['From'], raw_message=message.as_string(), - destinations=message['To']).should.throw(BotoServerError) + conn.send_raw_email.when.called_with( + source=message['From'], + raw_message=message.as_string(), + destinations=message['To'] + ).should.throw(BotoServerError) conn.verify_email_identity("test@example.com") - conn.send_raw_email(source=message['From'], raw_message=message.as_string(), - destinations=message['To']) + conn.send_raw_email( + source=message['From'], + raw_message=message.as_string(), + 
destinations=message['To'] + ) send_quota = conn.get_send_quota() sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py index 7a335b455..8934dcecc 100644 --- a/tests/test_sqs/test_server.py +++ b/tests/test_sqs/test_server.py @@ -1,5 +1,5 @@ import re -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index e4d7de655..bfd1c2759 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -2,7 +2,7 @@ import boto from boto.exception import SQSError from boto.sqs.message import RawMessage import requests -import sure # flake8: noqa +import sure # noqa from moto import mock_sqs diff --git a/tests/test_sts/test_server.py b/tests/test_sts/test_server.py index 0e7687c7e..9a505422f 100644 --- a/tests/test_sts/test_server.py +++ b/tests/test_sts/test_server.py @@ -1,4 +1,4 @@ -import sure # flake8: noqa +import sure # noqa import moto.server as server diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 0d05b613e..e0dbc9cd6 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -1,9 +1,8 @@ import json import boto -from boto.exception import BotoServerError from freezegun import freeze_time -import sure # flake8: noqa +import sure # noqa from moto import mock_sts From 02fa630a3c88be0d9c104eda12964f4a01084e25 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 3 Aug 2013 17:27:34 -0400 Subject: [PATCH 69/85] Fix dynamo batch length --- tests/test_dynamodb/test_dynamodb_table_with_range_key.py | 4 +++- tests/test_dynamodb/test_dynamodb_table_without_range_key.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index 0b5e56b20..12700707c 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -500,4 +500,6 @@ def test_batch_read(): item.put() items = table.batch_get_item([('the-key', '123'), ('another-key', '789')]) - items.should.have.length_ofl(2) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.equal(2) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index a3546bbce..81e76f7f8 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -411,4 +411,6 @@ def test_batch_read(): item.put() items = table.batch_get_item([('the-key1'), ('another-key')]) - items.should.have.length_of(2) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.have.equal(2) From cea25e75c5b5959e0707c5ec44d3764d30af00bf Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 7 Aug 2013 20:32:29 -0400 Subject: [PATCH 70/85] basic emr done --- moto/__init__.py | 1 + moto/emr/__init__.py | 2 + moto/emr/models.py | 167 ++++++++++++++++++++++++ moto/emr/responses.py | 192 ++++++++++++++++++++++++++++ moto/emr/urls.py | 9 ++ moto/emr/utils.py | 14 ++ tests/test_emr/test_emr.py | 233 ++++++++++++++++++++++++++++++++++ tests/test_emr/test_server.py | 16 +++ 8 files changed, 634 insertions(+) create mode 100644 moto/emr/__init__.py create mode 100644 moto/emr/models.py create mode 100644 
moto/emr/responses.py create mode 100644 moto/emr/urls.py create mode 100644 moto/emr/utils.py create mode 100644 tests/test_emr/test_emr.py create mode 100644 tests/test_emr/test_server.py diff --git a/moto/__init__.py b/moto/__init__.py index 8113260a7..57e8eef38 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -5,6 +5,7 @@ from .autoscaling import mock_autoscaling from .dynamodb import mock_dynamodb from .ec2 import mock_ec2 from .elb import mock_elb +from .emr import mock_emr from .s3 import mock_s3 from .ses import mock_ses from .sqs import mock_sqs diff --git a/moto/emr/__init__.py b/moto/emr/__init__.py new file mode 100644 index 000000000..7d4de9a5f --- /dev/null +++ b/moto/emr/__init__.py @@ -0,0 +1,2 @@ +from .models import emr_backend +mock_emr = emr_backend.decorator diff --git a/moto/emr/models.py b/moto/emr/models.py new file mode 100644 index 000000000..af2a4b45f --- /dev/null +++ b/moto/emr/models.py @@ -0,0 +1,167 @@ +from moto.core import BaseBackend + +from .utils import random_job_id, random_instance_group_id + + +class FakeInstanceGroup(object): + def __init__(self, id, instance_count, instance_role, instance_type, market, name, bid_price=None): + self.id = id + self.num_instances = instance_count + self.role = instance_role + self.type = instance_type + self.market = market + self.name = name + self.bid_price = bid_price + + def set_instance_count(self, instance_count): + self.num_instances = instance_count + + +class FakeStep(object): + def __init__(self, state, **kwargs): + # 'Steps.member.1.HadoopJarStep.Jar': ['/home/hadoop/contrib/streaming/hadoop-streaming.jar'], + # 'Steps.member.1.HadoopJarStep.Args.member.1': ['-mapper'], + # 'Steps.member.1.HadoopJarStep.Args.member.2': ['s3n://elasticmapreduce/samples/wordcount/wordSplitter.py'], + # 'Steps.member.1.HadoopJarStep.Args.member.3': ['-reducer'], + # 'Steps.member.1.HadoopJarStep.Args.member.4': ['aggregate'], + # 'Steps.member.1.HadoopJarStep.Args.member.5': ['-input'], + # 'Steps.member.1.HadoopJarStep.Args.member.6': ['s3n://elasticmapreduce/samples/wordcount/input'], + # 'Steps.member.1.HadoopJarStep.Args.member.7': ['-output'], + # 'Steps.member.1.HadoopJarStep.Args.member.8': ['s3n:///output/wordcount_output'], + # 'Steps.member.1.ActionOnFailure': ['TERMINATE_JOB_FLOW'], + # 'Steps.member.1.Name': ['My wordcount example']} + + self.action_on_failure = kwargs['action_on_failure'] + self.name = kwargs['name'] + self.jar = kwargs['hadoop_jar_step._jar'] + self.args = [] + self.state = state + + arg_index = 1 + while True: + arg = kwargs.get('hadoop_jar_step._args.member.{}'.format(arg_index)) + if arg: + self.args.append(arg) + arg_index += 1 + else: + break + + +class FakeJobFlow(object): + def __init__(self, job_id, name, log_uri, steps, instance_attrs): + self.id = job_id + self.name = name + self.log_uri = log_uri + self.state = "STARTING" + self.steps = [] + self.add_steps(steps) + + self.initial_instance_count = instance_attrs.get('instance_count', 0) + self.initial_master_instance_type = instance_attrs.get('master_instance_type') + self.initial_slave_instance_type = instance_attrs.get('slave_instance_type') + + self.ec2_key_name = instance_attrs.get('ec2_key_name') + self.availability_zone = instance_attrs.get('placement.availability_zone') + self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps') + self.termination_protected = instance_attrs.get('termination_protected') + + self.instance_group_ids = [] + + def terminate(self): + self.state = 
'TERMINATED' + + def add_steps(self, steps): + for index, step in enumerate(steps): + if self.steps: + # If we already have other steps, this one is pending + self.steps.append(FakeStep(state='PENDING', **step)) + else: + self.steps.append(FakeStep(state='STARTING', **step)) + + def add_instance_group(self, instance_group_id): + self.instance_group_ids.append(instance_group_id) + + @property + def instance_groups(self): + return emr_backend.get_instance_groups(self.instance_group_ids) + + @property + def master_instance_type(self): + groups = self.instance_groups + if groups: + groups[0].type + else: + return self.initial_master_instance_type + + @property + def slave_instance_type(self): + groups = self.instance_groups + if groups: + groups[0].type + else: + return self.initial_slave_instance_type + + @property + def instance_count(self): + groups = self.instance_groups + if not groups: + # No groups,return initial instance count + return self.initial_instance_count + count = 0 + for group in groups: + count += int(group.num_instances) + return count + + +class ElasticMapReduceBackend(BaseBackend): + + def __init__(self): + self.job_flows = {} + self.instance_groups = {} + + def run_job_flow(self, name, log_uri, steps, instance_attrs): + job_id = random_job_id() + job_flow = FakeJobFlow(job_id, name, log_uri, steps, instance_attrs) + self.job_flows[job_id] = job_flow + return job_flow + + def add_job_flow_steps(self, job_flow_id, steps): + job_flow = self.job_flows[job_flow_id] + job_flow.add_steps(steps) + return job_flow + + def describe_job_flows(self): + return self.job_flows.values() + + def terminate_job_flows(self, job_ids): + flows = [flow for flow in self.describe_job_flows() if flow.id in job_ids] + for flow in flows: + flow.terminate() + return flows + + def get_instance_groups(self, instance_group_ids): + return [ + group for group_id, group + in self.instance_groups.items() + if group_id in instance_group_ids + ] + + def add_instance_groups(self, job_flow_id, instance_groups): + job_flow = self.job_flows[job_flow_id] + result_groups = [] + for instance_group in instance_groups: + instance_group_id = random_instance_group_id() + group = FakeInstanceGroup(instance_group_id, **instance_group) + self.instance_groups[instance_group_id] = group + job_flow.add_instance_group(instance_group_id) + result_groups.append(group) + return result_groups + + def modify_instance_groups(self, instance_groups): + result_groups = [] + for instance_group in instance_groups: + group = self.instance_groups[instance_group['instance_group_id']] + group.set_instance_count(instance_group['instance_count']) + return result_groups + +emr_backend = ElasticMapReduceBackend() diff --git a/moto/emr/responses.py b/moto/emr/responses.py new file mode 100644 index 000000000..8886ecbc5 --- /dev/null +++ b/moto/emr/responses.py @@ -0,0 +1,192 @@ +from jinja2 import Template + +from moto.core.responses import BaseResponse +from moto.core.utils import camelcase_to_underscores +from .models import emr_backend + + +class ElasticMapReduceResponse(BaseResponse): + + def _get_param(self, param_name): + return self.querystring.get(param_name, [None])[0] + + def _get_multi_param(self, param_prefix): + return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)] + + def _get_dict_param(self, param_prefix): + return { + camelcase_to_underscores(key.replace(param_prefix, "")): value[0] + for key, value + in self.querystring.items() + if key.startswith(param_prefix) + } + + def 
_get_list_prefix(self, param_prefix): + results = [] + param_index = 1 + while True: + index_prefix = "{}.{}.".format(param_prefix, param_index) + new_items = { + camelcase_to_underscores(key.replace(index_prefix, "")): value[0] + for key, value in self.querystring.items() + if key.startswith(index_prefix) + } + if not new_items: + break + results.append(new_items) + param_index += 1 + return results + + def add_job_flow_steps(self): + job_flow_id = self._get_param('JobFlowId') + steps = self._get_list_prefix('Steps.member') + + job_flow = emr_backend.add_job_flow_steps(job_flow_id, steps) + template = Template(ADD_JOB_FLOW_STEPS_TEMPLATE) + return template.render(job_flow=job_flow) + + def run_job_flow(self): + flow_name = self._get_param('Name') + log_uri = self._get_param('LogUri') + steps = self._get_list_prefix('Steps.member') + instance_attrs = self._get_dict_param('Instances.') + + job_flow = emr_backend.run_job_flow(flow_name, log_uri, steps, instance_attrs) + template = Template(RUN_JOB_FLOW_TEMPLATE) + return template.render(job_flow=job_flow) + + def describe_job_flows(self): + job_flows = emr_backend.describe_job_flows() + template = Template(DESCRIBE_JOB_FLOWS_TEMPLATE) + return template.render(job_flows=job_flows) + + def terminate_job_flows(self): + job_ids = self._get_multi_param('JobFlowIds.member.') + job_flows = emr_backend.terminate_job_flows(job_ids) + template = Template(TERMINATE_JOB_FLOWS_TEMPLATE) + return template.render(job_flows=job_flows) + + def add_instance_groups(self): + jobflow_id = self._get_param('JobFlowId') + instance_groups = self._get_list_prefix('InstanceGroups.member') + instance_groups = emr_backend.add_instance_groups(jobflow_id, instance_groups) + template = Template(ADD_INSTANCE_GROUPS_TEMPLATE) + return template.render(instance_groups=instance_groups) + + def modify_instance_groups(self): + instance_groups = self._get_list_prefix('InstanceGroups.member') + instance_groups = emr_backend.modify_instance_groups(instance_groups) + template = Template(MODIFY_INSTANCE_GROUPS_TEMPLATE) + return template.render(instance_groups=instance_groups) + + +RUN_JOB_FLOW_TEMPLATE = """ + + {{ job_flow.id }} + + + + 8296d8b8-ed85-11dd-9877-6fad448a8419 + + +""" + +DESCRIBE_JOB_FLOWS_TEMPLATE = """ + + + {% for job_flow in job_flows %} + + + 2009-01-28T21:49:16Z + 2009-01-28T21:49:16Z + {{ job_flow.state }} + + {{ job_flow.name }} + {{ job_flow.log_uri }} + + {% for step in job_flow.steps %} + + + 2009-01-28T21:49:16Z + {{ step.state }} + + + + {{ step.jar }} + MyMainClass + + {% for arg in step.args %} + {{ arg }} + {% endfor %} + + + + {{ step.name }} + CONTINUE + + + {% endfor %} + + {{ job_flow.id }} + + + us-east-1a + + {{ job_flow.slave_instance_type }} + {{ job_flow.master_instance_type }} + {{ job_flow.ec2_key_name }} + {{ job_flow.instance_count }} + {{ job_flow.keep_job_flow_alive_when_no_steps }} + {{ job_flow.termination_protected }} + + {% for instance_group in job_flow.instance_groups %} + + {{ instance_group.id }} + {{ instance_group.role }} + {{ instance_group.num_instances }} + {{ instance_group.type }} + {{ instance_group.market }} + {{ instance_group.name }} + {{ instance_group.bid_price }} + + {% endfor %} + + + + {% endfor %} + + + + + 9cea3229-ed85-11dd-9877-6fad448a8419 + + +""" + +TERMINATE_JOB_FLOWS_TEMPLATE = """ + + + 2690d7eb-ed86-11dd-9877-6fad448a8419 + + +""" + +ADD_JOB_FLOW_STEPS_TEMPLATE = """ + + + df6f4f4a-ed85-11dd-9877-6fad448a8419 + + +""" + +ADD_INSTANCE_GROUPS_TEMPLATE = """ + {% for instance_group in instance_groups %}{{ 
instance_group.id }}{% if loop.index != loop.length %},{% endif %}{% endfor %} +""" + +MODIFY_INSTANCE_GROUPS_TEMPLATE = """ + + + 2690d7eb-ed86-11dd-9877-6fad448a8419 + + +""" diff --git a/moto/emr/urls.py b/moto/emr/urls.py new file mode 100644 index 000000000..8919362f7 --- /dev/null +++ b/moto/emr/urls.py @@ -0,0 +1,9 @@ +from .responses import ElasticMapReduceResponse + +url_bases = [ + "https?://elasticmapreduce.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ElasticMapReduceResponse().dispatch, +} diff --git a/moto/emr/utils.py b/moto/emr/utils.py new file mode 100644 index 000000000..4a0d6db0e --- /dev/null +++ b/moto/emr/utils.py @@ -0,0 +1,14 @@ +import random +import string + + +def random_job_id(size=13): + chars = range(10) + list(string.uppercase) + job_tag = ''.join(unicode(random.choice(chars)) for x in range(size)) + return 'j-{}'.format(job_tag) + + +def random_instance_group_id(size=13): + chars = range(10) + list(string.uppercase) + job_tag = ''.join(unicode(random.choice(chars)) for x in range(size)) + return 'i-{}'.format(job_tag) diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py new file mode 100644 index 000000000..add8c1859 --- /dev/null +++ b/tests/test_emr/test_emr.py @@ -0,0 +1,233 @@ +import boto +from boto.emr.instance_group import InstanceGroup +from boto.emr.step import StreamingStep +import sure # noqa + +from moto import mock_emr + + +@mock_emr +def test_create_job_flow(): + conn = boto.connect_emr() + + step1 = StreamingStep( + name='My wordcount example', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input', + output='s3n://output_bucket/output/wordcount_output' + ) + + step2 = StreamingStep( + name='My wordcount example2', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input2', + output='s3n://output_bucket/output/wordcount_output2' + ) + + job_id = conn.run_jobflow( + name='My jobflow', + log_uri='s3://some_bucket/jobflow_logs', + master_instance_type='m1.medium', + slave_instance_type='m1.small', + steps=[step1, step2] + ) + + job_flow = conn.describe_jobflow(job_id) + job_flow.state.should.equal('STARTING') + job_flow.jobflowid.should.equal(job_id) + job_flow.name.should.equal('My jobflow') + job_flow.masterinstancetype.should.equal('m1.medium') + job_flow.slaveinstancetype.should.equal('m1.small') + job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs') + job_step = job_flow.steps[0] + job_step.name.should.equal('My wordcount example') + job_step.state.should.equal('STARTING') + args = [arg.value for arg in job_step.args] + args.should.equal([ + '-mapper', + 's3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + '-reducer', + 'aggregate', + '-input', + 's3n://elasticmapreduce/samples/wordcount/input', + '-output', + 's3n://output_bucket/output/wordcount_output', + ]) + + job_step2 = job_flow.steps[1] + job_step2.name.should.equal('My wordcount example2') + job_step2.state.should.equal('PENDING') + args = [arg.value for arg in job_step2.args] + args.should.equal([ + '-mapper', + 's3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', + '-reducer', + 'aggregate', + '-input', + 's3n://elasticmapreduce/samples/wordcount/input2', + '-output', + 's3n://output_bucket/output/wordcount_output2', + ]) + + +@mock_emr +def test_terminate_job_flow(): + conn = boto.connect_emr() + job_id = conn.run_jobflow( + name='My jobflow', + 
log_uri='s3://some_bucket/jobflow_logs', + steps=[] + ) + + flow = conn.describe_jobflows()[0] + flow.state.should.equal('STARTING') + conn.terminate_jobflow(job_id) + flow = conn.describe_jobflows()[0] + flow.state.should.equal('TERMINATED') + + +@mock_emr +def test_add_steps_to_flow(): + conn = boto.connect_emr() + + step1 = StreamingStep( + name='My wordcount example', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input', + output='s3n://output_bucket/output/wordcount_output' + ) + + job_id = conn.run_jobflow( + name='My jobflow', + log_uri='s3://some_bucket/jobflow_logs', + steps=[step1] + ) + + job_flow = conn.describe_jobflow(job_id) + job_flow.state.should.equal('STARTING') + job_flow.jobflowid.should.equal(job_id) + job_flow.name.should.equal('My jobflow') + job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs') + + step2 = StreamingStep( + name='My wordcount example2', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input2', + output='s3n://output_bucket/output/wordcount_output2' + ) + + conn.add_jobflow_steps(job_id, [step2]) + + job_flow = conn.describe_jobflow(job_id) + job_step = job_flow.steps[0] + job_step.name.should.equal('My wordcount example') + job_step.state.should.equal('STARTING') + args = [arg.value for arg in job_step.args] + args.should.equal([ + '-mapper', + 's3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + '-reducer', + 'aggregate', + '-input', + 's3n://elasticmapreduce/samples/wordcount/input', + '-output', + 's3n://output_bucket/output/wordcount_output', + ]) + + job_step2 = job_flow.steps[1] + job_step2.name.should.equal('My wordcount example2') + job_step2.state.should.equal('PENDING') + args = [arg.value for arg in job_step2.args] + args.should.equal([ + '-mapper', + 's3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', + '-reducer', + 'aggregate', + '-input', + 's3n://elasticmapreduce/samples/wordcount/input2', + '-output', + 's3n://output_bucket/output/wordcount_output2', + ]) + + +@mock_emr +def test_create_instance_groups(): + conn = boto.connect_emr() + + step1 = StreamingStep( + name='My wordcount example', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input', + output='s3n://output_bucket/output/wordcount_output' + ) + + job_id = conn.run_jobflow( + name='My jobflow', + log_uri='s3://some_bucket/jobflow_logs', + steps=[step1] + ) + + instance_group = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07') + instance_group = conn.add_instance_groups(job_id, [instance_group]) + instance_group_id = instance_group.instancegroupids + job_flow = conn.describe_jobflows()[0] + int(job_flow.instancecount).should.equal(6) + instance_group = job_flow.instancegroups[0] + instance_group.instancegroupid.should.equal(instance_group_id) + int(instance_group.instancerunningcount).should.equal(6) + instance_group.instancerole.should.equal('TASK') + instance_group.instancetype.should.equal('c1.medium') + instance_group.market.should.equal('SPOT') + instance_group.name.should.equal('spot-0.07') + instance_group.bidprice.should.equal('0.07') + + +@mock_emr +def test_modify_instance_groups(): + conn = boto.connect_emr() + + step1 = StreamingStep( + name='My wordcount example', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + 
reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input', + output='s3n://output_bucket/output/wordcount_output' + ) + + job_id = conn.run_jobflow( + name='My jobflow', + log_uri='s3://some_bucket/jobflow_logs', + steps=[step1] + ) + + instance_group1 = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07') + instance_group2 = InstanceGroup(6, 'TASK', 'c1.medium', 'SPOT', 'spot-0.07', '0.07') + instance_group = conn.add_instance_groups(job_id, [instance_group1, instance_group2]) + instance_group_ids = instance_group.instancegroupids.split(",") + + job_flow = conn.describe_jobflows()[0] + int(job_flow.instancecount).should.equal(12) + instance_group = job_flow.instancegroups[0] + int(instance_group.instancerunningcount).should.equal(6) + + conn.modify_instance_groups(instance_group_ids, [2, 3]) + + job_flow = conn.describe_jobflows()[0] + int(job_flow.instancecount).should.equal(5) + instance_group1 = [ + group for group + in job_flow.instancegroups + if group.instancegroupid == instance_group_ids[0] + ][0] + int(instance_group1.instancerunningcount).should.equal(2) + instance_group2 = [ + group for group + in job_flow.instancegroups + if group.instancegroupid == instance_group_ids[1] + ][0] + int(instance_group2.instancerunningcount).should.equal(3) diff --git a/tests/test_emr/test_server.py b/tests/test_emr/test_server.py new file mode 100644 index 000000000..85ba7c4db --- /dev/null +++ b/tests/test_emr/test_server.py @@ -0,0 +1,16 @@ +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' +server.configure_urls("emr") + + +def test_describe_jobflows(): + test_client = server.app.test_client() + res = test_client.get('/?Action=DescribeJobFlows') + + res.data.should.contain('') + res.data.should.contain('') From ded410460f3e8f3c9a614d0f3eeda8d0febd4a09 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 7 Aug 2013 20:36:58 -0400 Subject: [PATCH 71/85] Add EMR backend --- moto/backends.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/moto/backends.py b/moto/backends.py index 5a1776455..6f375a8f1 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -2,6 +2,7 @@ from moto.autoscaling import autoscaling_backend from moto.dynamodb import dynamodb_backend from moto.ec2 import ec2_backend from moto.elb import elb_backend +from moto.emr import emr_backend from moto.s3 import s3_backend from moto.ses import ses_backend from moto.sqs import sqs_backend @@ -12,6 +13,7 @@ BACKENDS = { 'dynamodb': dynamodb_backend, 'ec2': ec2_backend, 'elb': elb_backend, + 'emr': emr_backend, 's3': s3_backend, 'ses': ses_backend, 'sqs': sqs_backend, From e251fd8930cc013df74e64a21cb930bb5b088997 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Thu, 8 Aug 2013 10:40:29 -0400 Subject: [PATCH 72/85] Add job_flow_role param or EMR --- moto/emr/models.py | 9 ++++++--- moto/emr/responses.py | 4 +++- tests/test_emr/test_emr.py | 5 +++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/moto/emr/models.py b/moto/emr/models.py index af2a4b45f..e8f08d1d9 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -2,6 +2,8 @@ from moto.core import BaseBackend from .utils import random_job_id, random_instance_group_id +DEFAULT_JOB_FLOW_ROLE = 'EMRJobflowDefault' + class FakeInstanceGroup(object): def __init__(self, id, instance_count, instance_role, instance_type, market, name, bid_price=None): @@ -48,10 +50,11 @@ class FakeStep(object): class FakeJobFlow(object): - def __init__(self, job_id, name, log_uri, steps, instance_attrs): 
+ def __init__(self, job_id, name, log_uri, job_flow_role, steps, instance_attrs): self.id = job_id self.name = name self.log_uri = log_uri + self.role = job_flow_role or DEFAULT_JOB_FLOW_ROLE self.state = "STARTING" self.steps = [] self.add_steps(steps) @@ -119,9 +122,9 @@ class ElasticMapReduceBackend(BaseBackend): self.job_flows = {} self.instance_groups = {} - def run_job_flow(self, name, log_uri, steps, instance_attrs): + def run_job_flow(self, name, log_uri, job_flow_role, steps, instance_attrs): job_id = random_job_id() - job_flow = FakeJobFlow(job_id, name, log_uri, steps, instance_attrs) + job_flow = FakeJobFlow(job_id, name, log_uri, job_flow_role, steps, instance_attrs) self.job_flows[job_id] = job_flow return job_flow diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 8886ecbc5..5aeecbc3e 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -50,8 +50,9 @@ class ElasticMapReduceResponse(BaseResponse): log_uri = self._get_param('LogUri') steps = self._get_list_prefix('Steps.member') instance_attrs = self._get_dict_param('Instances.') + job_flow_role = self._get_param('JobFlowRole') - job_flow = emr_backend.run_job_flow(flow_name, log_uri, steps, instance_attrs) + job_flow = emr_backend.run_job_flow(flow_name, log_uri, job_flow_role, steps, instance_attrs) template = Template(RUN_JOB_FLOW_TEMPLATE) return template.render(job_flow=job_flow) @@ -102,6 +103,7 @@ DESCRIBE_JOB_FLOWS_TEMPLATE = """ Date: Sat, 10 Aug 2013 20:03:57 -0400 Subject: [PATCH 74/85] Add visibletoallusers and normalizedinstancehours to EMR --- moto/emr/models.py | 20 +++++++++++++++--- moto/emr/responses.py | 23 ++++++++++++++++++++- tests/test_emr/test_emr.py | 42 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 4 deletions(-) diff --git a/moto/emr/models.py b/moto/emr/models.py index e8f08d1d9..2fc06ef62 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -50,7 +50,7 @@ class FakeStep(object): class FakeJobFlow(object): - def __init__(self, job_id, name, log_uri, job_flow_role, steps, instance_attrs): + def __init__(self, job_id, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs): self.id = job_id self.name = name self.log_uri = log_uri @@ -63,6 +63,8 @@ class FakeJobFlow(object): self.initial_master_instance_type = instance_attrs.get('master_instance_type') self.initial_slave_instance_type = instance_attrs.get('slave_instance_type') + self.set_visibility(visible_to_all_users) + self.normalized_instance_hours = 0 self.ec2_key_name = instance_attrs.get('ec2_key_name') self.availability_zone = instance_attrs.get('placement.availability_zone') self.keep_job_flow_alive_when_no_steps = instance_attrs.get('keep_job_flow_alive_when_no_steps') @@ -73,6 +75,12 @@ class FakeJobFlow(object): def terminate(self): self.state = 'TERMINATED' + def set_visibility(self, visibility): + if visibility == 'true': + self.visible_to_all_users = True + else: + self.visible_to_all_users = False + def add_steps(self, steps): for index, step in enumerate(steps): if self.steps: @@ -122,9 +130,9 @@ class ElasticMapReduceBackend(BaseBackend): self.job_flows = {} self.instance_groups = {} - def run_job_flow(self, name, log_uri, job_flow_role, steps, instance_attrs): + def run_job_flow(self, name, log_uri, job_flow_role, visible_to_all_users, steps, instance_attrs): job_id = random_job_id() - job_flow = FakeJobFlow(job_id, name, log_uri, job_flow_role, steps, instance_attrs) + job_flow = FakeJobFlow(job_id, name, log_uri, job_flow_role, visible_to_all_users, 
steps, instance_attrs) self.job_flows[job_id] = job_flow return job_flow @@ -167,4 +175,10 @@ class ElasticMapReduceBackend(BaseBackend): group.set_instance_count(instance_group['instance_count']) return result_groups + def set_visible_to_all_users(self, job_ids, visible_to_all_users): + for job_id in job_ids: + job = self.job_flows[job_id] + job.set_visibility(visible_to_all_users) + + emr_backend = ElasticMapReduceBackend() diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 5aeecbc3e..89da0658f 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -51,8 +51,12 @@ class ElasticMapReduceResponse(BaseResponse): steps = self._get_list_prefix('Steps.member') instance_attrs = self._get_dict_param('Instances.') job_flow_role = self._get_param('JobFlowRole') + visible_to_all_users = self._get_param('VisibleToAllUsers') - job_flow = emr_backend.run_job_flow(flow_name, log_uri, job_flow_role, steps, instance_attrs) + job_flow = emr_backend.run_job_flow( + flow_name, log_uri, job_flow_role, + visible_to_all_users, steps, instance_attrs + ) template = Template(RUN_JOB_FLOW_TEMPLATE) return template.render(job_flow=job_flow) @@ -80,6 +84,13 @@ class ElasticMapReduceResponse(BaseResponse): template = Template(MODIFY_INSTANCE_GROUPS_TEMPLATE) return template.render(instance_groups=instance_groups) + def set_visible_to_all_users(self): + visible_to_all_users = self._get_param('VisibleToAllUsers') + job_ids = self._get_multi_param('JobFlowIds.member') + emr_backend.set_visible_to_all_users(job_ids, visible_to_all_users) + template = Template(SET_VISIBLE_TO_ALL_USERS_TEMPLATE) + return template.render() + RUN_JOB_FLOW_TEMPLATE = """ @@ -137,6 +148,8 @@ DESCRIBE_JOB_FLOWS_TEMPLATE = """ + + + 2690d7eb-ed86-11dd-9877-6fad448a8419 + + +""" diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 4b3fc4286..eb17c37ad 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -42,6 +42,8 @@ def test_create_job_flow(): job_flow.masterinstancetype.should.equal('m1.medium') job_flow.slaveinstancetype.should.equal('m1.small') job_flow.loguri.should.equal('s3://some_bucket/jobflow_logs') + job_flow.visibletoallusers.should.equal('False') + int(job_flow.normalizedinstancehours).should.equal(0) job_step = job_flow.steps[0] job_step.name.should.equal('My wordcount example') job_step.state.should.equal('STARTING') @@ -89,6 +91,21 @@ def test_create_job_flow_with_new_params(): ) +@mock_emr +def test_create_job_flow_visible_to_all_users(): + conn = boto.connect_emr() + + job_id = conn.run_jobflow( + name='My jobflow', + log_uri='s3://some_bucket/jobflow_logs', + job_flow_role='some-role-arn', + steps=[], + visible_to_all_users=True, + ) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('True') + + @mock_emr def test_terminate_job_flow(): conn = boto.connect_emr() @@ -248,3 +265,28 @@ def test_modify_instance_groups(): if group.instancegroupid == instance_group_ids[1] ][0] int(instance_group2.instancerunningcount).should.equal(3) + + +@mock_emr +def test_set_visible_to_all_users(): + conn = boto.connect_emr() + + job_id = conn.run_jobflow( + name='My jobflow', + log_uri='s3://some_bucket/jobflow_logs', + job_flow_role='some-role-arn', + steps=[], + visible_to_all_users=False, + ) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('False') + + conn.set_visible_to_all_users(job_id, True) + + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('True') + + 
conn.set_visible_to_all_users(job_id, False) + + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('False') From a0e2cb3d9880026d962d7ed8b25746a482f3cbf5 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Sat, 17 Aug 2013 18:11:29 -0400 Subject: [PATCH 75/85] Add EC2 spot instances --- moto/ec2/models.py | 85 ++++++++++- moto/ec2/responses/spot_instances.py | 197 +++++++++++++++++++++++++- moto/ec2/utils.py | 28 ++-- tests/test_ec2/test_spot_instances.py | 94 +++++++++++- 4 files changed, 381 insertions(+), 23 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 0ea10555c..2150f2567 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -11,6 +11,7 @@ from .utils import ( random_reservation_id, random_security_group_id, random_snapshot_id, + random_spot_request_id, random_subnet_id, random_volume_id, random_vpc_id, @@ -306,11 +307,12 @@ class SecurityGroupBackend(object): self.groups = {} super(SecurityGroupBackend, self).__init__() - def create_security_group(self, name, description): + def create_security_group(self, name, description, force=False): group_id = random_security_group_id() - existing_group = self.get_security_group_from_name(name) - if existing_group: - return None + if not force: + existing_group = self.get_security_group_from_name(name) + if existing_group: + return None group = SecurityGroup(group_id, name, description) self.groups[group_id] = group return group @@ -333,6 +335,11 @@ class SecurityGroupBackend(object): if group.name == name: return group + if name == 'default': + # If the request is for the default group and it does not exist, create it + default_group = ec2_backend.create_security_group("default", "The default security group", force=True) + return default_group + def authorize_security_group_ingress(self, group_name, ip_protocol, from_port, to_port, ip_ranges=None, source_group_names=None): group = self.get_security_group_from_name(group_name) source_groups = [] @@ -496,9 +503,77 @@ class SubnetBackend(object): return self.subnets.pop(subnet_id, None) +class SpotInstanceRequest(object): + def __init__(self, spot_request_id, price, image_id, type, valid_from, + valid_until, launch_group, availability_zone_group, key_name, + security_groups, user_data, instance_type, placement, kernel_id, + ramdisk_id, monitoring_enabled, subnet_id): + self.id = spot_request_id + self.state = "open" + self.price = price + self.image_id = image_id + self.type = type + self.valid_from = valid_from + self.valid_until = valid_until + self.launch_group = launch_group + self.availability_zone_group = availability_zone_group + self.key_name = key_name + self.user_data = user_data + self.instance_type = instance_type + self.placement = placement + self.kernel_id = kernel_id + self.ramdisk_id = ramdisk_id + self.monitoring_enabled = monitoring_enabled + self.subnet_id = subnet_id + + self.security_groups = [] + if security_groups: + for group_name in security_groups: + group = ec2_backend.get_security_group_from_name(group_name) + if group: + self.security_groups.append(group) + else: + # If not security groups, add the default + default_group = ec2_backend.get_security_group_from_name("default") + self.security_groups.append(default_group) + + +class SpotRequestBackend(object): + def __init__(self): + self.spot_instance_requests = {} + super(SpotRequestBackend, self).__init__() + + def request_spot_instances(self, price, image_id, count, type, valid_from, + valid_until, launch_group, availability_zone_group, + key_name, 
security_groups, user_data, + instance_type, placement, kernel_id, ramdisk_id, + monitoring_enabled, subnet_id): + requests = [] + for index in range(count): + spot_request_id = random_spot_request_id() + request = SpotInstanceRequest( + spot_request_id, price, image_id, type, valid_from, valid_until, + launch_group, availability_zone_group, key_name, security_groups, + user_data, instance_type, placement, kernel_id, ramdisk_id, + monitoring_enabled, subnet_id + ) + self.spot_instance_requests[spot_request_id] = request + requests.append(request) + return requests + + def describe_spot_instance_requests(self): + return self.spot_instance_requests.values() + + def cancel_spot_instance_requests(self, request_ids): + requests = [] + for request_id in request_ids: + requests.append(self.spot_instance_requests.pop(request_id)) + return requests + + class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend, - VPCBackend, SubnetBackend): + VPCBackend, SubnetBackend, SpotRequestBackend): pass diff --git a/moto/ec2/responses/spot_instances.py b/moto/ec2/responses/spot_instances.py index 0ace72dfd..759914989 100644 --- a/moto/ec2/responses/spot_instances.py +++ b/moto/ec2/responses/spot_instances.py @@ -1,12 +1,25 @@ from jinja2 import Template from moto.ec2.models import ec2_backend -from moto.ec2.utils import resource_ids_from_querystring class SpotInstances(object): + def _get_param(self, param_name): + return self.querystring.get(param_name, [None])[0] + + def _get_int_param(self, param_name): + value = self._get_param(param_name) + if value is not None: + return int(value) + + def _get_multi_param(self, param_prefix): + return [value[0] for key, value in self.querystring.items() if key.startswith(param_prefix)] + def cancel_spot_instance_requests(self): - raise NotImplementedError('SpotInstances.cancel_spot_instance_requests is not yet implemented') + request_ids = self._get_multi_param('SpotInstanceRequestId') + requests = ec2_backend.cancel_spot_instance_requests(request_ids) + template = Template(CANCEL_SPOT_INSTANCES_TEMPLATE) + return template.render(requests=requests) def create_spot_datafeed_subscription(self): raise NotImplementedError('SpotInstances.create_spot_datafeed_subscription is not yet implemented') @@ -18,10 +31,186 @@ class SpotInstances(object): raise NotImplementedError('SpotInstances.describe_spot_datafeed_subscription is not yet implemented') def describe_spot_instance_requests(self): - raise NotImplementedError('SpotInstances.describe_spot_instance_requests is not yet implemented') + requests = ec2_backend.describe_spot_instance_requests() + template = Template(DESCRIBE_SPOT_INSTANCES_TEMPLATE) + return template.render(requests=requests) def describe_spot_price_history(self): raise NotImplementedError('SpotInstances.describe_spot_price_history is not yet implemented') def request_spot_instances(self): - raise NotImplementedError('SpotInstances.request_spot_instances is not yet implemented') + price = self._get_param('SpotPrice') + image_id = self._get_param('LaunchSpecification.ImageId') + count = self._get_int_param('InstanceCount') + type = self._get_param('Type') + valid_from = self._get_param('ValidFrom') + valid_until = self._get_param('ValidUntil') + launch_group = self._get_param('LaunchGroup') + availability_zone_group = self._get_param('AvailabilityZoneGroup') + key_name = self._get_param('LaunchSpecification.KeyName') + security_groups = 
self._get_multi_param('LaunchSpecification.SecurityGroup.') + user_data = self._get_param('LaunchSpecification.UserData') + instance_type = self._get_param('LaunchSpecification.InstanceType') + placement = self._get_param('LaunchSpecification.Placement.AvailabilityZone') + kernel_id = self._get_param('LaunchSpecification.KernelId') + ramdisk_id = self._get_param('LaunchSpecification.RamdiskId') + monitoring_enabled = self._get_param('LaunchSpecification.Monitoring.Enabled') + subnet_id = self._get_param('LaunchSpecification.SubnetId') + + requests = ec2_backend.request_spot_instances( + price=price, + image_id=image_id, + count=count, + type=type, + valid_from=valid_from, + valid_until=valid_until, + launch_group=launch_group, + availability_zone_group=availability_zone_group, + key_name=key_name, + security_groups=security_groups, + user_data=user_data, + instance_type=instance_type, + placement=placement, + kernel_id=kernel_id, + ramdisk_id=ramdisk_id, + monitoring_enabled=monitoring_enabled, + subnet_id=subnet_id, + ) + + template = Template(REQUEST_SPOT_INSTANCES_TEMPLATE) + return template.render(requests=requests) + + +REQUEST_SPOT_INSTANCES_TEMPLATE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + {% for request in requests %} + + {{ request.price }} + {{ request.price }} + {{ request.type }} + {{ request.state }} + + pending-evaluation + YYYY-MM-DDTHH:MM:SS.000Z + Your Spot request has been submitted for review, and is pending evaluation. + + {{ request.availability_zone_group }} + + {{ request.image_id }} + {{ request.key_name }} + + {% for group in request.security_groups %} + + {{ group.id }} + {{ group.name }} + + {% endfor %} + + {{ request.kernel_id }} + {{ request.ramdisk_id }} + {{ request.subnet_id }} + {{ request.instance_type }} + + + {{ request.monitoring_enabled }} + + {{ request.ebs_optimized }} + + {{ request.placement }} + + + + {{ request.launch_group }} + YYYY-MM-DDTHH:MM:SS.000Z + {% if request.valid_from %} + {{ request.valid_from }} + {% endif %} + {% if request.valid_until %} + {{ request.valid_until }} + {% endif %} + Linux/UNIX + + {% endfor %} + +""" + +DESCRIBE_SPOT_INSTANCES_TEMPLATE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + {% for request in requests %} + + {{ request.id }} + {{ request.price }} + {{ request.type }} + {{ request.state }} + + pending-evaluation + YYYY-MM-DDTHH:MM:SS.000Z + Your Spot request has been submitted for review, and is pending evaluation. 
+ + {% if request.availability_zone_group %} + {{ request.availability_zone_group }} + {% endif %} + + {{ request.image_id }} + {% if request.key_name %} + {{ request.key_name }} + {% endif %} + + {% for group in request.security_groups %} + + {{ group.id }} + {{ group.name }} + + {% endfor %} + + {% if request.kernel_id %} + {{ request.kernel_id }} + {% endif %} + {% if request.ramdisk_id %} + {{ request.ramdisk_id }} + {% endif %} + {% if request.subnet_id %} + {{ request.subnet_id }} + {% endif %} + {{ request.instance_type }} + + + {{ request.monitoring_enabled }} + + {{ request.ebs_optimized }} + {% if request.placement %} + + {{ request.placement }} + + + {% endif %} + + {% if request.launch_group %} + {{ request.launch_group }} + {% endif %} + YYYY-MM-DDTHH:MM:SS.000Z + {% if request.valid_from %} + {{ request.valid_from }} + {% endif %} + {% if request.valid_until %} + {{ request.valid_until }} + {% endif %} + Linux/UNIX + + {% endfor %} + +""" + +CANCEL_SPOT_INSTANCES_TEMPLATE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + + {% for request in requests %} + + {{ request.id }} + cancelled + + {% endfor %} + +""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 7dfa3ea03..a86ed64c5 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -10,6 +10,10 @@ def random_id(prefix=''): return '{}-{}'.format(prefix, instance_tag) +def random_ami_id(): + return random_id(prefix='ami') + + def random_instance_id(): return random_id(prefix='i') @@ -18,14 +22,22 @@ def random_reservation_id(): return random_id(prefix='r') -def random_ami_id(): - return random_id(prefix='ami') - - def random_security_group_id(): return random_id(prefix='sg') +def random_snapshot_id(): + return random_id(prefix='snap') + + +def random_spot_request_id(): + return random_id(prefix='sir') + + +def random_subnet_id(): + return random_id(prefix='subnet') + + def random_volume_id(): return random_id(prefix='vol') @@ -34,14 +46,6 @@ def random_vpc_id(): return random_id(prefix='vpc') -def random_subnet_id(): - return random_id(prefix='subnet') - - -def random_snapshot_id(): - return random_id(prefix='snap') - - def instance_ids_from_querystring(querystring_dict): instance_ids = [] for key, value in querystring_dict.iteritems(): diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 0a08e243d..91a3158eb 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,9 +1,99 @@ +import datetime + import boto import sure # noqa from moto import mock_ec2 +from moto.core.utils import iso_8601_datetime @mock_ec2 -def test_spot_instances(): - pass +def test_request_spot_instances(): + conn = boto.connect_ec2() + + conn.create_security_group('group1', 'description') + conn.create_security_group('group2', 'description') + + start = iso_8601_datetime(datetime.datetime(2013, 1, 1)) + end = iso_8601_datetime(datetime.datetime(2013, 1, 2)) + + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', count=1, type='one-time', + valid_from=start, valid_until=end, launch_group="the-group", + availability_zone_group='my-group', key_name="test", + security_groups=['group1', 'group2'], user_data="some test data", + instance_type='m1.small', placement='us-east-1c', + kernel_id="test-kernel", ramdisk_id="test-ramdisk", + monitoring_enabled=True, subnet_id="subnet123", + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("open") + 
request.price.should.equal(0.5) + request.launch_specification.image_id.should.equal('ami-abcd1234') + request.type.should.equal('one-time') + request.valid_from.should.equal(start) + request.valid_until.should.equal(end) + request.launch_group.should.equal("the-group") + request.availability_zone_group.should.equal('my-group') + request.launch_specification.key_name.should.equal("test") + security_group_names = [group.name for group in request.launch_specification.groups] + set(security_group_names).should.equal(set(['group1', 'group2'])) + request.launch_specification.instance_type.should.equal('m1.small') + request.launch_specification.placement.should.equal('us-east-1c') + request.launch_specification.kernel.should.equal("test-kernel") + request.launch_specification.ramdisk.should.equal("test-ramdisk") + request.launch_specification.subnet_id.should.equal("subnet123") + + +@mock_ec2 +def test_request_spot_instances_default_arguments(): + """ + Test that moto set the correct default arguments + """ + conn = boto.connect_ec2() + + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("open") + request.price.should.equal(0.5) + request.launch_specification.image_id.should.equal('ami-abcd1234') + request.type.should.equal('one-time') + request.valid_from.should.equal(None) + request.valid_until.should.equal(None) + request.launch_group.should.equal(None) + request.availability_zone_group.should.equal(None) + request.launch_specification.key_name.should.equal(None) + security_group_names = [group.name for group in request.launch_specification.groups] + security_group_names.should.equal(["default"]) + request.launch_specification.instance_type.should.equal('m1.small') + request.launch_specification.placement.should.equal(None) + request.launch_specification.kernel.should.equal(None) + request.launch_specification.ramdisk.should.equal(None) + request.launch_specification.subnet_id.should.equal(None) + + +@mock_ec2 +def test_cancel_spot_instance_request(): + conn = boto.connect_ec2() + + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + + conn.cancel_spot_instance_requests([requests[0].id]) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(0) From dc2802a2383a1799b75febb71f078d3a85136959 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 10 Sep 2013 08:27:28 -0400 Subject: [PATCH 76/85] Set __wrapped__ on MockAWS decorator to be the test function. cc #41. 
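A brief illustration of what this patch enables. On Python 2.7, functools.update_wrapper copies __name__, __doc__, and __dict__ but does not set __wrapped__, so MockAWS assigns it explicitly; test runners and introspection tools can then reach the undecorated test through wrapper.__wrapped__. The sketch below is not moto code; mock_something and test_example are hypothetical names used only to show the pattern.

    import functools

    def mock_something(func):
        # Stand-in for moto's MockAWS.__call__: wrap the test and run the
        # mocked-backend setup/teardown around it (elided here).
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        functools.update_wrapper(wrapper, func)
        wrapper.__wrapped__ = func  # the one-line addition this patch makes
        return wrapper

    @mock_something
    def test_example():
        pass

    # The original function stays reachable through the decorator.
    assert test_example.__wrapped__.__name__ == 'test_example'
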
--- moto/core/models.py | 1 + tests/test_core/test_decorator_calls.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/moto/core/models.py b/moto/core/models.py index c451fb11d..f3e6ad701 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -47,6 +47,7 @@ class MockAWS(object): result = func(*args, **kwargs) return result functools.update_wrapper(wrapper, func) + wrapper.__wrapped__ = func return wrapper diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index f9bdbe1ac..444bae89d 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -42,3 +42,11 @@ def test_decorator_start_and_stop(): mock.stop() conn.get_all_instances.when.called_with().should.throw(EC2ResponseError) + + +@mock_ec2 +def test_decorater_wrapped_gets_set(): + """ + Moto decorator's __wrapped__ should get set to the tests function + """ + test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal('test_decorater_wrapped_gets_set') From afad0bed142f3608bd94e9421084e86e23aa0c2b Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 10 Sep 2013 16:54:57 -0400 Subject: [PATCH 77/85] Add ebs_optimized parameter to launch configurations --- moto/autoscaling/models.py | 6 ++++-- moto/autoscaling/responses.py | 2 ++ tests/test_autoscaling/test_launch_configurations.py | 6 +++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index da5efb711..a367ba297 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -29,7 +29,7 @@ class FakeScalingPolicy(object): class FakeLaunchConfiguration(object): def __init__(self, name, image_id, key_name, security_groups, user_data, instance_type, instance_monitoring, instance_profile_name, - spot_price): + spot_price, ebs_optimized): self.name = name self.image_id = image_id self.key_name = key_name @@ -39,6 +39,7 @@ class FakeLaunchConfiguration(object): self.instance_monitoring = instance_monitoring self.instance_profile_name = instance_profile_name self.spot_price = spot_price + self.ebs_optimized = ebs_optimized @property def instance_monitoring_enabled(self): @@ -126,7 +127,7 @@ class AutoScalingBackend(BaseBackend): def create_launch_configuration(self, name, image_id, key_name, security_groups, user_data, instance_type, instance_monitoring, instance_profile_name, - spot_price): + spot_price, ebs_optimized): launch_configuration = FakeLaunchConfiguration( name=name, image_id=image_id, @@ -137,6 +138,7 @@ class AutoScalingBackend(BaseBackend): instance_monitoring=instance_monitoring, instance_profile_name=instance_profile_name, spot_price=spot_price, + ebs_optimized=ebs_optimized, ) self.launch_configurations[name] = launch_configuration return launch_configuration diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index fe7f46b8f..55cdf9b6e 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -33,6 +33,7 @@ class AutoScalingResponse(BaseResponse): instance_monitoring=instance_monitoring, instance_profile_name=self._get_param('IamInstanceProfile'), spot_price=self._get_param('SpotPrice'), + ebs_optimized=self._get_param('EbsOptimized'), ) template = Template(CREATE_LAUNCH_CONFIGURATION_TEMPLATE) return template.render() @@ -178,6 +179,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """ {% endif %} + {{ launch_configuration.ebs_optimized }} {{ launch_configuration.instance_monitoring_enabled }} diff --git 
a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index 446275098..dc56b3d3d 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -18,7 +18,9 @@ def test_create_launch_configuration(): user_data="This is some user_data", instance_monitoring=True, instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing', - spot_price=0.1) + spot_price=0.1, + ebs_optimized=True, + ) conn.create_launch_configuration(config) launch_config = conn.get_all_launch_configurations()[0] @@ -31,6 +33,7 @@ def test_create_launch_configuration(): launch_config.instance_monitoring.enabled.should.equal('true') launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') launch_config.spot_price.should.equal(0.1) + launch_config.ebs_optimized.should.equal(True) @mock_autoscaling @@ -57,6 +60,7 @@ def test_create_launch_configuration_defaults(): launch_config.instance_monitoring.enabled.should.equal('false') launch_config.instance_profile_name.should.equal(None) launch_config.spot_price.should.equal(None) + launch_config.ebs_optimized.should.equal(False) @mock_autoscaling From ceed17bec49b977cdd07ff7c23d400de9d27a478 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 10 Sep 2013 16:55:12 -0400 Subject: [PATCH 78/85] 0.2.9 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index ad3098cb7..d5c232b40 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup, find_packages setup( name='moto', - version='0.2.8', + version='0.2.9', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', From 1e11bb1b27ea0c700827652a621052a9b11f318f Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 10 Sep 2013 17:04:56 -0400 Subject: [PATCH 79/85] Skip tests for ebs_optimized since it is only in 2.12 --- .../test_launch_configurations.py | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index dc56b3d3d..f14d996ec 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -4,6 +4,7 @@ from boto.ec2.autoscale.launchconfig import LaunchConfiguration import sure # noqa from moto import mock_autoscaling +from tests.helpers import requires_boto_gte @mock_autoscaling @@ -36,6 +37,21 @@ def test_create_launch_configuration(): launch_config.ebs_optimized.should.equal(True) +@requires_boto_gte("2.12") +@mock_autoscaling +def test_create_launch_configuration_for_2_12(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + ebs_optimized=True, + ) + conn.create_launch_configuration(config) + + launch_config = conn.get_all_launch_configurations()[0] + launch_config.ebs_optimized.should.equal(True) + + @mock_autoscaling def test_create_launch_configuration_defaults(): """ Test with the minimum inputs and check that all of the proper defaults @@ -63,6 +79,21 @@ def test_create_launch_configuration_defaults(): launch_config.ebs_optimized.should.equal(False) +@requires_boto_gte("2.12") +@mock_autoscaling +def test_create_launch_configuration_defaults_for_2_12(): + conn = boto.connect_autoscale() + config = LaunchConfiguration( + name='tester', + image_id='ami-abcd1234', + 
ebs_optimized=True, + ) + conn.create_launch_configuration(config) + + launch_config = conn.get_all_launch_configurations()[0] + launch_config.ebs_optimized.should.equal(False) + + @mock_autoscaling def test_launch_configuration_describe_filter(): conn = boto.connect_autoscale() From a49bc57279345cffe62c7fcc81961a32b4fa36c0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 16 Sep 2013 21:24:32 -0400 Subject: [PATCH 80/85] Adding more boto versions to testing --- .travis.yml | 6 +++++- tests/test_autoscaling/test_launch_configurations.py | 3 --- tests/test_elb/test_elb.py | 9 ++++++--- tests/test_elb/test_server.py | 1 - 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index c7af21821..b6026cbe9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,11 @@ python: - 2.7 env: matrix: - - BOTO_VERSION=2.9 + #- BOTO_VERSION=2.13.3 + - BOTO_VERSION=2.12.0 + - BOTO_VERSION=2.11.0 + - BOTO_VERSION=2.10.0 + - BOTO_VERSION=2.9.9 - BOTO_VERSION=2.8 - BOTO_VERSION=2.7 install: diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index f14d996ec..ece3ecfcc 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -20,7 +20,6 @@ def test_create_launch_configuration(): instance_monitoring=True, instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing', spot_price=0.1, - ebs_optimized=True, ) conn.create_launch_configuration(config) @@ -34,7 +33,6 @@ def test_create_launch_configuration(): launch_config.instance_monitoring.enabled.should.equal('true') launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing') launch_config.spot_price.should.equal(0.1) - launch_config.ebs_optimized.should.equal(True) @requires_boto_gte("2.12") @@ -86,7 +84,6 @@ def test_create_launch_configuration_defaults_for_2_12(): config = LaunchConfiguration( name='tester', image_id='ami-abcd1234', - ebs_optimized=True, ) conn.create_launch_configuration(config) diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index e21b04b65..cd4e449e1 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -70,7 +70,8 @@ def test_create_health_check(): timeout=23, ) - lb = conn.create_load_balancer('my-lb', [], []) + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) lb.configure_health_check(hc) balancer = conn.get_all_load_balancers()[0] @@ -91,7 +92,8 @@ def test_register_instances(): instance_id2 = reservation.instances[1].id conn = boto.connect_elb() - lb = conn.create_load_balancer('my-lb', [], []) + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) lb.register_instances([instance_id1, instance_id2]) @@ -109,7 +111,8 @@ def test_deregister_instances(): instance_id2 = reservation.instances[1].id conn = boto.connect_elb() - lb = conn.create_load_balancer('my-lb', [], []) + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) lb.register_instances([instance_id1, instance_id2]) diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py index 695e08410..9fc172dd6 100644 --- a/tests/test_elb/test_server.py +++ b/tests/test_elb/test_server.py @@ -13,4 +13,3 @@ def test_elb_describe_instances(): res = test_client.get('/?Action=DescribeLoadBalancers') res.data.should.contain('DescribeLoadBalancersResponse') - 
res.data.should.contain('LoadBalancerName') From 596b435e57275714b3d37529cc342cacc15a86bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sylvain=20Frai=CC=88sse=CC=81?= Date: Mon, 23 Sep 2013 23:34:33 +0200 Subject: [PATCH 81/85] Fix the 'www.' strip changing the lstrip method by the replace --- moto/s3/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 765303743..7dd0acccc 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -9,7 +9,7 @@ def bucket_name_from_url(url): domain = urlparse.urlparse(url).netloc # If 'www' prefixed, strip it. - domain = domain.lstrip("www.") + domain = domain.replace("www.", "") if 'amazonaws.com' in domain: bucket_result = bucket_name_regex.search(domain) From 0fd4a5a6f88ac32e5b4be0ef3d12122c2e90cc4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sylvain=20Frai=CC=88sse=CC=81?= Date: Tue, 24 Sep 2013 00:00:52 +0200 Subject: [PATCH 82/85] Updates a test to show the ltrip use problem --- moto/s3/utils.py | 4 ++-- tests/test_s3/test_s3_utils.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/s3/utils.py b/moto/s3/utils.py index 7dd0acccc..19b0cfdf0 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -8,8 +8,8 @@ bucket_name_regex = re.compile("(.+).s3.amazonaws.com") def bucket_name_from_url(url): domain = urlparse.urlparse(url).netloc - # If 'www' prefixed, strip it. - domain = domain.replace("www.", "") + if domain.startswith('www.'): + domain = domain[4:] if 'amazonaws.com' in domain: bucket_result = bucket_name_regex.search(domain) diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index 5b03d61fd..cb8bd8b8c 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -7,7 +7,7 @@ def test_base_url(): def test_localhost_bucket(): - expect(bucket_name_from_url('https://foobar.localhost:5000/abc')).should.equal("foobar") + expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc')).should.equal("wfoobar") def test_localhost_without_bucket(): From f805717231598a7f63832232d7c383f8c519326b Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Tue, 24 Sep 2013 10:44:25 -0400 Subject: [PATCH 83/85] Update README S3 example to actually work. 
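Before the README change below, a short aside on why patches 81 and 82 above replace lstrip: str.lstrip takes a set of characters rather than a prefix, so lstrip("www.") keeps removing any leading 'w' or '.' and eats the first character of a bucket name that happens to start with one of them. The snippet is purely illustrative and mirrors the hostname used in the updated test:

    domain = "wfoobar.localhost:5000"

    # The old code: lstrip strips characters from the set {'w', '.'},
    # so the bucket's leading 'w' is removed too.
    assert domain.lstrip("www.") == "foobar.localhost:5000"

    # The fix from patch 82: strip only the literal "www." prefix.
    if domain.startswith("www."):
        domain = domain[4:]
    assert domain == "wfoobar.localhost:5000"
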
--- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2562f25d8..4af14f623 100644 --- a/README.md +++ b/README.md @@ -37,10 +37,13 @@ from mymodule import MyModel @mock_s3 def test_my_model_save(): + conn = boto.connect_s3() + # We need to create the bucket since this is all in Moto's 'virtual' AWS account + conn.create_bucket('mybucket') + model_instance = MyModel('steve', 'is awesome') model_instance.save() - conn = boto.connect_s3() assert conn.get_bucket('mybucket').get_key('steve') == 'is awesome' ``` From 611b820e3dc1dd0f631e2d167b534614332d6fdf Mon Sep 17 00:00:00 2001 From: Marcus Ahle Date: Wed, 25 Sep 2013 13:51:49 -0400 Subject: [PATCH 84/85] Adding HTML support for SES send_email() --- moto/ses/responses.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 5002f925c..6640d76be 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -36,7 +36,10 @@ class EmailResponse(BaseResponse): return template.render() def send_email(self): - body = self.querystring.get('Message.Body.Text.Data')[0] + bodydatakey = 'Message.Body.Text.Data' + if 'Message.Body.Html.Data' in self.querystring: + bodydatakey = 'Message.Body.Html.Data' + body = self.querystring.get(bodydatakey)[0] source = self.querystring.get('Source')[0] subject = self.querystring.get('Message.Subject.Data')[0] destination = self.querystring.get('Destination.ToAddresses.member.1')[0] From 766fed9767228014395d5a56c5ec5dbb14c067c9 Mon Sep 17 00:00:00 2001 From: Marcus Ahle Date: Thu, 26 Sep 2013 09:17:38 -0400 Subject: [PATCH 85/85] Adding test for sending html email --- tests/test_ses/test_ses.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 4c0440c40..6b8f357df 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -54,7 +54,21 @@ def test_send_email(): send_quota = conn.get_send_quota() sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) sent_count.should.equal(1) + +@mock_ses +def test_send_html_email(): + conn = boto.connect_ses('the_key', 'the_secret') + conn.send_email.when.called_with( + "test@example.com", "test subject", + "test body", "test_to@example.com", format="html").should.throw(BotoServerError) + + conn.verify_email_identity("test@example.com") + conn.send_email("test@example.com", "test subject", "test body", "test_to@example.com", format="html") + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['GetSendQuotaResponse']['GetSendQuotaResult']['SentLast24Hours']) + sent_count.should.equal(1) @mock_ses def test_send_raw_email():
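
To close out patches 84 and 85: the intent is that boto's send_email(..., format="html") submits the body under Message.Body.Html.Data rather than Message.Body.Text.Data, so the mock prefers the HTML key when it is present. The snippet below is an illustrative sketch of just that key selection, with a hand-built querystring dict standing in for the parsed request (values are single-item lists, matching the querystring.get(...)[0] pattern in moto's responses):

    querystring = {
        'Source': ['test@example.com'],
        'Message.Subject.Data': ['test subject'],
        'Message.Body.Html.Data': ['<p>test body</p>'],
        'Destination.ToAddresses.member.1': ['test_to@example.com'],
    }

    # Same selection logic as the patched EmailResponse.send_email:
    # fall back to the plain-text key unless an HTML body was supplied.
    body_key = 'Message.Body.Text.Data'
    if 'Message.Body.Html.Data' in querystring:
        body_key = 'Message.Body.Html.Data'
    body = querystring[body_key][0]

    assert body == '<p>test body</p>'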