From 440213f854c2f77ce6e382cc0dd3edb6631cdae1 Mon Sep 17 00:00:00 2001 From: usmankb Date: Fri, 1 May 2020 21:16:33 +0530 Subject: [PATCH 01/29] Enhancement Adding SES get_send_statistics,create_configuration_set functions --- moto/ses/exceptions.py | 13 +++++++ moto/ses/models.py | 36 ++++++++++++++++- moto/ses/responses.py | 66 ++++++++++++++++++++++++++++++++ tests/test_ses/test_ses.py | 36 +++++++++++++++++ tests/test_ses/test_ses_boto3.py | 52 +++++++++++++++++++++++++ 5 files changed, 202 insertions(+), 1 deletion(-) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index a905039e2..f57eadf77 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -7,3 +7,16 @@ class MessageRejectedError(RESTError): def __init__(self, message): super(MessageRejectedError, self).__init__("MessageRejected", message) + +class ConfigurationSetDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(ConfigurationSetDoesNotExist, self).__init__("ConfigurationSetDoesNotExist", message) + + +class EventDestinationAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(EventDestinationAlreadyExists, self).__init__("EventDestinationAlreadyExists", message) diff --git a/moto/ses/models.py b/moto/ses/models.py index 91241f706..62068e5a9 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -1,11 +1,12 @@ from __future__ import unicode_literals +import datetime import email from email.utils import parseaddr from moto.core import BaseBackend, BaseModel from moto.sns.models import sns_backends -from .exceptions import MessageRejectedError +from .exceptions import MessageRejectedError,ConfigurationSetDoesNotExist,EventDestinationAlreadyExists from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -81,7 +82,11 @@ class SESBackend(BaseBackend): self.domains = [] self.sent_messages = [] self.sent_message_count = 0 + self.rejected_messages_count = 0 self.sns_topics = {} 
+ self.config_set = {} + self.config_set_event_destination = {} + self.event_destinations = {} def _is_verified_address(self, source): _, address = parseaddr(source) @@ -118,6 +123,7 @@ class SESBackend(BaseBackend): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): + self.rejected_messages_count+=1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -135,6 +141,7 @@ class SESBackend(BaseBackend): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): + self.rejected_messages_count += 1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -237,5 +244,32 @@ class SESBackend(BaseBackend): return {} + def create_configuration_set(self, configuration_set_name): + self.config_set[configuration_set_name] = 1 + return {} + + def create_configuration_set_event_destination(self,configuration_set_name, event_destination): + + if self.config_set.get(configuration_set_name) is None: + raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.") + + if self.event_destinations.get(event_destination["Name"]): + raise EventDestinationAlreadyExists("Duplicate Event destination Name.") + + self.config_set_event_destination[configuration_set_name] = event_destination + self.event_destinations[event_destination["Name"]] = 1 + + return {} + + def get_send_statistics(self): + + statistics = {} + statistics["DeliveryAttempts"] = self.sent_message_count + statistics["Rejects"] = self.rejected_messages_count + statistics["Complaints"] = 0 + statistics["Bounces"] = 0 + statistics["Timestamp"] = datetime.datetime.utcnow() + return statistics + ses_backend = SESBackend() diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 1034aeb0d..8bf7bd942 100644 --- 
a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -133,6 +133,40 @@ class EmailResponse(BaseResponse): template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE) return template.render() + def get_send_statistics(self): + statistics = ses_backend.get_send_statistics() + template = self.response_template(GET_SEND_STATISTICS) + return template.render(all_statistics=[statistics]) + + def create_configuration_set(self): + configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0] + ses_backend.create_configuration_set(configuration_set_name=configuration_set_name) + template = self.response_template(CREATE_CONFIGURATION_SET) + return template.render() + + def create_configuration_set_event_destination(self): + + configuration_set_name = self._get_param('ConfigurationSetName') + is_configuration_event_enabled = self.querystring.get("EventDestination.Enabled")[0] + configuration_event_name = self.querystring.get("EventDestination.Name")[0] + event_topic_arn = self.querystring.get("EventDestination.SNSDestination.TopicARN")[0] + event_matching_types = self._get_multi_param("EventDestination.MatchingEventTypes.member") + + event_destination = {"Name":configuration_event_name, + "Enabled":is_configuration_event_enabled, + "EventMatchingTypes":event_matching_types, + "SNSDestination":event_topic_arn + } + + ses_backend.create_configuration_set_event_destination( + configuration_set_name=configuration_set_name, + event_destination=event_destination + ) + + template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION) + return template.render() + + VERIFY_EMAIL_IDENTITY = """ @@ -248,3 +282,35 @@ SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE = """47e0ef1a-9bf2-11e1-9279-0100e8cf109a """ + +GET_SEND_STATISTICS = """ + + {% for statistics in all_statistics %} + + {{ statistics["DeliveryAttempts"] }} + {{ statistics["Rejects"] }} + {{ statistics["Bounces"] }} + {{ statistics["Complaints"] }} + {{ statistics["Timestamp"] }} + + {% 
endfor %} + + + e0abcdfa-c866-11e0-b6d0-273d09173z49 + +""" + +CREATE_CONFIGURATION_SET = """ + + + 47e0ef1a-9bf2-11e1-9279-0100e8cf109a + +""" + + +CREATE_CONFIGURATION_SET_EVENT_DESTINATION = """ + + + 67e0ef1a-9bf2-11e1-9279-0100e8cf109a + +""" diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 851327b9d..637931572 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -127,3 +127,39 @@ def test_send_raw_email(): send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"] ) sent_count.should.equal(1) + + +@mock_ses_deprecated +def test_get_send_statistics(): + conn = boto.connect_ses("the_key", "the_secret") + + conn.send_email.when.called_with( + "test@example.com", + "test subject", + "test body", + "test_to@example.com", + format="html", + ).should.throw(BotoServerError) + + # tests to verify rejects in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) + delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count.should.equal(1) + delivery_count.should.equal(0) + + conn.verify_email_identity("test@example.com") + conn.send_email( + "test@example.com", "test subject", "test body", "test_to@example.com" + ) + + # tests to delivery attempts in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) + delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count.should.equal(1) + delivery_count.should.equal(1) + + diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index de8aa0813..e14abda3f 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -4,6 +4,8 @@ import boto3 from botocore.exceptions import ClientError from 
six.moves.email_mime_multipart import MIMEMultipart from six.moves.email_mime_text import MIMEText +from nose.tools import assert_raises + import sure # noqa @@ -227,3 +229,53 @@ def test_send_email_notification_with_encoded_sender(): Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}}, ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + +@mock_ses +def test_create_configuration_set(): + conn = boto3.client("ses", region_name="us-east-1") + conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"})) + + conn.create_configuration_set_event_destination( + ConfigurationSetName='test', + EventDestination={ + 'Name': 'snsEvent', + 'Enabled': True, + 'MatchingEventTypes': [ + 'send', + ], + 'SNSDestination': { + 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' + } + }) + + with assert_raises(ClientError) as ex: + conn.create_configuration_set_event_destination( + ConfigurationSetName='failtest', + EventDestination={ + 'Name': 'snsEvent', + 'Enabled': True, + 'MatchingEventTypes': [ + 'send', + ], + 'SNSDestination': { + 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' + } + }) + + ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") + + with assert_raises(ClientError) as ex: + conn.create_configuration_set_event_destination( + ConfigurationSetName='test', + EventDestination={ + 'Name': 'snsEvent', + 'Enabled': True, + 'MatchingEventTypes': [ + 'send', + ], + 'SNSDestination': { + 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' + } + }) + + ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") \ No newline at end of file From b8aa6ddaea81762e5c8f574f915d31ae50171579 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sun, 3 May 2020 08:28:20 +0530 Subject: [PATCH 02/29] Fix response_parameter being ignored in put_integration_response --- moto/apigateway/models.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git 
a/moto/apigateway/models.py b/moto/apigateway/models.py index d39b719d6..d1b430068 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -56,8 +56,10 @@ class Deployment(BaseModel, dict): class IntegrationResponse(BaseModel, dict): - def __init__(self, status_code, selection_pattern=None): - self["responseTemplates"] = {"application/json": None} + def __init__(self, status_code, selection_pattern=None, response_templates=None): + if response_templates == None: + response_templates = {"application/json": None} + self["responseTemplates"] = response_templates self["statusCode"] = status_code if selection_pattern: self["selectionPattern"] = selection_pattern @@ -72,8 +74,10 @@ class Integration(BaseModel, dict): self["requestTemplates"] = request_templates self["integrationResponses"] = {"200": IntegrationResponse(200)} - def create_integration_response(self, status_code, selection_pattern): - integration_response = IntegrationResponse(status_code, selection_pattern) + def create_integration_response(self, status_code, selection_pattern, response_templates): + if response_templates == {}: + response_templates = None + integration_response = IntegrationResponse(status_code, selection_pattern, response_templates) self["integrationResponses"][status_code] = integration_response return integration_response @@ -956,7 +960,7 @@ class APIGatewayBackend(BaseBackend): raise InvalidRequestInput() integration = self.get_integration(function_id, resource_id, method_type) integration_response = integration.create_integration_response( - status_code, selection_pattern + status_code, selection_pattern, response_templates ) return integration_response From 1cda64e9a3a190a5caa7f08b5af7b783d335c562 Mon Sep 17 00:00:00 2001 From: usmankb Date: Sun, 3 May 2020 08:31:46 +0530 Subject: [PATCH 03/29] Added tests --- tests/test_apigateway/test_apigateway.py | 58 ++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/tests/test_apigateway/test_apigateway.py 
b/tests/test_apigateway/test_apigateway.py index 7495372d2..0ad815972 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +import json import boto3 from freezegun import freeze_time @@ -1229,6 +1230,63 @@ def test_put_integration_response_requires_responseTemplate(): responseTemplates={}, ) +@mock_apigateway +def test_put_integration_response_with_response_template(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + resources = client.get_resources(restApiId=api_id) + root_id = [resource for resource in resources["items"] if resource["path"] == "/"][ + 0 + ]["id"] + + client.put_method( + restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE" + ) + client.put_method_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + client.put_integration( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + type="HTTP", + uri="http://httpbin.org/robots.txt", + integrationHttpMethod="POST", + ) + + with assert_raises(ClientError) as ex: + client.put_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + ex.exception.response["Error"]["Code"].should.equal("BadRequestException") + ex.exception.response["Error"]["Message"].should.equal("Invalid request input") + + client.put_integration_response( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + statusCode="200", + selectionPattern= "foobar", + responseTemplates={"application/json": json.dumps({"data":"test"})}) + + + response = client.get_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + 
response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": json.dumps({"data":"test"})}, + } + ) @mock_apigateway def test_put_integration_validation(): From 4365c2bd4ed6bad601d2af6a4b6b5531efe896c6 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Sun, 3 May 2020 18:13:40 -0500 Subject: [PATCH 04/29] Added network functions --- moto/__init__.py | 1 + moto/managedblockchain/__init__.py | 9 ++ moto/managedblockchain/exceptions.py | 16 ++ moto/managedblockchain/models.py | 151 ++++++++++++++++++ moto/managedblockchain/responses.py | 67 ++++++++ moto/managedblockchain/urls.py | 9 ++ moto/managedblockchain/utils.py | 23 +++ .../test_managedblockchain_networks.py | 53 ++++++ 8 files changed, 329 insertions(+) create mode 100644 moto/managedblockchain/__init__.py create mode 100644 moto/managedblockchain/exceptions.py create mode 100644 moto/managedblockchain/models.py create mode 100644 moto/managedblockchain/responses.py create mode 100644 moto/managedblockchain/urls.py create mode 100644 moto/managedblockchain/utils.py create mode 100644 tests/test_managedblockchain/test_managedblockchain_networks.py diff --git a/moto/__init__.py b/moto/__init__.py index 79c1555d3..d3fa7b8aa 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -75,6 +75,7 @@ mock_kms = lazy_load(".kms", "mock_kms") mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated") mock_logs = lazy_load(".logs", "mock_logs") mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated") +mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain") mock_opsworks = lazy_load(".opsworks", "mock_opsworks") mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated") mock_organizations = lazy_load(".organizations", "mock_organizations") diff --git a/moto/managedblockchain/__init__.py 
b/moto/managedblockchain/__init__.py new file mode 100644 index 000000000..a95fa7351 --- /dev/null +++ b/moto/managedblockchain/__init__.py @@ -0,0 +1,9 @@ +from __future__ import unicode_literals +from .models import managedblockchain_backends +from ..core.models import base_decorator, deprecated_base_decorator + +managedblockchain_backend = managedblockchain_backends["us-east-1"] +mock_managedblockchain = base_decorator(managedblockchain_backends) +mock_managedblockchain_deprecated = deprecated_base_decorator( + managedblockchain_backends +) diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py new file mode 100644 index 000000000..3195d7c34 --- /dev/null +++ b/moto/managedblockchain/exceptions.py @@ -0,0 +1,16 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class ManagedBlockchainClientError(RESTError): + code = 400 + + +class BadRequestException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + super(BadRequestException, self).__init__( + "BadRequestException", + "An error occurred (BadRequestException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py new file mode 100644 index 000000000..32e9ebbb5 --- /dev/null +++ b/moto/managedblockchain/models.py @@ -0,0 +1,151 @@ +from __future__ import unicode_literals + +import datetime + +from boto3 import Session + +from moto.core import BaseBackend, BaseModel + +from .exceptions import BadRequestException + +from .utils import get_network_id + +FRAMEWORKS = [ + "HYPERLEDGER_FABRIC", +] + +FRAMEWORKVERSIONS = [ + "1.2", +] + +EDITIONS = [ + "STARTER", + "STANDARD", +] + + +class ManagedBlockchainNetwork(BaseModel): + def __init__( + self, + id, + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + region, + 
description=None, + ): + self.st = datetime.datetime.now(datetime.timezone.utc) + self.id = id + self.name = name + self.description = description + self.framework = framework + self.frameworkversion = frameworkversion + self.frameworkconfiguration = frameworkconfiguration + self.voting_policy = voting_policy + self.member_configuration = member_configuration + self.region = region + + def to_dict(self): + frameworkattributes = { + "Fabric": { + "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format( + self.id, self.region + ), + "Edition": self.frameworkconfiguration["Fabric"]["Edition"], + } + } + + vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format( + self.region, self.id + ) + # Use iso_8601_datetime_with_milliseconds ? + d = { + "Id": self.id, + "Name": self.name, + "Framework": self.framework, + "FrameworkVersion": self.frameworkversion, + "FrameworkAttributes": frameworkattributes, + "VpcEndpointServiceName": vpcendpointname, + "VotingPolicy": self.voting_policy, + "Status": "AVAILABLE", + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if self.description is not None: + d["Description"] = self.description + return d + + +class ManagedBlockchainBackend(BaseBackend): + def __init__(self, region_name): + self.networks = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_network( + self, + json_body, + ): + name = json_body["Name"] + framework = json_body["Framework"] + frameworkversion = json_body["FrameworkVersion"] + frameworkconfiguration = json_body["FrameworkConfiguration"] + voting_policy = json_body["VotingPolicy"] + member_configuration = json_body["MemberConfiguration"] + + # Check framework + if framework not in FRAMEWORKS: + raise BadRequestException("CreateNetwork", "Invalid request body") + + # Check framework version + if frameworkversion not in FRAMEWORKVERSIONS: + raise 
BadRequestException( + "CreateNetwork", + "Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format( + frameworkversion + ), + ) + + # Check edition + if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS: + raise BadRequestException("CreateNetwork", "Invalid request body") + + ## Generate network ID + network_id = get_network_id() + + self.networks[network_id] = ManagedBlockchainNetwork( + id=network_id, + name=name, + framework=framework, + frameworkversion=frameworkversion, + frameworkconfiguration=frameworkconfiguration, + voting_policy=voting_policy, + member_configuration=member_configuration, + region=self.region_name, + ) + + def list_networks(self): + return self.networks.values() + + def get_network(self, network_id): + return self.networks[network_id] + + + +managedblockchain_backends = {} +for region in Session().get_available_regions("managedblockchain"): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) +for region in Session().get_available_regions( + "managedblockchain", partition_name="aws-us-gov" +): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) +for region in Session().get_available_regions( + "managedblockchain", partition_name="aws-cn" +): + managedblockchain_backends[region] = ManagedBlockchainBackend(region) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py new file mode 100644 index 000000000..ff7c5ff5c --- /dev/null +++ b/moto/managedblockchain/responses.py @@ -0,0 +1,67 @@ +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import urlparse, parse_qs + +from moto.core.responses import BaseResponse +from .models import managedblockchain_backends +from .utils import region_from_managedblckchain_url, networkid_from_managedblockchain_url + + +class ManagedBlockchainResponse(BaseResponse): + def __init__(self, backend): + super(ManagedBlockchainResponse, self).__init__() + self.backend = backend + + 
@classmethod + def network_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + return response_instance._network_response(request, full_url, headers) + + def _network_response(self, request, full_url, headers): + method = request.method + if hasattr(request, "body"): + body = request.body + else: + body = request.data + parsed_url = urlparse(full_url) + querystring = parse_qs(parsed_url.query, keep_blank_values=True) + if method == "GET": + return self._all_networks_response(request, full_url, headers) + elif method == "POST": + json_body = json.loads(body.decode("utf-8")) + return self._network_response_post(json_body, querystring, headers) + + def _all_networks_response(self, request, full_url, headers): + mbcnetworks = self.backend.list_networks() + response = json.dumps( + {"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]} + ) + headers["content-type"] = "application/json" + return 200, headers, response + + def _network_response_post(self, json_body, querystring, headers): + self.backend.create_network(json_body) + return 201, headers, "" + + @classmethod + def networkid_response(clazz, request, full_url, headers): + region_name = region_from_managedblckchain_url(full_url) + response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + return response_instance._networkid_response(request, full_url, headers) + + def _networkid_response(self, request, full_url, headers): + method = request.method + + if method == "GET": + network_id = networkid_from_managedblockchain_url(full_url) + return self._networkid_response_get(network_id, headers) + + def _networkid_response_get(self, network_id, headers): + mbcnetwork = self.backend.get_network(network_id) + response = json.dumps( + {"Network": mbcnetwork.to_dict()} + ) + headers["content-type"] = "application/json" + return 200, 
headers, response diff --git a/moto/managedblockchain/urls.py b/moto/managedblockchain/urls.py new file mode 100644 index 000000000..806d11926 --- /dev/null +++ b/moto/managedblockchain/urls.py @@ -0,0 +1,9 @@ +from __future__ import unicode_literals +from .responses import ManagedBlockchainResponse + +url_bases = ["https?://managedblockchain.(.+).amazonaws.com"] + +url_paths = { + "{0}/networks$": ManagedBlockchainResponse.network_response, + "{0}/networks/(?P[^/.]+)$": ManagedBlockchainResponse.networkid_response, +} diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py new file mode 100644 index 000000000..687b7990b --- /dev/null +++ b/moto/managedblockchain/utils.py @@ -0,0 +1,23 @@ +import random +import string + +from six.moves.urllib.parse import urlparse + + +def region_from_managedblckchain_url(url): + domain = urlparse(url).netloc + + if "." in domain: + return domain.split(".")[1] + else: + return "us-east-1" + + +def networkid_from_managedblockchain_url(full_url): + return full_url.split("/")[-1] + + +def get_network_id(): + return "n-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py new file mode 100644 index 000000000..7bdc0ec59 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -0,0 +1,53 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_managedblockchain + + +@mock_managedblockchain +def test_create_network(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } + } + + memberconfiguration = { + "Name": 
"testmember1", + "Description": "Test Member 1", + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} + }, + } + + conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=memberconfiguration, + ) + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Name"].should.equal("testnetwork1") + + # Get network details + network_id = mbcnetworks[0]["Id"] + response = conn.get_network(NetworkId=network_id) + response["Network"]["Name"].should.equal("testnetwork1") From 353bc08ac2f4a4af82987f1fa82ef28d8d4b4584 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 4 May 2020 09:24:46 +0100 Subject: [PATCH 05/29] Linting --- moto/ses/exceptions.py | 9 +++-- moto/ses/models.py | 12 +++++-- moto/ses/responses.py | 36 +++++++++++-------- tests/test_ses/test_ses.py | 18 ++++++---- tests/test_ses/test_ses_boto3.py | 62 ++++++++++++++++---------------- 5 files changed, 80 insertions(+), 57 deletions(-) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index f57eadf77..c15473188 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -8,15 +8,20 @@ class MessageRejectedError(RESTError): def __init__(self, message): super(MessageRejectedError, self).__init__("MessageRejected", message) + class ConfigurationSetDoesNotExist(RESTError): code = 400 def __init__(self, message): - super(ConfigurationSetDoesNotExist, self).__init__("ConfigurationSetDoesNotExist", message) + super(ConfigurationSetDoesNotExist, self).__init__( + "ConfigurationSetDoesNotExist", message + ) class EventDestinationAlreadyExists(RESTError): code = 400 def __init__(self, 
message): - super(EventDestinationAlreadyExists, self).__init__("EventDestinationAlreadyExists", message) + super(EventDestinationAlreadyExists, self).__init__( + "EventDestinationAlreadyExists", message + ) diff --git a/moto/ses/models.py b/moto/ses/models.py index 62068e5a9..d141e25ae 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -6,7 +6,11 @@ from email.utils import parseaddr from moto.core import BaseBackend, BaseModel from moto.sns.models import sns_backends -from .exceptions import MessageRejectedError,ConfigurationSetDoesNotExist,EventDestinationAlreadyExists +from .exceptions import ( + MessageRejectedError, + ConfigurationSetDoesNotExist, + EventDestinationAlreadyExists, +) from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -123,7 +127,7 @@ class SESBackend(BaseBackend): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError("Too many recipients.") if not self._is_verified_address(source): - self.rejected_messages_count+=1 + self.rejected_messages_count += 1 raise MessageRejectedError("Email address not verified %s" % source) self.__process_sns_feedback__(source, destinations, region) @@ -248,7 +252,9 @@ class SESBackend(BaseBackend): self.config_set[configuration_set_name] = 1 return {} - def create_configuration_set_event_destination(self,configuration_set_name, event_destination): + def create_configuration_set_event_destination( + self, configuration_set_name, event_destination + ): if self.config_set.get(configuration_set_name) is None: raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.") diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 8bf7bd942..62893094a 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -140,34 +140,42 @@ class EmailResponse(BaseResponse): def create_configuration_set(self): configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0] - 
ses_backend.create_configuration_set(configuration_set_name=configuration_set_name) + ses_backend.create_configuration_set( + configuration_set_name=configuration_set_name + ) template = self.response_template(CREATE_CONFIGURATION_SET) return template.render() def create_configuration_set_event_destination(self): - configuration_set_name = self._get_param('ConfigurationSetName') - is_configuration_event_enabled = self.querystring.get("EventDestination.Enabled")[0] + configuration_set_name = self._get_param("ConfigurationSetName") + is_configuration_event_enabled = self.querystring.get( + "EventDestination.Enabled" + )[0] configuration_event_name = self.querystring.get("EventDestination.Name")[0] - event_topic_arn = self.querystring.get("EventDestination.SNSDestination.TopicARN")[0] - event_matching_types = self._get_multi_param("EventDestination.MatchingEventTypes.member") + event_topic_arn = self.querystring.get( + "EventDestination.SNSDestination.TopicARN" + )[0] + event_matching_types = self._get_multi_param( + "EventDestination.MatchingEventTypes.member" + ) - event_destination = {"Name":configuration_event_name, - "Enabled":is_configuration_event_enabled, - "EventMatchingTypes":event_matching_types, - "SNSDestination":event_topic_arn - } + event_destination = { + "Name": configuration_event_name, + "Enabled": is_configuration_event_enabled, + "EventMatchingTypes": event_matching_types, + "SNSDestination": event_topic_arn, + } ses_backend.create_configuration_set_event_destination( - configuration_set_name=configuration_set_name, - event_destination=event_destination - ) + configuration_set_name=configuration_set_name, + event_destination=event_destination, + ) template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION) return template.render() - VERIFY_EMAIL_IDENTITY = """ diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 637931572..719e4ede9 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ 
-144,8 +144,12 @@ def test_get_send_statistics(): # tests to verify rejects in get_send_statistics result = conn.get_send_statistics() - reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) - delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + ) reject_count.should.equal(1) delivery_count.should.equal(0) @@ -157,9 +161,11 @@ def test_get_send_statistics(): # tests to delivery attempts in get_send_statistics result = conn.get_send_statistics() - reject_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"]) - delivery_count = int(result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"]) + reject_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + ) reject_count.should.equal(1) delivery_count.should.equal(1) - - diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index e14abda3f..0e6bb9bea 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -230,52 +230,50 @@ def test_send_email_notification_with_encoded_sender(): ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + @mock_ses def test_create_configuration_set(): conn = boto3.client("ses", region_name="us-east-1") conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"})) conn.create_configuration_set_event_destination( - ConfigurationSetName='test', - EventDestination={ - 'Name': 'snsEvent', - 'Enabled': True, - 'MatchingEventTypes': [ - 'send', - ], - 'SNSDestination': { - 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' - } - }) + 
ConfigurationSetName="test", + EventDestination={ + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) with assert_raises(ClientError) as ex: conn.create_configuration_set_event_destination( - ConfigurationSetName='failtest', + ConfigurationSetName="failtest", EventDestination={ - 'Name': 'snsEvent', - 'Enabled': True, - 'MatchingEventTypes': [ - 'send', - ], - 'SNSDestination': { - 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' - } - }) + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist") with assert_raises(ClientError) as ex: conn.create_configuration_set_event_destination( - ConfigurationSetName='test', + ConfigurationSetName="test", EventDestination={ - 'Name': 'snsEvent', - 'Enabled': True, - 'MatchingEventTypes': [ - 'send', - ], - 'SNSDestination': { - 'TopicARN': 'arn:aws:sns:us-east-1:123456789012:myTopic' - } - }) + "Name": "snsEvent", + "Enabled": True, + "MatchingEventTypes": ["send",], + "SNSDestination": { + "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic" + }, + }, + ) - ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") \ No newline at end of file + ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") From d6875c25cc369c6704e3d5560045d2d6e080b7f8 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 4 May 2020 09:27:57 +0100 Subject: [PATCH 06/29] Linting --- moto/apigateway/models.py | 10 +++++++--- tests/test_apigateway/test_apigateway.py | 10 ++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index d1b430068..4513c75ab 100644 --- a/moto/apigateway/models.py +++ 
b/moto/apigateway/models.py @@ -57,7 +57,7 @@ class Deployment(BaseModel, dict): class IntegrationResponse(BaseModel, dict): def __init__(self, status_code, selection_pattern=None, response_templates=None): - if response_templates == None: + if response_templates is None: response_templates = {"application/json": None} self["responseTemplates"] = response_templates self["statusCode"] = status_code @@ -74,10 +74,14 @@ class Integration(BaseModel, dict): self["requestTemplates"] = request_templates self["integrationResponses"] = {"200": IntegrationResponse(200)} - def create_integration_response(self, status_code, selection_pattern, response_templates): + def create_integration_response( + self, status_code, selection_pattern, response_templates + ): if response_templates == {}: response_templates = None - integration_response = IntegrationResponse(status_code, selection_pattern, response_templates) + integration_response = IntegrationResponse( + status_code, selection_pattern, response_templates + ) self["integrationResponses"][status_code] = integration_response return integration_response diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 0ad815972..295cd1c54 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1230,6 +1230,7 @@ def test_put_integration_response_requires_responseTemplate(): responseTemplates={}, ) + @mock_apigateway def test_put_integration_response_with_response_template(): client = boto3.client("apigateway", region_name="us-west-2") @@ -1268,9 +1269,9 @@ def test_put_integration_response_with_response_template(): resourceId=root_id, httpMethod="GET", statusCode="200", - selectionPattern= "foobar", - responseTemplates={"application/json": json.dumps({"data":"test"})}) - + selectionPattern="foobar", + responseTemplates={"application/json": json.dumps({"data": "test"})}, + ) response = client.get_integration_response( restApiId=api_id, 
resourceId=root_id, httpMethod="GET", statusCode="200" @@ -1284,10 +1285,11 @@ def test_put_integration_response_with_response_template(): "statusCode": "200", "selectionPattern": "foobar", "ResponseMetadata": {"HTTPStatusCode": 200}, - "responseTemplates": {"application/json": json.dumps({"data":"test"})}, + "responseTemplates": {"application/json": json.dumps({"data": "test"})}, } ) + @mock_apigateway def test_put_integration_validation(): client = boto3.client("apigateway", region_name="us-west-2") From e1baca1569c538cd4771c066d73c2560b8cb60c3 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Tue, 5 May 2020 18:08:28 -0400 Subject: [PATCH 07/29] Implemented parent_group, recursive and name_prefix_filter for function list_thing_groups() --- moto/iot/models.py | 26 ++++++++++++++++++++++++-- moto/iot/responses.py | 2 +- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 2e9979bda..5b74b353c 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -857,8 +857,30 @@ class IoTBackend(BaseBackend): del self.thing_groups[thing_group.arn] def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups + if recursive is None: + recursive = True + if name_prefix_filter is None: + name_prefix_filter = "" + if parent_group and parent_group not in [ + _.thing_group_name for _ in self.thing_groups.values() + ]: + raise ResourceNotFoundException() + thing_groups = [ + _ for _ in self.thing_groups.values() if _.parent_group_name == parent_group + ] + if recursive: + for g in thing_groups: + thing_groups.extend( + self.list_thing_groups( + parent_group=g.thing_group_name, + name_prefix_filter=None, + recursive=False, + ) + ) + # thing_groups = groups_to_process.values() + return [ + _ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter) + ] def update_thing_group( self, thing_group_name, thing_group_properties, expected_version 
diff --git a/moto/iot/responses.py b/moto/iot/responses.py index c12d4b5c5..07a8c10c2 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -535,7 +535,7 @@ class IoTResponse(BaseResponse): # max_results = self._get_int_param("maxResults") parent_group = self._get_param("parentGroup") name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") + recursive = self._get_bool_param("recursive") thing_groups = self.iot_backend.list_thing_groups( parent_group=parent_group, name_prefix_filter=name_prefix_filter, From e114eb9383e84099951ffad49af4d172d12863b1 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Tue, 5 May 2020 18:08:56 -0400 Subject: [PATCH 08/29] Added test test_list_thing_groups() --- tests/test_iot/test_iot.py | 134 +++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 58a820fee..edf623532 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -756,6 +756,140 @@ def test_delete_principal_thing(): client.delete_certificate(certificateId=cert_id) +@mock_iot +def test_list_thing_groups(): + client = boto3.client("iot", region_name="ap-northeast-1") + group_name_1a = "my-group-name-1a" + group_name_1b = "my-group-name-1b" + group_name_2a = "my-group-name-2a" + group_name_2b = "my-group-name-2b" + group_name_3a = "my-group-name-3a" + group_name_3b = "my-group-name-3b" + group_name_3c = "my-group-name-3c" + group_name_3d = "my-group-name-3d" + + # --1a + # |--2a + # | |--3a + # | |--3b + # | + # |--2b + # |--3c + # |--3d + # --1b + + # create thing groups tree + # 1 + thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) + thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) + thing_group1a.should.have.key("thingGroupArn") + thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) + 
thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) + thing_group1b.should.have.key("thingGroupArn") + # 2 + thing_group2a = client.create_thing_group( + thingGroupName=group_name_2a, parentGroupName=group_name_1a + ) + thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) + thing_group2a.should.have.key("thingGroupArn") + thing_group2b = client.create_thing_group( + thingGroupName=group_name_2b, parentGroupName=group_name_1a + ) + thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) + thing_group2b.should.have.key("thingGroupArn") + # 3 + thing_group3a = client.create_thing_group( + thingGroupName=group_name_3a, parentGroupName=group_name_2a + ) + thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) + thing_group3a.should.have.key("thingGroupArn") + thing_group3b = client.create_thing_group( + thingGroupName=group_name_3b, parentGroupName=group_name_2a + ) + thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) + thing_group3b.should.have.key("thingGroupArn") + thing_group3c = client.create_thing_group( + thingGroupName=group_name_3c, parentGroupName=group_name_2b + ) + thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) + thing_group3c.should.have.key("thingGroupArn") + thing_group3d = client.create_thing_group( + thingGroupName=group_name_3d, parentGroupName=group_name_2b + ) + thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) + thing_group3d.should.have.key("thingGroupArn") + + # begin tests + # should list all groups + resp = client.list_thing_groups() + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(8) + # should list all groups non-recursively + resp = client.list_thing_groups(recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + # should list all groups filtered by parent 
+ resp = client.list_thing_groups(parentGroup=group_name_1a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(6) + resp = client.list_thing_groups(parentGroup=group_name_2a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=group_name_1b) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + try: + client.list_thing_groups(parentGroup="inexistant-group-name") + except client.exceptions.ResourceNotFoundException as exc: + error_code = exc.response["Error"]["Code"] + error_code.should.equal("ResourceNotFoundException") + else: + raise Exception("Should have raised error") + # should list all groups filtered by parent non-recursively + resp = client.list_thing_groups(parentGroup=group_name_1a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=group_name_2a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + # should list all groups filtered by name prefix + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + # should list all groups filtered by name prefix non-recursively + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) + 
resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + # should list all groups filtered by name prefix and parent + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-2", parentGroup=group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-3", parentGroup=group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups( + namePrefixFilter="prefix-which-doesn-not-match", parentGroup=group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + @mock_iot def test_delete_thing_group(): client = boto3.client("iot", region_name="ap-northeast-1") From 3b8c8fafe2a4c3ee1b1c70a0763fe1233b28086d Mon Sep 17 00:00:00 2001 From: gruebel Date: Wed, 6 May 2020 14:38:25 +0200 Subject: [PATCH 09/29] Fix ssm.get_parameters missing validation --- moto/ssm/models.py | 10 ++++++++++ tests/test_ssm/test_ssm_boto3.py | 27 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 3ce3b3a22..67216972e 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -514,6 +514,16 @@ class SimpleSystemManagerBackend(BaseBackend): def get_parameters(self, names, with_decryption): result = [] + + if len(names) > 10: + raise ValidationException( + "1 validation error detected: " + "Value '[{}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10.".format( + ", ".join(names) + ) + ) + for name in names: if name in self._parameters: result.append(self.get_parameter(name, with_decryption)) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 170cd8a3e..e757a4006 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -1,5 +1,7 @@ from __future__ import 
unicode_literals +import string + import boto3 import botocore.exceptions import sure # noqa @@ -300,6 +302,31 @@ def test_get_parameter(): ) +@mock_ssm +def test_get_parameters_errors(): + client = boto3.client("ssm", region_name="us-east-1") + + ssm_parameters = {name: "value" for name in string.ascii_lowercase[:11]} + + for name, value in ssm_parameters.items(): + client.put_parameter(Name=name, Value=value, Type="String") + + with assert_raises(ClientError) as e: + client.get_parameters(Names=list(ssm_parameters.keys())) + ex = e.exception + ex.operation_name.should.equal("GetParameters") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ValidationException") + ex.response["Error"]["Message"].should.equal( + "1 validation error detected: " + "Value '[{}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10.".format( + ", ".join(ssm_parameters.keys()) + ) + ) + print(ex.response["Error"]["Message"]) + + @mock_ssm def test_get_nonexistant_parameter(): client = boto3.client("ssm", region_name="us-east-1") From 40d1c8c9b9563a50a91dfcb9160073630772c990 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:10:42 -0400 Subject: [PATCH 10/29] Added generate_thing_group_tree function --- tests/test_iot/test_iot.py | 90 +++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 51 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index edf623532..394317fc6 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -9,6 +9,37 @@ from botocore.exceptions import ClientError from nose.tools import assert_raises +def generate_thing_group_tree(iot_client, tree_dict, _parent=None): + """ + Generates a thing group tree given the input tree structure. + :param iot_client: the iot client for boto3 + :param tree_dict: dictionary with the key being the group_name, and the value being a sub tree. 
+ tree_dict = { + "group_name_1a":{ + "group_name_2a":{ + "group_name_3a":{} or None + }, + }, + "group_name_1b":{} + } + :return: a dictionary of created groups, keyed by group name + """ + if tree_dict is None: + tree_dict = {} + created_dict = {} + for group_name in tree_dict.keys(): + params = {"thingGroupName": group_name} + if _parent: + params["parentGroupName"] = _parent + created_group = iot_client.create_thing_group(**params) + created_dict[group_name] = created_group + subtree_dict = generate_thing_group_tree( + iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name + ) + created_dict = {**created_dict, **subtree_dict} + return created_dict + + @mock_iot def test_attach_policy(): client = boto3.client("iot", region_name="ap-northeast-1") @@ -767,57 +798,14 @@ def test_list_thing_groups(): group_name_3b = "my-group-name-3b" group_name_3c = "my-group-name-3c" group_name_3d = "my-group-name-3d" - - # --1a - # |--2a - # | |--3a - # | |--3b - # | - # |--2b - # |--3c - # |--3d - # --1b - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) - thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) - thing_group1b.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") - thing_group2b = client.create_thing_group( - thingGroupName=group_name_2b, parentGroupName=group_name_1a - ) - thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) - thing_group2b.should.have.key("thingGroupArn") - # 3 - thing_group3a = 
client.create_thing_group( - thingGroupName=group_name_3a, parentGroupName=group_name_2a - ) - thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) - thing_group3a.should.have.key("thingGroupArn") - thing_group3b = client.create_thing_group( - thingGroupName=group_name_3b, parentGroupName=group_name_2a - ) - thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) - thing_group3b.should.have.key("thingGroupArn") - thing_group3c = client.create_thing_group( - thingGroupName=group_name_3c, parentGroupName=group_name_2b - ) - thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) - thing_group3c.should.have.key("thingGroupArn") - thing_group3d = client.create_thing_group( - thingGroupName=group_name_3d, parentGroupName=group_name_2b - ) - thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) - thing_group3d.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {} or None, group_name_3b: {} or None}, + group_name_2b: {group_name_3c: {} or None, group_name_3d: {} or None}, + }, + group_name_1b: {}, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # begin tests # should list all groups From 5fd817965326ad308e28064267d44d988619d562 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:29:16 -0400 Subject: [PATCH 11/29] Refactored test_list_thing_groups into class TestListThingGroup --- tests/test_iot/test_iot.py | 175 ++++++++++++++++++++++--------------- 1 file changed, 105 insertions(+), 70 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 394317fc6..40eb19628 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -787,9 +787,7 @@ def test_delete_principal_thing(): client.delete_certificate(certificateId=cert_id) -@mock_iot -def test_list_thing_groups(): - client = boto3.client("iot", region_name="ap-northeast-1") +class 
TestListThingGroup: group_name_1a = "my-group-name-1a" group_name_1b = "my-group-name-1b" group_name_2a = "my-group-name-2a" @@ -805,77 +803,114 @@ def test_list_thing_groups(): }, group_name_1b: {}, } - group_catalog = generate_thing_group_tree(client, tree_dict) - # begin tests - # should list all groups - resp = client.list_thing_groups() - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(8) - # should list all groups non-recursively - resp = client.list_thing_groups(recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) + @mock_iot + def test_should_list_all_groups(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups() + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(8) - # should list all groups filtered by parent - resp = client.list_thing_groups(parentGroup=group_name_1a) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(6) - resp = client.list_thing_groups(parentGroup=group_name_2a) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(parentGroup=group_name_1b) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) - try: - client.list_thing_groups(parentGroup="inexistant-group-name") - except client.exceptions.ResourceNotFoundException as exc: - error_code = exc.response["Error"]["Code"] - error_code.should.equal("ResourceNotFoundException") - else: - raise Exception("Should have raised error") - # should list all groups filtered by parent non-recursively - resp = client.list_thing_groups(parentGroup=group_name_1a, recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(parentGroup=group_name_2a, 
recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - # should list all groups filtered by name prefix - resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(4) - resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match") - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) - # should list all groups filtered by name prefix non-recursively - resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(0) + @mock_iot + def test_should_list_all_groups_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) - # should list all groups filtered by name prefix and parent - resp = client.list_thing_groups( - namePrefixFilter="my-group-name-2", parentGroup=group_name_1a - ) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups( - namePrefixFilter="my-group-name-3", parentGroup=group_name_1a - ) - resp.should.have.key("thingGroups") - resp["thingGroups"].should.have.length_of(4) - resp = client.list_thing_groups( - namePrefixFilter="prefix-which-doesn-not-match", parentGroup=group_name_1a - ) - resp.should.have.key("thingGroups") - 
resp["thingGroups"].should.have.length_of(0) + + @mock_iot + def test_should_list_all_groups_filtered_by_parent(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(parentGroup=self.group_name_1a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(6) + resp = client.list_thing_groups(parentGroup=self.group_name_2a) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=self.group_name_1b) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + with assert_raises(ClientError) as e: + client.list_thing_groups(parentGroup="inexistant-group-name") + e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + + @mock_iot + def test_should_list_all_groups_filtered_by_parent_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3") + resp.should.have.key("thingGroups") + 
resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match") + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) + + + @mock_iot + def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self): + # setup + client = boto3.client("iot", region_name="ap-northeast-1") + group_catalog = generate_thing_group_tree(client, self.tree_dict) + # test + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(2) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(4) + resp = client.list_thing_groups( + namePrefixFilter="prefix-which-doesn-not-match", parentGroup=self.group_name_1a + ) + resp.should.have.key("thingGroups") + resp["thingGroups"].should.have.length_of(0) @mock_iot From 0869c83ea5415cf85ba5e516dec4ee9528c55aa3 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:32:47 -0400 Subject: [PATCH 12/29] Refactored test_delete_thing_group to use generate_thing_group_tree --- tests/test_iot/test_iot.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git 
a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 40eb19628..af7abfdcd 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -798,8 +798,8 @@ class TestListThingGroup: group_name_3d = "my-group-name-3d" tree_dict = { group_name_1a: { - group_name_2a: {group_name_3a: {} or None, group_name_3b: {} or None}, - group_name_2b: {group_name_3c: {} or None, group_name_3d: {} or None}, + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, }, group_name_1b: {}, } @@ -918,20 +918,12 @@ def test_delete_thing_group(): client = boto3.client("iot", region_name="ap-northeast-1") group_name_1a = "my-group-name-1a" group_name_2a = "my-group-name-2a" - # --1a - # |--2a - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {}, + }, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # delete group with child try: From f7b048442822fd2c2a63ea73d2e75eb39c592961 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:37:43 -0400 Subject: [PATCH 13/29] Refactored test_describe_thing_group_metadata_hierarchy to use generate_thing_group_tree --- tests/test_iot/test_iot.py | 78 +++++++++----------------------------- 1 file changed, 18 insertions(+), 60 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index af7abfdcd..8524bcbc1 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -958,56 +958,14 @@ def 
test_describe_thing_group_metadata_hierarchy(): group_name_3c = "my-group-name-3c" group_name_3d = "my-group-name-3d" - # --1a - # |--2a - # | |--3a - # | |--3b - # | - # |--2b - # |--3c - # |--3d - # --1b - - # create thing groups tree - # 1 - thing_group1a = client.create_thing_group(thingGroupName=group_name_1a) - thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a) - thing_group1a.should.have.key("thingGroupArn") - thing_group1b = client.create_thing_group(thingGroupName=group_name_1b) - thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) - thing_group1b.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") - thing_group2b = client.create_thing_group( - thingGroupName=group_name_2b, parentGroupName=group_name_1a - ) - thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) - thing_group2b.should.have.key("thingGroupArn") - # 3 - thing_group3a = client.create_thing_group( - thingGroupName=group_name_3a, parentGroupName=group_name_2a - ) - thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) - thing_group3a.should.have.key("thingGroupArn") - thing_group3b = client.create_thing_group( - thingGroupName=group_name_3b, parentGroupName=group_name_2a - ) - thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) - thing_group3b.should.have.key("thingGroupArn") - thing_group3c = client.create_thing_group( - thingGroupName=group_name_3c, parentGroupName=group_name_2b - ) - thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) - thing_group3c.should.have.key("thingGroupArn") - thing_group3d = client.create_thing_group( - thingGroupName=group_name_3d, parentGroupName=group_name_2b - ) 
- thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) - thing_group3d.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, + }, + group_name_1b: {}, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # describe groups # groups level 1 @@ -1059,7 +1017,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2a.should.have.key("version") # 2b thing_group_description2b = client.describe_thing_group( @@ -1085,7 +1043,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2b.should.have.key("version") # groups level 3 # 3a @@ -1112,13 +1070,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2a) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2a["thingGroupArn"]) + ].should.match(group_catalog[group_name_2a]["thingGroupArn"]) thing_group_description3a.should.have.key("version") # 3b thing_group_description3b = client.describe_thing_group( @@ -1144,13 +1102,13 @@ def 
test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2a) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2a["thingGroupArn"]) + ].should.match(group_catalog[group_name_2a]["thingGroupArn"]) thing_group_description3b.should.have.key("version") # 3c thing_group_description3c = client.describe_thing_group( @@ -1176,13 +1134,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2b) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2b["thingGroupArn"]) + ].should.match(group_catalog[group_name_2b]["thingGroupArn"]) thing_group_description3c.should.have.key("version") # 3d thing_group_description3d = client.describe_thing_group( @@ -1208,13 +1166,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2b) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - 
].should.match(thing_group2b["thingGroupArn"]) + ].should.match(group_catalog[group_name_2b]["thingGroupArn"]) thing_group_description3d.should.have.key("version") From c51ef87f710a42df42ad847cb048cbfd109b757b Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 09:43:34 -0400 Subject: [PATCH 14/29] black --- tests/test_iot/test_iot.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 8524bcbc1..6fe43edc2 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -814,7 +814,6 @@ class TestListThingGroup: resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(8) - @mock_iot def test_should_list_all_groups_non_recursively(self): # setup @@ -825,7 +824,6 @@ class TestListThingGroup: resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(2) - @mock_iot def test_should_list_all_groups_filtered_by_parent(self): # setup @@ -843,7 +841,9 @@ class TestListThingGroup: resp["thingGroups"].should.have.length_of(0) with assert_raises(ClientError) as e: client.list_thing_groups(parentGroup="inexistant-group-name") - e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") + e.exception.response["Error"]["Code"].should.equal( + "ResourceNotFoundException" + ) @mock_iot def test_should_list_all_groups_filtered_by_parent_non_recursively(self): @@ -858,7 +858,6 @@ class TestListThingGroup: resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(2) - @mock_iot def test_should_list_all_groups_filtered_by_name_prefix(self): # setup @@ -875,21 +874,23 @@ class TestListThingGroup: resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) - @mock_iot def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self): # setup client = boto3.client("iot", region_name="ap-northeast-1") group_catalog = generate_thing_group_tree(client, 
self.tree_dict) # test - resp = client.list_thing_groups(namePrefixFilter="my-group-name-1", recursive=False) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-1", recursive=False + ) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(2) - resp = client.list_thing_groups(namePrefixFilter="my-group-name-3", recursive=False) + resp = client.list_thing_groups( + namePrefixFilter="my-group-name-3", recursive=False + ) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) - @mock_iot def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self): # setup @@ -907,7 +908,8 @@ class TestListThingGroup: resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(4) resp = client.list_thing_groups( - namePrefixFilter="prefix-which-doesn-not-match", parentGroup=self.group_name_1a + namePrefixFilter="prefix-which-doesn-not-match", + parentGroup=self.group_name_1a, ) resp.should.have.key("thingGroups") resp["thingGroups"].should.have.length_of(0) @@ -919,9 +921,7 @@ def test_delete_thing_group(): group_name_1a = "my-group-name-1a" group_name_2a = "my-group-name-2a" tree_dict = { - group_name_1a: { - group_name_2a: {}, - }, + group_name_1a: {group_name_2a: {},}, } group_catalog = generate_thing_group_tree(client, tree_dict) From 8bfc7ed76056c35528e306da383b3e0a1c270978 Mon Sep 17 00:00:00 2001 From: Chagui- Date: Wed, 6 May 2020 10:28:13 -0400 Subject: [PATCH 15/29] Fixed python2 --- tests/test_iot/test_iot.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 6fe43edc2..c3ee4c96d 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -36,7 +36,8 @@ def generate_thing_group_tree(iot_client, tree_dict, _parent=None): subtree_dict = generate_thing_group_tree( iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name ) - created_dict = {**created_dict, **subtree_dict} 
+ # merge the subtree's created groups into the result + created_dict.update(subtree_dict) return created_dict From 2b0e7da9985700f72904a5a3b7130b4f436250b0 Mon Sep 17 00:00:00 2001 From: usmankb Date: Wed, 6 May 2020 20:28:50 +0530 Subject: [PATCH 16/29] SES get send statistics response modification --- moto/ses/responses.py | 30 ++++++++++++++++-------------- tests/test_ses/test_ses.py | 8 ++++---- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 62893094a..8c9dc8f75 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -292,20 +292,22 @@ SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE = """""" GET_SEND_STATISTICS = """ - - {% for statistics in all_statistics %} - - {{ statistics["DeliveryAttempts"] }} - {{ statistics["Rejects"] }} - {{ statistics["Bounces"] }} - {{ statistics["Complaints"] }} - {{ statistics["Timestamp"] }} - - {% endfor %} - - - e0abcdfa-c866-11e0-b6d0-273d09173z49 - + + + {% for statistics in all_statistics %} + + {{ statistics["DeliveryAttempts"] }} + {{ statistics["Rejects"] }} + {{ statistics["Bounces"] }} + {{ statistics["Complaints"] }} + {{ statistics["Timestamp"] }} + + {% endfor %} + + + e0abcdfa-c866-11e0-b6d0-273d09173z49 + + """ CREATE_CONFIGURATION_SET = """ diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 719e4ede9..7d7674bea 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -145,10 +145,10 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(0) @@ -162,10 +162,10
@@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(1) From 55f207050ef133888ac8dd231e3d124953096391 Mon Sep 17 00:00:00 2001 From: Brian Pandola Date: Wed, 6 May 2020 14:28:40 -0700 Subject: [PATCH 17/29] Add `Redshift.ClusterAlreadyExists` Error Closes #2967 --- moto/redshift/exceptions.py | 7 +++++++ moto/redshift/models.py | 3 +++ tests/test_redshift/test_redshift.py | 22 ++++++++++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index 0a17e8aab..b5f83d3bc 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -136,3 +136,10 @@ class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError): cluster_identifier ), ) + + +class ClusterAlreadyExistsFaultError(RedshiftClientError): + def __init__(self): + super(ClusterAlreadyExistsFaultError, self).__init__( + "ClusterAlreadyExists", "Cluster already exists" + ) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 17840fb86..07baf18c0 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -10,6 +10,7 @@ from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.ec2 import ec2_backends from .exceptions import ( + ClusterAlreadyExistsFaultError, ClusterNotFoundError, ClusterParameterGroupNotFoundError, ClusterSecurityGroupNotFoundError, @@ -580,6 +581,8 @@ class RedshiftBackend(BaseBackend): def create_cluster(self, **cluster_kwargs): cluster_identifier = 
cluster_kwargs["cluster_identifier"] + if cluster_identifier in self.clusters: + raise ClusterAlreadyExistsFaultError() cluster = Cluster(self, **cluster_kwargs) self.clusters[cluster_identifier] = cluster return cluster diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 6bb3b1396..cf96ee15f 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -915,6 +915,11 @@ def test_create_cluster_from_snapshot(): ClusterIdentifier=original_cluster_identifier, ) + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier=original_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + ).should.throw(ClientError, "ClusterAlreadyExists") + response = client.restore_from_cluster_snapshot( ClusterIdentifier=new_cluster_identifier, SnapshotIdentifier=original_snapshot_identifier, @@ -1333,3 +1338,20 @@ def test_modify_snapshot_copy_retention_period(): response = client.describe_clusters(ClusterIdentifier="test") cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"] cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5) + + +@mock_redshift +def test_create_duplicate_cluster_fails(): + kwargs = { + "ClusterIdentifier": "test", + "ClusterType": "single-node", + "DBName": "test", + "MasterUsername": "user", + "MasterUserPassword": "password", + "NodeType": "ds2.xlarge", + } + client = boto3.client("redshift", region_name="us-east-1") + client.create_cluster(**kwargs) + client.create_cluster.when.called_with(**kwargs).should.throw( + ClientError, "ClusterAlreadyExists" + ) From 5ec814a6042b73f000b62d3baa3fd74afc27c992 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Wed, 6 May 2020 21:12:48 -0500 Subject: [PATCH 18/29] Fixes and additional tests --- moto/backends.py | 1 + moto/managedblockchain/models.py | 77 ++++++---- moto/managedblockchain/responses.py | 41 ++++-- moto/managedblockchain/utils.py | 6 + 
.../test_managedblockchain_networks.py | 132 ++++++++++++++---- 5 files changed, 195 insertions(+), 62 deletions(-) diff --git a/moto/backends.py b/moto/backends.py index bb71429eb..3934afa67 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -39,6 +39,7 @@ BACKENDS = { "kms": ("kms", "kms_backends"), "lambda": ("awslambda", "lambda_backends"), "logs": ("logs", "logs_backends"), + "managedblockchain": ("managedblockchain", "managedblockchain_backends"), "moto_api": ("core", "moto_api_backends"), "opsworks": ("opsworks", "opsworks_backends"), "organizations": ("organizations", "organizations_backends"), diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 32e9ebbb5..475a19bbd 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -8,7 +8,7 @@ from moto.core import BaseBackend, BaseModel from .exceptions import BadRequestException -from .utils import get_network_id +from .utils import get_network_id, get_member_id FRAMEWORKS = [ "HYPERLEDGER_FABRIC", @@ -37,7 +37,7 @@ class ManagedBlockchainNetwork(BaseModel): region, description=None, ): - self.st = datetime.datetime.now(datetime.timezone.utc) + self.creationdate = datetime.datetime.utcnow() self.id = id self.name = name self.description = description @@ -49,19 +49,34 @@ class ManagedBlockchainNetwork(BaseModel): self.region = region def to_dict(self): + # Format for list_networks + d = { + "Id": self.id, + "Name": self.name, + "Framework": self.framework, + "FrameworkVersion": self.frameworkversion, + "Status": "AVAILABLE", + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + } + if self.description is not None: + d["Description"] = self.description + return d + + def get_format(self): + # Format for get_networks frameworkattributes = { "Fabric": { "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format( - self.id, self.region + self.id.lower(), self.region ), "Edition": 
self.frameworkconfiguration["Fabric"]["Edition"], } } vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format( - self.region, self.id + self.region, self.id.lower() ) - # Use iso_8601_datetime_with_milliseconds ? + d = { "Id": self.id, "Name": self.name, @@ -71,7 +86,7 @@ class ManagedBlockchainNetwork(BaseModel): "VpcEndpointServiceName": vpcendpointname, "VotingPolicy": self.voting_policy, "Status": "AVAILABLE", - "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), + "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"), } if self.description is not None: d["Description"] = self.description @@ -90,14 +105,21 @@ class ManagedBlockchainBackend(BaseBackend): def create_network( self, - json_body, + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + description=None, ): - name = json_body["Name"] - framework = json_body["Framework"] - frameworkversion = json_body["FrameworkVersion"] - frameworkconfiguration = json_body["FrameworkConfiguration"] - voting_policy = json_body["VotingPolicy"] - member_configuration = json_body["MemberConfiguration"] + self.name = name + self.framework = framework + self.frameworkversion = frameworkversion + self.frameworkconfiguration = frameworkconfiguration + self.voting_policy = voting_policy + self.member_configuration = member_configuration + self.description = description # Check framework if framework not in FRAMEWORKS: @@ -119,33 +141,32 @@ class ManagedBlockchainBackend(BaseBackend): ## Generate network ID network_id = get_network_id() + ## Generate memberid ID - will need to actually create member + member_id = get_member_id() + self.networks[network_id] = ManagedBlockchainNetwork( id=network_id, name=name, - framework=framework, - frameworkversion=frameworkversion, - frameworkconfiguration=frameworkconfiguration, - voting_policy=voting_policy, - member_configuration=member_configuration, + framework=self.framework, + 
frameworkversion=self.frameworkversion, + frameworkconfiguration=self.frameworkconfiguration, + voting_policy=self.voting_policy, + member_configuration=self.member_configuration, region=self.region_name, + description=self.description, ) + # Return the network and member ID + d = {"NetworkId": network_id, "MemberId": member_id} + return d + def list_networks(self): return self.networks.values() def get_network(self, network_id): - return self.networks[network_id] - + return self.networks.get(network_id) managedblockchain_backends = {} for region in Session().get_available_regions("managedblockchain"): managedblockchain_backends[region] = ManagedBlockchainBackend(region) -for region in Session().get_available_regions( - "managedblockchain", partition_name="aws-us-gov" -): - managedblockchain_backends[region] = ManagedBlockchainBackend(region) -for region in Session().get_available_regions( - "managedblockchain", partition_name="aws-cn" -): - managedblockchain_backends[region] = ManagedBlockchainBackend(region) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index ff7c5ff5c..93084581d 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -5,7 +5,10 @@ from six.moves.urllib.parse import urlparse, parse_qs from moto.core.responses import BaseResponse from .models import managedblockchain_backends -from .utils import region_from_managedblckchain_url, networkid_from_managedblockchain_url +from .utils import ( + region_from_managedblckchain_url, + networkid_from_managedblockchain_url, +) class ManagedBlockchainResponse(BaseResponse): @@ -16,7 +19,9 @@ class ManagedBlockchainResponse(BaseResponse): @classmethod def network_response(clazz, request, full_url, headers): region_name = region_from_managedblckchain_url(full_url) - response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + response_instance = ManagedBlockchainResponse( + 
managedblockchain_backends[region_name] + ) return response_instance._network_response(request, full_url, headers) def _network_response(self, request, full_url, headers): @@ -42,13 +47,35 @@ class ManagedBlockchainResponse(BaseResponse): return 200, headers, response def _network_response_post(self, json_body, querystring, headers): - self.backend.create_network(json_body) - return 201, headers, "" + name = json_body["Name"] + framework = json_body["Framework"] + frameworkversion = json_body["FrameworkVersion"] + frameworkconfiguration = json_body["FrameworkConfiguration"] + voting_policy = json_body["VotingPolicy"] + member_configuration = json_body["MemberConfiguration"] + + # Optional + description = None + if "Description" in json_body: + description = json_body["Description"] + + response = self.backend.create_network( + name, + framework, + frameworkversion, + frameworkconfiguration, + voting_policy, + member_configuration, + description, + ) + return 201, headers, json.dumps(response) @classmethod def networkid_response(clazz, request, full_url, headers): region_name = region_from_managedblckchain_url(full_url) - response_instance = ManagedBlockchainResponse(managedblockchain_backends[region_name]) + response_instance = ManagedBlockchainResponse( + managedblockchain_backends[region_name] + ) return response_instance._networkid_response(request, full_url, headers) def _networkid_response(self, request, full_url, headers): @@ -60,8 +87,6 @@ class ManagedBlockchainResponse(BaseResponse): def _networkid_response_get(self, network_id, headers): mbcnetwork = self.backend.get_network(network_id) - response = json.dumps( - {"Network": mbcnetwork.to_dict()} - ) + response = json.dumps({"Network": mbcnetwork.get_format()}) headers["content-type"] = "application/json" return 200, headers, response diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py index 687b7990b..2a93d93f4 100644 --- a/moto/managedblockchain/utils.py +++ 
b/moto/managedblockchain/utils.py @@ -21,3 +21,9 @@ def get_network_id(): return "n-" + "".join( random.choice(string.ascii_uppercase + string.digits) for _ in range(26) ) + + +def get_member_id(): + return "m-" + "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(26) + ) diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py index 7bdc0ec59..f9c98676e 100644 --- a/tests/test_managedblockchain/test_managedblockchain_networks.py +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -3,43 +3,46 @@ from __future__ import unicode_literals import boto3 import sure # noqa +from moto.managedblockchain.exceptions import BadRequestException from moto import mock_managedblockchain +default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + +default_votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } +} + +default_memberconfiguration = { + "Name": "testmember1", + "Description": "Test Member 1", + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} + }, +} + + @mock_managedblockchain def test_create_network(): conn = boto3.client("managedblockchain", region_name="us-east-1") - frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} - - votingpolicy = { - "ApprovalThresholdPolicy": { - "ThresholdPercentage": 50, - "ProposalDurationInHours": 24, - "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", - } - } - - memberconfiguration = { - "Name": "testmember1", - "Description": "Test Member 1", - "FrameworkConfiguration": { - "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} - }, - "LogPublishingConfiguration": { - "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": 
False}}} - }, - } - - conn.create_network( + response = conn.create_network( Name="testnetwork1", - Description="Test Network 1", Framework="HYPERLEDGER_FABRIC", FrameworkVersion="1.2", - FrameworkConfiguration=frameworkconfiguration, - VotingPolicy=votingpolicy, - MemberConfiguration=memberconfiguration, + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, ) + response["NetworkId"].should.match("n-[A-Z0-9]{26}") + response["MemberId"].should.match("m-[A-Z0-9]{26}") # Find in full list response = conn.list_networks() @@ -51,3 +54,80 @@ def test_create_network(): network_id = mbcnetworks[0]["Id"] response = conn.get_network(NetworkId=network_id) response["Network"]["Name"].should.equal("testnetwork1") + + +@mock_managedblockchain +def test_create_network_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ) + response["NetworkId"].should.match("n-[A-Z0-9]{26}") + response["MemberId"].should.match("m-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Description"].should.equal("Test Network 1") + + # Get network details + network_id = mbcnetworks[0]["Id"] + response = conn.get_network(NetworkId=network_id) + response["Network"]["Description"].should.equal("Test Network 1") + + +@mock_managedblockchain +def test_create_network_noframework(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + 
Framework="HYPERLEDGER_VINYL", + FrameworkVersion="1.2", + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_create_network_badframeworkver(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.X", + FrameworkConfiguration=default_frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ).should.throw( + Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC" + ) + + +@mock_managedblockchain +def test_create_network_badedition(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}} + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=default_votingpolicy, + MemberConfiguration=default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") From 811ec3bd2a6921b24ac0d6133b58ed713bd58b38 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Wed, 6 May 2020 21:54:59 -0500 Subject: [PATCH 19/29] Added get network test --- moto/managedblockchain/exceptions.py | 11 +++++++++++ moto/managedblockchain/models.py | 6 +++++- .../test_managedblockchain_networks.py | 9 +++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py index 3195d7c34..265d8eaea 100644 --- a/moto/managedblockchain/exceptions.py +++ b/moto/managedblockchain/exceptions.py @@ -14,3 +14,14 @@ class 
BadRequestException(ManagedBlockchainClientError): pretty_called_method, operation_error ), ) + + +class ResourceNotFoundException(ManagedBlockchainClientError): + def __init__(self, pretty_called_method, operation_error): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "An error occurred (ResourceNotFoundException) when calling the {0} operation: {1}".format( + pretty_called_method, operation_error + ), + ) diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py index 475a19bbd..96f411a87 100644 --- a/moto/managedblockchain/models.py +++ b/moto/managedblockchain/models.py @@ -6,7 +6,7 @@ from boto3 import Session from moto.core import BaseBackend, BaseModel -from .exceptions import BadRequestException +from .exceptions import BadRequestException, ResourceNotFoundException from .utils import get_network_id, get_member_id @@ -164,6 +164,10 @@ class ManagedBlockchainBackend(BaseBackend): return self.networks.values() def get_network(self, network_id): + if network_id not in self.networks: + raise ResourceNotFoundException( + "GetNetwork", "Network {0} not found".format(network_id) + ) return self.networks.get(network_id) diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py index f9c98676e..a3256a3fe 100644 --- a/tests/test_managedblockchain/test_managedblockchain_networks.py +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -131,3 +131,12 @@ def test_create_network_badedition(): VotingPolicy=default_votingpolicy, MemberConfiguration=default_memberconfiguration, ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_get_network_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_network.when.called_with( + NetworkId="n-BADNETWORK", + ).should.throw(Exception, "Network n-BADNETWORK not found")
From 4abd88f95cc7fb5abcca0f18191f89b581b9d319 Mon Sep 17 00:00:00 2001 From: Rigas Papathanasopoulos Date: Wed, 6 May 2020 23:12:32 +0300 Subject: [PATCH 20/29] Fix the online status in OpsWorks When an instance is running, OpsWorks reports its status as "online" [1], while EC2 reports it as "running". Until now, moto copied the EC2 instance's status as is. This commit converts the running status to online when returned by OpsWorks. [1]: https://docs.aws.amazon.com/cli/latest/reference/opsworks/describe-instances.html --- moto/opsworks/models.py | 3 +++ tests/test_opsworks/test_instances.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index 96d918cc9..84bd3b103 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -125,6 +125,9 @@ class OpsworkInstance(BaseModel): def status(self): if self.instance is None: return "stopped" + # OpsWorks reports the "running" state as "online" + elif self.instance._state.name == "running": + return "online" return self.instance._state.name def to_dict(self): diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 5f0dc2040..93935d20f 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -195,6 +195,10 @@ def test_ec2_integration(): reservations = ec2.describe_instances()["Reservations"] assert reservations.should.be.empty + # Before starting the instance, its status should be "stopped" + opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0] + opsworks_instance["Status"].should.equal("stopped") + # After starting the instance, it should be discoverable via ec2 opsworks.start_instance(InstanceId=instance_id) reservations = ec2.describe_instances()["Reservations"] @@ -204,3 +208,5 @@ def test_ec2_integration(): instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"])
instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) + # After starting the instance, its status should be "online" + opsworks_instance["Status"].should.equal("online") From dcb122076fc3c6e634cd939c6e9ea1b2433b777c Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 7 May 2020 09:53:07 +0100 Subject: [PATCH 21/29] Linting --- tests/test_ses/test_ses.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 7d7674bea..ce0062974 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -145,10 +145,14 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(0) @@ -162,10 +166,14 @@ def test_get_send_statistics(): result = conn.get_send_statistics() reject_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["Rejects"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] ) delivery_count = int( - result["GetSendStatisticsResponse"]["GetSendStatisticsResult"]["SendDataPoints"][0]["DeliveryAttempts"] + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] ) reject_count.should.equal(1) delivery_count.should.equal(1) From 9881306ef2ea53564e26d886ce6dccbbee1ce6c0 Mon Sep 17 00:00:00 2001 From: James Belleau Date: Thu, 7 May 2020 04:33:31 -0500 Subject: [PATCH 22/29] 
Simplified optional attribute get --- moto/managedblockchain/responses.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py index 93084581d..081f301d5 100644 --- a/moto/managedblockchain/responses.py +++ b/moto/managedblockchain/responses.py @@ -55,9 +55,7 @@ class ManagedBlockchainResponse(BaseResponse): member_configuration = json_body["MemberConfiguration"] # Optional - description = None - if "Description" in json_body: - description = json_body["Description"] + description = json_body.get("Description", None) response = self.backend.create_network( name, From 9e7803dc3601427b8f195f715f07a76a23216749 Mon Sep 17 00:00:00 2001 From: pvbouwel Date: Thu, 7 May 2020 21:29:20 +0100 Subject: [PATCH 23/29] [Bugfix] UpdateExpression using ADD from zero (#2975) When using the ADD syntax to sum up different components the path that is provided is allowed to be non-existent. In such a case DynamoDB will initialize it depending on the type of the value. If it is a number it will be initialized with 0. If it is a set it will be initialized with an empty set. 
--- moto/dynamodb2/parsing/executors.py | 25 +++++- tests/test_dynamodb2/test_dynamodb.py | 78 +++++++++++++++++++ .../test_dynamodb_table_with_range_key.py | 23 +++--- 3 files changed, 112 insertions(+), 14 deletions(-) diff --git a/moto/dynamodb2/parsing/executors.py b/moto/dynamodb2/parsing/executors.py index 8c51c9cec..2f2f2bb82 100644 --- a/moto/dynamodb2/parsing/executors.py +++ b/moto/dynamodb2/parsing/executors.py @@ -1,6 +1,10 @@ from abc import abstractmethod -from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType +from moto.dynamodb2.exceptions import ( + IncorrectOperandType, + IncorrectDataType, + ProvidedKeyDoesNotExist, +) from moto.dynamodb2.models import DynamoType from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType from moto.dynamodb2.parsing.ast_nodes import ( @@ -193,7 +197,18 @@ class AddExecutor(NodeExecutor): value_to_add = self.get_action_value() if isinstance(value_to_add, DynamoType): if value_to_add.is_set(): - current_string_set = self.get_item_at_end_of_path(item) + try: + current_string_set = self.get_item_at_end_of_path(item) + except ProvidedKeyDoesNotExist: + current_string_set = DynamoType({value_to_add.type: []}) + SetExecutor.set( + item_part_to_modify_with_set=self.get_item_before_end_of_path( + item + ), + element_to_set=self.get_element_to_action(), + value_to_set=current_string_set, + expression_attribute_names=self.expression_attribute_names, + ) assert isinstance(current_string_set, DynamoType) if not current_string_set.type == value_to_add.type: raise IncorrectDataType() @@ -204,7 +219,11 @@ class AddExecutor(NodeExecutor): else: current_string_set.value.append(value) elif value_to_add.type == DDBType.NUMBER: - existing_value = self.get_item_at_end_of_path(item) + try: + existing_value = self.get_item_at_end_of_path(item) + except ProvidedKeyDoesNotExist: + existing_value = DynamoType({DDBType.NUMBER: "0"}) + assert isinstance(existing_value, DynamoType) if not 
existing_value.type == DDBType.NUMBER: raise IncorrectDataType() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 470c5f8ff..9f917a7ae 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -5029,3 +5029,81 @@ def test_update_item_atomic_counter_return_values(): "v" in response["Attributes"] ), "v has been updated, and should be returned here" response["Attributes"]["v"]["N"].should.equal("8") + + +@mock_dynamodb2 +def test_update_item_atomic_counter_from_zero(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add n_i :inc1, n_f :inc2", + ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "-0.5"}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["n_i"]["N"] == "1.2" + assert updated_item["n_f"]["N"] == "-0.5" + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"SS": ["hello"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["SS"] == 
["hello"] + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_number_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"NS": ["3"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["NS"] == ["3"] diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 6fba713ec..33f65d5ec 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1307,16 +1307,16 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {"item4"}}, ) current_item["str_set"] = current_item["str_set"].union({"item4"}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set - # Should throw: 'The provided key element does not match the schema' - assert_failure_due_to_key_not_in_schema( - table.update_item, + table.update_item( Key=item_key, UpdateExpression="ADD non_existing_str_set :v", ExpressionAttributeValues={":v": {"item4"}}, ) + current_item["non_existing_str_set"] = {"item4"} + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a num value to a num set table.update_item( @@ -1325,7 +1325,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {6}}, ) current_item["num_set"] = 
current_item["num_set"].union({6}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a value to a number value table.update_item( @@ -1334,7 +1334,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": 20}, ) current_item["num_val"] = current_item["num_val"] + 20 - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number value to a string set, should raise Client Error table.update_item.when.called_with( @@ -1342,7 +1342,7 @@ def test_update_item_add_with_expression(): UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": 20}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number set to the string set, should raise a ClientError table.update_item.when.called_with( @@ -1350,7 +1350,7 @@ def test_update_item_add_with_expression(): UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": {20}}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to update with a bad expression table.update_item.when.called_with( @@ -1388,17 +1388,18 @@ def test_update_item_add_with_nested_sets(): current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union( {"item4"} ) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set # Should raise - assert_failure_due_to_key_not_in_schema( - table.update_item, + table.update_item( Key=item_key, UpdateExpression="ADD #ns.#ne :v", 
ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"}, ExpressionAttributeValues={":v": {"new_item"}}, ) + current_item["nested"]["non_existing_str_set"] = {"new_item"} + assert dict(table.get_item(Key=item_key)["Item"]) == current_item @mock_dynamodb2 From 65e790c4eb6928c76797ab2985f2935f9196d46d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= <33207684+gruebel@users.noreply.github.com> Date: Fri, 8 May 2020 16:57:48 +0200 Subject: [PATCH 24/29] Add dynamodb continuous backups (#2976) * remove print statement * Add dynamodb.describe_continuous_backups * Add dynamodb.update_continuous_backups * Fix Python 2 timestamp error --- moto/dynamodb2/models/__init__.py | 33 +++++++ moto/dynamodb2/responses.py | 29 ++++++ tests/test_dynamodb2/test_dynamodb.py | 136 ++++++++++++++++++++++++++ tests/test_ssm/test_ssm_boto3.py | 1 - 4 files changed, 198 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index ea16f456f..f459cd043 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -316,6 +316,12 @@ class Table(BaseModel): } self.set_stream_specification(streams) self.lambda_event_source_mappings = {} + self.continuous_backups = { + "ContinuousBackupsStatus": "ENABLED", # One of 'ENABLED'|'DISABLED', it's enabled by default + "PointInTimeRecoveryDescription": { + "PointInTimeRecoveryStatus": "DISABLED" # One of 'ENABLED'|'DISABLED' + }, + } @classmethod def create_from_cloudformation_json( @@ -1246,6 +1252,33 @@ class DynamoDBBackend(BaseBackend): self.tables = original_table_state raise + def describe_continuous_backups(self, table_name): + table = self.get_table(table_name) + + return table.continuous_backups + + def update_continuous_backups(self, table_name, point_in_time_spec): + table = self.get_table(table_name) + + if ( + point_in_time_spec["PointInTimeRecoveryEnabled"] + and table.continuous_backups["PointInTimeRecoveryDescription"][ + 
"PointInTimeRecoveryStatus" + ] + == "DISABLED" + ): + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "ENABLED", + "EarliestRestorableDateTime": unix_time(), + "LatestRestorableDateTime": unix_time(), + } + elif not point_in_time_spec["PointInTimeRecoveryEnabled"]: + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "DISABLED" + } + + return table.continuous_backups + dynamodb_backends = {} for region in Session().get_available_regions("dynamodb"): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index b703f2935..02c4749d3 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -936,3 +936,32 @@ class DynamoHandler(BaseResponse): ) response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}} return dynamo_json_dump(response) + + def describe_continuous_backups(self): + name = self.body["TableName"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.describe_continuous_backups(name) + + return json.dumps({"ContinuousBackupsDescription": response}) + + def update_continuous_backups(self): + name = self.body["TableName"] + point_in_time_spec = self.body["PointInTimeRecoverySpecification"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.update_continuous_backups( + name, point_in_time_spec + ) + + return json.dumps({"ContinuousBackupsDescription": response}) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 9f917a7ae..8774c3e88 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,5 +1,6 @@ from __future__ import 
unicode_literals, print_function +from datetime import datetime from decimal import Decimal import boto @@ -2049,6 +2050,141 @@ def test_set_ttl(): resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED") +@mock_dynamodb2 +def test_describe_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.describe_continuous_backups(TableName=table_name) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_describe_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with assert_raises(Exception) as e: + client.describe_continuous_backups(TableName="not-existing-table") + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + +@mock_dynamodb2 +def test_update_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", 
"KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + earliest_datetime = point_in_time["EarliestRestorableDateTime"] + earliest_datetime.should.be.a(datetime) + latest_datetime = point_in_time["LatestRestorableDateTime"] + latest_datetime.should.be.a(datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + # a second update should not change anything + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + point_in_time["EarliestRestorableDateTime"].should.equal(earliest_datetime) + point_in_time["LatestRestorableDateTime"].should.equal(latest_datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": False}, + ) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_update_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with assert_raises(Exception) as e: + client.update_continuous_backups( + TableName="not-existing-table", + 
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + ex = e.exception + ex.operation_name.should.equal("UpdateContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + # https://github.com/spulec/moto/issues/1043 @mock_dynamodb2 def test_query_missing_expr_names(): diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index e757a4006..837f81bf5 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -324,7 +324,6 @@ def test_get_parameters_errors(): ", ".join(ssm_parameters.keys()) ) ) - print(ex.response["Error"]["Message"]) @mock_ssm From a2f5c41372f7bbad0f3bb075eb94b5fa5792c2f6 Mon Sep 17 00:00:00 2001 From: Erik Hovland Date: Fri, 8 May 2020 09:07:28 -0700 Subject: [PATCH 25/29] Check off assume_role_with_saml since it is implemented now. 
(#2977) --- IMPLEMENTATION_COVERAGE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index f99d86df3..ef67b1cc3 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -7212,7 +7212,7 @@ ## sts 62% implemented - [X] assume_role -- [ ] assume_role_with_saml +- [X] assume_role_with_saml - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info From 9618e29ba9bc8a4959a62df1cfc7721fafa308c8 Mon Sep 17 00:00:00 2001 From: Denver Janke Date: Mon, 11 May 2020 16:44:26 +1000 Subject: [PATCH 26/29] Always call update ELBs for ASGs (#2980) --- moto/autoscaling/models.py | 13 +- tests/test_autoscaling/test_autoscaling.py | 232 +++++++++++++++++++++ 2 files changed, 238 insertions(+), 7 deletions(-) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index b757672d0..1da12a09c 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -419,11 +419,8 @@ class FakeAutoScalingGroup(BaseModel): curr_instance_count = len(self.active_instances()) if self.desired_capacity == curr_instance_count: - self.autoscaling_backend.update_attached_elbs(self.name) - self.autoscaling_backend.update_attached_target_groups(self.name) - return - - if self.desired_capacity > curr_instance_count: + pass # Nothing to do here + elif self.desired_capacity > curr_instance_count: # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) @@ -447,6 +444,7 @@ class FakeAutoScalingGroup(BaseModel): self.instance_states = list( set(self.instance_states) - set(instances_to_remove) ) + if self.name in self.autoscaling_backend.autoscaling_groups: self.autoscaling_backend.update_attached_elbs(self.name) self.autoscaling_backend.update_attached_target_groups(self.name) @@ -695,6 +693,7 @@ class AutoScalingBackend(BaseBackend): ) group.instance_states.extend(new_instances) self.update_attached_elbs(group.name) + 
self.update_attached_target_groups(group.name) def set_instance_health( self, instance_id, health_status, should_respect_grace_period @@ -938,8 +937,7 @@ class AutoScalingBackend(BaseBackend): standby_instances.append(instance_state) if should_decrement: group.desired_capacity = group.desired_capacity - len(instance_ids) - else: - group.set_desired_capacity(group.desired_capacity) + group.set_desired_capacity(group.desired_capacity) return standby_instances, original_size, group.desired_capacity def exit_standby_instances(self, group_name, instance_ids): @@ -951,6 +949,7 @@ class AutoScalingBackend(BaseBackend): instance_state.lifecycle_state = "InService" standby_instances.append(instance_state) group.desired_capacity = group.desired_capacity + len(instance_ids) + group.set_desired_capacity(group.desired_capacity) return standby_instances, original_size, group.desired_capacity def terminate_instance(self, instance_id, should_decrement): diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 3a10f20ff..93a8c5a48 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1071,6 +1071,7 @@ def test_autoscaling_describe_policies_boto3(): response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down") +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): @@ -1096,6 +1097,19 @@ def test_detach_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = 
client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1111,6 +1125,9 @@ def test_detach_one_instance_decrement(): response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]] + ) # test to ensure tag has been removed response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) @@ -1122,7 +1139,14 @@ def test_detach_one_instance_decrement(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance(): @@ -1148,6 +1172,19 @@ def test_detach_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = 
response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1173,7 +1210,14 @@ def test_detach_one_instance(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_one_instance_decrement(): @@ -1199,6 +1243,19 @@ def test_standby_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1226,7 +1283,14 @@ def test_standby_one_instance_decrement(): tags = instance["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_one_instance(): @@ -1252,6 +1316,19 @@ def test_standby_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client 
= boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1279,6 +1356,12 @@ def test_standby_one_instance(): tags = instance["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + @mock_elb @mock_autoscaling @@ -1338,8 +1421,12 @@ def test_standby_elb_update(): response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_terminate_instance_decrement(): @@ -1366,6 +1453,18 @@ def test_standby_terminate_instance_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", 
LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1409,7 +1508,14 @@ def test_standby_terminate_instance_decrement(): "terminated" ) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_terminate_instance_no_decrement(): @@ -1436,6 +1542,18 @@ def test_standby_terminate_instance_no_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1479,7 +1597,14 @@ def test_standby_terminate_instance_no_decrement(): "terminated" ) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_detach_instance_decrement(): @@ -1506,6 +1631,18 
@@ def test_standby_detach_instance_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1547,7 +1684,14 @@ def test_standby_detach_instance_decrement(): response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_detach_instance_no_decrement(): @@ -1574,6 +1718,18 @@ def test_standby_detach_instance_no_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) 
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1615,7 +1771,14 @@ def test_standby_detach_instance_no_decrement(): response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_exit_standby(): @@ -1642,6 +1805,18 @@ def test_standby_exit_standby(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1683,7 +1858,14 @@ def test_standby_exit_standby(): ) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + instance_to_standby_exit_standby.should.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_attach_one_instance(): @@ -1711,6 +1893,18 @@ def 
test_attach_one_instance(): NewInstancesProtectedFromScaleIn=True, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + ec2 = boto3.resource("ec2", "us-east-1") instances_to_add = [ x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1) @@ -1727,6 +1921,9 @@ def test_attach_one_instance(): for instance in instances: instance["ProtectedFromScaleIn"].should.equal(True) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + @mock_autoscaling @mock_ec2 @@ -1948,6 +2145,7 @@ def test_terminate_instance_via_ec2_in_autoscaling_group(): replaced_instance_id.should_not.equal(original_instance_id) +@mock_elb @mock_autoscaling @mock_ec2 def test_terminate_instance_in_auto_scaling_group_decrement(): @@ -1966,6 +2164,18 @@ def test_terminate_instance_in_auto_scaling_group_decrement(): NewInstancesProtectedFromScaleIn=False, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) original_instance_id = next( instance["InstanceId"] @@ -1979,7 +2189,11 @@ def test_terminate_instance_in_auto_scaling_group_decrement(): 
response["AutoScalingGroups"][0]["Instances"].should.equal([]) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0) + +@mock_elb @mock_autoscaling @mock_ec2 def test_terminate_instance_in_auto_scaling_group_no_decrement(): @@ -1998,6 +2212,18 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement(): NewInstancesProtectedFromScaleIn=False, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) original_instance_id = next( instance["InstanceId"] @@ -2014,3 +2240,9 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement(): ) replaced_instance_id.should_not.equal(original_instance_id) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + original_instance_id.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) From 1e0a7380d5ac219f6ead8b1fb2b2f1d243322102 Mon Sep 17 00:00:00 2001 From: Maxim Kirilov Date: Mon, 11 May 2020 15:23:45 +0300 Subject: [PATCH 27/29] Add support for BlockDeviceMappings argument (#2949) * Add support for BlockDeviceMappings argument upon run_instances execution * Remove redundant check for Ebs existence --- moto/ec2/models.py | 12 +++- 
moto/ec2/responses/instances.py | 113 ++++++++++++++++++++++--------- tests/test_ec2/test_instances.py | 105 ++++++++++++++++++++++++++++ 3 files changed, 195 insertions(+), 35 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e94d2877c..bab4636af 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -560,8 +560,10 @@ class Instance(TaggedEC2Resource, BotoInstance): # worst case we'll get IP address exaustion... rarely pass - def add_block_device(self, size, device_path): - volume = self.ec2_backend.create_volume(size, self.region_name) + def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False): + volume = self.ec2_backend.create_volume( + size, self.region_name, snapshot_id, encrypted + ) self.ec2_backend.attach_volume(volume.id, self.id, device_path) def setup_defaults(self): @@ -891,8 +893,12 @@ class InstanceBackend(object): new_instance.add_tags(instance_tags) if "block_device_mappings" in kwargs: for block_device in kwargs["block_device_mappings"]: + device_name = block_device["DeviceName"] + volume_size = block_device["Ebs"].get("VolumeSize") + snapshot_id = block_device["Ebs"].get("SnapshotId") + encrypted = block_device["Ebs"].get("Encrypted", False) new_instance.add_block_device( - block_device["Ebs"]["VolumeSize"], block_device["DeviceName"] + volume_size, device_name, snapshot_id, encrypted ) else: new_instance.setup_defaults() diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index de17f0609..adcbfa741 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -4,10 +4,16 @@ from boto.ec2.instancetype import InstanceType from moto.autoscaling import autoscaling_backends from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores -from moto.ec2.utils import filters_from_querystring, dict_from_querystring +from moto.ec2.exceptions import MissingParameterError +from moto.ec2.utils import ( + 
filters_from_querystring, + dict_from_querystring, +) from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID +from copy import deepcopy + class InstanceResponse(BaseResponse): def describe_instances(self): @@ -44,40 +50,31 @@ class InstanceResponse(BaseResponse): owner_id = self._get_param("OwnerId") user_data = self._get_param("UserData") security_group_names = self._get_multi_param("SecurityGroup") - security_group_ids = self._get_multi_param("SecurityGroupId") - nics = dict_from_querystring("NetworkInterface", self.querystring) - instance_type = self._get_param("InstanceType", if_none="m1.small") - placement = self._get_param("Placement.AvailabilityZone") - subnet_id = self._get_param("SubnetId") - private_ip = self._get_param("PrivateIpAddress") - associate_public_ip = self._get_param("AssociatePublicIpAddress") - key_name = self._get_param("KeyName") - ebs_optimized = self._get_param("EbsOptimized") or False - instance_initiated_shutdown_behavior = self._get_param( - "InstanceInitiatedShutdownBehavior" - ) - tags = self._parse_tag_specification("TagSpecification") - region_name = self.region + kwargs = { + "instance_type": self._get_param("InstanceType", if_none="m1.small"), + "placement": self._get_param("Placement.AvailabilityZone"), + "region_name": self.region, + "subnet_id": self._get_param("SubnetId"), + "owner_id": owner_id, + "key_name": self._get_param("KeyName"), + "security_group_ids": self._get_multi_param("SecurityGroupId"), + "nics": dict_from_querystring("NetworkInterface", self.querystring), + "private_ip": self._get_param("PrivateIpAddress"), + "associate_public_ip": self._get_param("AssociatePublicIpAddress"), + "tags": self._parse_tag_specification("TagSpecification"), + "ebs_optimized": self._get_param("EbsOptimized") or False, + "instance_initiated_shutdown_behavior": self._get_param( + "InstanceInitiatedShutdownBehavior" + ), + } + + mappings = self._parse_block_device_mapping() + if mappings: + 
kwargs["block_device_mappings"] = mappings if self.is_not_dryrun("RunInstance"): new_reservation = self.ec2_backend.add_instances( - image_id, - min_count, - user_data, - security_group_names, - instance_type=instance_type, - placement=placement, - region_name=region_name, - subnet_id=subnet_id, - owner_id=owner_id, - key_name=key_name, - security_group_ids=security_group_ids, - nics=nics, - private_ip=private_ip, - associate_public_ip=associate_public_ip, - tags=tags, - ebs_optimized=ebs_optimized, - instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, + image_id, min_count, user_data, security_group_names, **kwargs ) template = self.response_template(EC2_RUN_INSTANCES) @@ -272,6 +269,58 @@ class InstanceResponse(BaseResponse): ) return EC2_MODIFY_INSTANCE_ATTRIBUTE + def _parse_block_device_mapping(self): + device_mappings = self._get_list_prefix("BlockDeviceMapping") + mappings = [] + for device_mapping in device_mappings: + self._validate_block_device_mapping(device_mapping) + device_template = deepcopy(BLOCK_DEVICE_MAPPING_TEMPLATE) + device_template["VirtualName"] = device_mapping.get("virtual_name") + device_template["DeviceName"] = device_mapping.get("device_name") + device_template["Ebs"]["SnapshotId"] = device_mapping.get( + "ebs._snapshot_id" + ) + device_template["Ebs"]["VolumeSize"] = device_mapping.get( + "ebs._volume_size" + ) + device_template["Ebs"]["DeleteOnTermination"] = device_mapping.get( + "ebs._delete_on_termination", False + ) + device_template["Ebs"]["VolumeType"] = device_mapping.get( + "ebs._volume_type" + ) + device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops") + device_template["Ebs"]["Encrypted"] = device_mapping.get( + "ebs._encrypted", False + ) + mappings.append(device_template) + + return mappings + + @staticmethod + def _validate_block_device_mapping(device_mapping): + + if not any(mapping for mapping in device_mapping if mapping.startswith("ebs.")): + raise MissingParameterError("ebs") + if ( + 
"ebs._volume_size" not in device_mapping + and "ebs._snapshot_id" not in device_mapping + ): + raise MissingParameterError("size or snapshotId") + + +BLOCK_DEVICE_MAPPING_TEMPLATE = { + "VirtualName": None, + "DeviceName": None, + "Ebs": { + "SnapshotId": None, + "VolumeSize": None, + "DeleteOnTermination": None, + "VolumeType": None, + "Iops": None, + "Encrypted": None, + }, +} EC2_RUN_INSTANCES = ( """ diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 0509e1a45..d53bd14aa 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1126,6 +1126,111 @@ def test_run_instance_with_keypair(): instance.key_name.should.equal("keypair_name") +@mock_ec2 +def test_run_instance_with_block_device_mappings(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_ebs(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}], + } + with assert_raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.exception.response["Error"]["Code"].should.equal("MissingParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The request 
must contain the parameter ebs" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_size(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}} + ], + } + with assert_raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.exception.response["Error"]["Code"].should.equal("MissingParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The request must contain the parameter size or snapshotId" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_from_snapshot(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + ec2_resource = boto3.resource("ec2", region_name="us-east-1") + volume_details = { + "AvailabilityZone": "1a", + "Size": 30, + } + + volume = ec2_resource.create_volume(**volume_details) + snapshot = volume.create_snapshot() + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}} + ], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + + volumes["Volumes"][0]["Size"].should.equal(30) + volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id) + + @mock_ec2_deprecated def test_describe_instance_status_no_instances(): conn = boto.connect_ec2("the_key", "the_secret") From e73a69421952eb65da583fadf86af4efa6dd0c93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anton=20Gr=C3=BCbel?= 
<33207684+gruebel@users.noreply.github.com> Date: Tue, 12 May 2020 14:34:10 +0200 Subject: [PATCH 28/29] Add CloudWatch logs subscription filters (#2982) * Add logs.describe_subscription_filters * Add logs.put_subscription_filter * Add logs.delete_subscription_filter * Change to usage of ACCOUNT_ID --- moto/awslambda/models.py | 24 ++ moto/logs/exceptions.py | 12 +- moto/logs/models.py | 109 ++++++++- moto/logs/responses.py | 30 +++ tests/test_logs/test_logs.py | 414 ++++++++++++++++++++++++++++++++++- 5 files changed, 585 insertions(+), 4 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 589a790ae..7641ce067 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -5,6 +5,8 @@ import time from collections import defaultdict import copy import datetime +from gzip import GzipFile + import docker import docker.errors import hashlib @@ -983,6 +985,28 @@ class LambdaBackend(BaseBackend): func = self._lambdas.get_arn(function_arn) return func.invoke(json.dumps(event), {}, {}) + def send_log_event( + self, function_arn, filter_name, log_group_name, log_stream_name, log_events + ): + data = { + "messageType": "DATA_MESSAGE", + "owner": ACCOUNT_ID, + "logGroup": log_group_name, + "logStream": log_stream_name, + "subscriptionFilters": [filter_name], + "logEvents": log_events, + } + + output = io.BytesIO() + with GzipFile(fileobj=output, mode="w") as f: + f.write(json.dumps(data, separators=(",", ":")).encode("utf-8")) + payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8") + + event = {"awslogs": {"data": payload_gz_encoded}} + + func = self._lambdas.get_arn(function_arn) + return func.invoke(json.dumps(event), {}, {}) + def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py index 9f6628b0f..022b3a411 100644 --- a/moto/logs/exceptions.py +++ b/moto/logs/exceptions.py @@ -7,10 +7,10 @@ class LogsClientError(JsonRESTError): 
class ResourceNotFoundException(LogsClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 400 super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", "The specified resource does not exist" + "ResourceNotFoundException", msg or "The specified log group does not exist" ) @@ -28,3 +28,11 @@ class ResourceAlreadyExistsException(LogsClientError): super(ResourceAlreadyExistsException, self).__init__( "ResourceAlreadyExistsException", "The specified log group already exists" ) + + +class LimitExceededException(LogsClientError): + def __init__(self): + self.code = 400 + super(LimitExceededException, self).__init__( + "LimitExceededException", "Resource limit exceeded." + ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 755605734..dcc0e85e1 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -6,6 +6,7 @@ from .exceptions import ( ResourceNotFoundException, ResourceAlreadyExistsException, InvalidParameterException, + LimitExceededException, ) @@ -57,6 +58,8 @@ class LogStream: 0 # I'm guessing this is token needed for sequenceToken by put_events ) self.events = [] + self.destination_arn = None + self.filter_name = None self.__class__._log_ids += 1 @@ -97,11 +100,32 @@ class LogStream: self.lastIngestionTime = int(unix_time_millis()) # TODO: make this match AWS if possible self.storedBytes += sum([len(log_event["message"]) for log_event in log_events]) - self.events += [ + events = [ LogEvent(self.lastIngestionTime, log_event) for log_event in log_events ] + self.events += events self.uploadSequenceToken += 1 + if self.destination_arn and self.destination_arn.split(":")[2] == "lambda": + from moto.awslambda import lambda_backends # due to circular dependency + + lambda_log_events = [ + { + "id": event.eventId, + "timestamp": event.timestamp, + "message": event.message, + } + for event in events + ] + + lambda_backends[self.region].send_log_event( + self.destination_arn, + self.filter_name, + 
log_group_name, + log_stream_name, + lambda_log_events, + ) + return "{:056d}".format(self.uploadSequenceToken) def get_log_events( @@ -227,6 +251,7 @@ class LogGroup: self.retention_in_days = kwargs.get( "RetentionInDays" ) # AWS defaults to Never Expire for log group retention + self.subscription_filters = [] def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -386,6 +411,48 @@ class LogGroup: k: v for (k, v) in self.tags.items() if k not in tags_to_remove } + def describe_subscription_filters(self): + return self.subscription_filters + + def put_subscription_filter( + self, filter_name, filter_pattern, destination_arn, role_arn + ): + creation_time = int(unix_time_millis()) + + # only one subscription filter can be associated with a log group + if self.subscription_filters: + if self.subscription_filters[0]["filterName"] == filter_name: + creation_time = self.subscription_filters[0]["creationTime"] + else: + raise LimitExceededException + + for stream in self.streams.values(): + stream.destination_arn = destination_arn + stream.filter_name = filter_name + + self.subscription_filters = [ + { + "filterName": filter_name, + "logGroupName": self.name, + "filterPattern": filter_pattern, + "destinationArn": destination_arn, + "roleArn": role_arn, + "distribution": "ByLogStream", + "creationTime": creation_time, + } + ] + + def delete_subscription_filter(self, filter_name): + if ( + not self.subscription_filters + or self.subscription_filters[0]["filterName"] != filter_name + ): + raise ResourceNotFoundException( + "The specified subscription filter does not exist." 
+ ) + + self.subscription_filters = [] + class LogsBackend(BaseBackend): def __init__(self, region_name): @@ -557,6 +624,46 @@ class LogsBackend(BaseBackend): log_group = self.groups[log_group_name] log_group.untag(tags) + def describe_subscription_filters(self, log_group_name): + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + return log_group.describe_subscription_filters() + + def put_subscription_filter( + self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn + ): + # TODO: support other destinations like Kinesis stream + from moto.awslambda import lambda_backends # due to circular dependency + + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + lambda_func = lambda_backends[self.region_name].get_function(destination_arn) + + # no specific permission check implemented + if not lambda_func: + raise InvalidParameterException( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." 
+ ) + + log_group.put_subscription_filter( + filter_name, filter_pattern, destination_arn, role_arn + ) + + def delete_subscription_filter(self, log_group_name, filter_name): + log_group = self.groups.get(log_group_name) + + if not log_group: + raise ResourceNotFoundException() + + log_group.delete_subscription_filter(filter_name) + logs_backends = {} for region in Session().get_available_regions("logs"): diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4631da2f9..9e6886a42 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -178,3 +178,33 @@ class LogsResponse(BaseResponse): tags = self._get_param("tags") self.logs_backend.untag_log_group(log_group_name, tags) return "" + + def describe_subscription_filters(self): + log_group_name = self._get_param("logGroupName") + + subscription_filters = self.logs_backend.describe_subscription_filters( + log_group_name + ) + + return json.dumps({"subscriptionFilters": subscription_filters}) + + def put_subscription_filter(self): + log_group_name = self._get_param("logGroupName") + filter_name = self._get_param("filterName") + filter_pattern = self._get_param("filterPattern") + destination_arn = self._get_param("destinationArn") + role_arn = self._get_param("roleArn") + + self.logs_backend.put_subscription_filter( + log_group_name, filter_name, filter_pattern, destination_arn, role_arn + ) + + return "" + + def delete_subscription_filter(self): + log_group_name = self._get_param("logGroupName") + filter_name = self._get_param("filterName") + + self.logs_backend.delete_subscription_filter(log_group_name, filter_name) + + return "" diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 2429d7e93..675948150 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,10 +1,17 @@ +import base64 +import json +import time +import zlib +from io import BytesIO +from zipfile import ZipFile, ZIP_DEFLATED + import boto3 import os import sure # noqa import six 
from botocore.exceptions import ClientError -from moto import mock_logs, settings +from moto import mock_logs, settings, mock_lambda, mock_iam from nose.tools import assert_raises from nose import SkipTest @@ -425,3 +432,408 @@ def test_untag_log_group(): assert response["tags"] == remaining_tags response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_describe_subscription_filters(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + response = client.describe_subscription_filters(logGroupName=log_group_name) + + # then + response["subscriptionFilters"].should.have.length_of(0) + + +@mock_logs +def test_describe_subscription_filters_errors(): + # given + client = boto3.client("logs", "us-east-1") + + # when + with assert_raises(ClientError) as e: + client.describe_subscription_filters(logGroupName="not-existing-log-group",) + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeSubscriptionFilters") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + +@mock_lambda +@mock_logs +def test_put_subscription_filter_update(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + 
 MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+
+    # when
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(1)
+    filter = response["subscriptionFilters"][0]
+    creation_time = filter["creationTime"]
+    creation_time.should.be.a(int)
+    filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
+    filter["distribution"] = "ByLogStream"
+    filter["logGroupName"] = "/test"
+    filter["filterName"] = "test"
+    filter["filterPattern"] = ""
+
+    # when
+    # to update an existing subscription filter the 'filterName' must be identical
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="[]",
+        destinationArn=function_arn,
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(1)
+    filter = response["subscriptionFilters"][0]
+    filter["creationTime"].should.equal(creation_time)
+    filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
+    filter["distribution"] = "ByLogStream"
+    filter["logGroupName"] = "/test"
+    filter["filterName"] = "test"
+    filter["filterPattern"] = "[]"
+
+    # when
+    # only one subscription filter can be associated with a log group
+    with assert_raises(ClientError) as e:
+        client_logs.put_subscription_filter(
+            logGroupName=log_group_name,
+            filterName="test-2",
+            filterPattern="",
+            destinationArn=function_arn,
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("PutSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("LimitExceededException")
+    ex.response["Error"]["Message"].should.equal("Resource limit exceeded.")
+
+ 
+@mock_lambda +@mock_logs +def test_put_subscription_filter_with_lambda(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.be.a(int) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "" + + # when + client_logs.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + {"timestamp": 0, "message": "test"}, + {"timestamp": 0, "message": "test 2"}, + ], + ) + + # then + msg_showed_up, received_message = _wait_for_log_msg( + client_logs, "/aws/lambda/test", "awslogs" + ) + assert msg_showed_up, "CloudWatch log event was not found. 
All logs: {}".format( + received_message + ) + + data = json.loads(received_message)["awslogs"]["data"] + response = json.loads( + zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8") + ) + response["messageType"].should.equal("DATA_MESSAGE") + response["owner"].should.equal("123456789012") + response["logGroup"].should.equal("/test") + response["logStream"].should.equal("stream") + response["subscriptionFilters"].should.equal(["test"]) + log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"]) + log_events.should.have.length_of(2) + log_events[0]["id"].should.be.a(int) + log_events[0]["message"].should.equal("test") + log_events[0]["timestamp"].should.equal(0) + log_events[1]["id"].should.be.a(int) + log_events[1]["message"].should.equal("test 2") + log_events[1]["timestamp"].should.equal(0) + + +@mock_logs +def test_put_subscription_filter_errors(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="not-existing-log-group", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + 
 ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidParameterException")
+    ex.response["Error"]["Message"].should.equal(
+        "Could not execute the lambda function. "
+        "Make sure you have given CloudWatch Logs permission to execute your function."
+    )
+
+    # when
+    with assert_raises(ClientError) as e:
+        client.put_subscription_filter(
+            logGroupName="/test",
+            filterName="test",
+            filterPattern="",
+            destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing",
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("PutSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("InvalidParameterException")
+    ex.response["Error"]["Message"].should.equal(
+        "Could not execute the lambda function. "
+        "Make sure you have given CloudWatch Logs permission to execute your function."
+    )
+
+
+@mock_lambda
+@mock_logs
+def test_delete_subscription_filter():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        Code={"ZipFile": _get_test_zip_file()},
+        Description="test lambda function",
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # when
+    client_logs.delete_subscription_filter(
+        logGroupName="/test", filterName="test",
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(0)
+
+ 
+@mock_lambda +@mock_logs +def test_delete_subscription_filter_errors(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + client_logs.create_log_group(logGroupName=log_group_name) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # when + with assert_raises(ClientError) as e: + client_logs.delete_subscription_filter( + logGroupName="not-existing-log-group", filterName="test", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DeleteSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with assert_raises(ClientError) as e: + client_logs.delete_subscription_filter( + logGroupName="/test", filterName="wrong-filter-name", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("DeleteSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified subscription filter does not exist." 
+ ) + + +def _get_role_name(region_name): + with mock_iam(): + iam = boto3.client("iam", region_name=region_name) + try: + return iam.get_role(RoleName="test-role")["Role"]["Arn"] + except ClientError: + return iam.create_role( + RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", + )["Role"]["Arn"] + + +def _get_test_zip_file(): + func_str = """ +def lambda_handler(event, context): + return event +""" + + zip_output = BytesIO() + zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED) + zip_file.writestr("lambda_function.py", func_str) + zip_file.close() + zip_output.seek(0) + return zip_output.read() + + +def _wait_for_log_msg(client, log_group_name, expected_msg_part): + received_messages = [] + start = time.time() + while (time.time() - start) < 10: + result = client.describe_log_streams(logGroupName=log_group_name) + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + + for log_stream in log_streams: + result = client.get_log_events( + logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], + ) + received_messages.extend( + [event["message"] for event in result.get("events")] + ) + for message in received_messages: + if expected_msg_part in message: + return True, message + time.sleep(1) + return False, received_messages From 774a764b698fbf50dffd58f6518a344fef73af76 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Tue, 12 May 2020 19:29:07 +0530 Subject: [PATCH 29/29] Fix s3 Added Error handling in case of invalid uploadID (#2979) * Added Error handling in case of invalid uploadID * Linting * added assertions * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/s3/exceptions.py | 9 +++++++++ moto/s3/models.py | 4 ++++ tests/test_s3/test_s3.py | 13 +++++++++++++ 3 files changed, 26 insertions(+) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index c38a4f467..3ed385f1c 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -377,3 +377,12 @@ class 
NoSystemTags(S3ClientError): super(NoSystemTags, self).__init__( "InvalidTag", "System tags cannot be added/updated by requester" ) + + +class NoSuchUpload(S3ClientError): + code = 404 + + def __init__(self): + super(NoSuchUpload, self).__init__( + "NoSuchUpload", "The specified multipart upload does not exist." + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index 866c5d007..3020fd45e 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -40,6 +40,7 @@ from .exceptions import ( NoSuchPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration, WrongPublicAccessBlockAccountIdError, + NoSuchUpload, ) from .utils import clean_key_name, _VersionedKeyStore @@ -1478,6 +1479,9 @@ class S3Backend(BaseBackend): def cancel_multipart(self, bucket_name, multipart_id): bucket = self.get_bucket(bucket_name) + multipart_data = bucket.multiparts.get(multipart_id, None) + if not multipart_data: + raise NoSuchUpload() del bucket.multiparts[multipart_id] def list_multipart(self, bucket_name, multipart_id): diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index f60e0293e..bcb9da87f 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2149,6 +2149,19 @@ def test_boto3_copy_object_with_versioning(): data.should.equal(b"test2") +@mock_s3 +def test_s3_abort_multipart_data_with_invalid_upload_and_key(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + client.create_bucket(Bucket="blah") + + with assert_raises(Exception) as err: + client.abort_multipart_upload( + Bucket="blah", Key="foobar", UploadId="dummy_upload_id" + ) + err.exception.response["Error"]["Code"].should.equal("NoSuchUpload") + + @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)