diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py
index 4a0dc0d73..6b6498d34 100644
--- a/moto/config/exceptions.py
+++ b/moto/config/exceptions.py
@@ -366,3 +366,13 @@ class TooManyResourceKeys(JsonRESTError):
 
         message = str(message)
         super(TooManyResourceKeys, self).__init__("ValidationException", message)
+
+
+class InvalidResultTokenException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        message = "The resultToken provided is invalid"
+        super(InvalidResultTokenException, self).__init__(
+            "InvalidResultTokenException", message
+        )
diff --git a/moto/config/models.py b/moto/config/models.py
index a66576979..242a219e4 100644
--- a/moto/config/models.py
+++ b/moto/config/models.py
@@ -40,6 +40,7 @@ from moto.config.exceptions import (
     TooManyResourceIds,
     ResourceNotDiscoveredException,
     TooManyResourceKeys,
+    InvalidResultTokenException,
 )
 
 from moto.core import BaseBackend, BaseModel
@@ -1089,6 +1090,26 @@ class ConfigBackend(BaseBackend):
             "UnprocessedResourceIdentifiers": not_found,
         }
 
+    def put_evaluations(self, evaluations=None, result_token=None, test_mode=False):
+        if not evaluations:
+            raise InvalidParameterValueException(
+                "The Evaluations object in your request cannot be null. "
+                "Add the required parameters and try again."
+            )
+
+        if not result_token:
+            raise InvalidResultTokenException()
+
+        # Moto only supports PutEvaluations with test mode currently (missing rule and token support)
+        if not test_mode:
+            raise NotImplementedError(
+                "PutEvaluations without TestMode is not yet implemented"
+            )
+
+        return {
+            "FailedEvaluations": [],
+        }  # At this time, moto is not adding failed evaluations.
+
 
 config_backends = {}
 for region in Session().get_available_regions("config"):
diff --git a/moto/config/responses.py b/moto/config/responses.py
index e977945c9..3b647b5bf 100644
--- a/moto/config/responses.py
+++ b/moto/config/responses.py
@@ -151,3 +151,11 @@ class ConfigResponse(BaseResponse):
             self._get_param("ResourceIdentifiers"),
         )
         return json.dumps(schema)
+
+    def put_evaluations(self):
+        evaluations = self.config_backend.put_evaluations(
+            self._get_param("Evaluations"),
+            self._get_param("ResultToken"),
+            self._get_param("TestMode"),
+        )
+        return json.dumps(evaluations)
diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py
index d5ec8f0bc..09fe8ed91 100644
--- a/tests/test_config/test_config.py
+++ b/tests/test_config/test_config.py
@@ -1802,3 +1802,68 @@ def test_batch_get_aggregate_resource_config():
         len(result["UnprocessedResourceIdentifiers"]) == 1
         and result["UnprocessedResourceIdentifiers"][0]["SourceRegion"] == "eu-west-1"
     )
+
+
+@mock_config
+def test_put_evaluations():
+    client = boto3.client("config", region_name="us-west-2")
+
+    # Try without Evaluations supplied:
+    with assert_raises(ClientError) as ce:
+        client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True)
+    assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException"
+    assert (
+        "The Evaluations object in your request cannot be null"
+        in ce.exception.response["Error"]["Message"]
+    )
+
+    # Try without a ResultToken supplied:
+    with assert_raises(ClientError) as ce:
+        client.put_evaluations(
+            Evaluations=[
+                {
+                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                    "ComplianceResourceId": "test-api",
+                    "ComplianceType": "INSUFFICIENT_DATA",
+                    "OrderingTimestamp": datetime(2015, 1, 1),
+                }
+            ],
+            ResultToken="",
+            TestMode=True,
+        )
+    assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException"
+
+    # Try without TestMode supplied:
+    with assert_raises(NotImplementedError) as ce:
+        client.put_evaluations(
+            Evaluations=[
+                {
+                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                    "ComplianceResourceId": "test-api",
+                    "ComplianceType": "INSUFFICIENT_DATA",
+                    "OrderingTimestamp": datetime(2015, 1, 1),
+                }
+            ],
+            ResultToken="test",
+        )
+
+    # Now with proper params:
+    response = client.put_evaluations(
+        Evaluations=[
+            {
+                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                "ComplianceResourceId": "test-api",
+                "ComplianceType": "INSUFFICIENT_DATA",
+                "OrderingTimestamp": datetime(2015, 1, 1),
+            }
+        ],
+        TestMode=True,
+        ResultToken="test",
+    )
+
+    # The response metadata varies between runs and is hard to match against, so remove it:
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200}}
+    )
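For context on the happy path tested above: outside of tests, PutEvaluations is normally invoked from a custom Config rule's Lambda handler, which receives a resultToken in the invoking event. Because this change only implements TestMode, any such handler exercised under moto has to pass TestMode=True. The handler below is a hypothetical sketch of that call pattern and is not part of this diff; the function name and event handling are illustrative only.

import boto3
from datetime import datetime


def lambda_handler(event, context):
    # AWS Config passes a resultToken to custom-rule handlers in the invoking event.
    # Under moto, only TestMode=True is supported, so the token is not validated
    # against a real rule invocation.
    config = boto3.client("config", region_name="us-west-2")
    return config.put_evaluations(
        Evaluations=[
            {
                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                "ComplianceResourceId": "test-api",
                "ComplianceType": "COMPLIANT",
                "OrderingTimestamp": datetime(2015, 1, 1),
            }
        ],
        ResultToken=event.get("resultToken", "test"),
        TestMode=True,  # required until non-test mode is implemented in moto
    )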