add rudimentary support for Config PutEvaluations with TestMode for now

Jon Beilke 2020-02-24 11:53:27 -06:00
parent 4c2667648a
commit 28b4305759
4 changed files with 104 additions and 0 deletions
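For quick orientation, here is a minimal sketch (mirroring the new tests at the bottom of this diff) of what the mocked call looks like from a boto3 client; it uses nothing beyond what the commit adds:

from datetime import datetime

import boto3
from moto import mock_config


@mock_config
def put_test_evaluation():
    # TestMode=True is the only path this commit supports; the backend
    # validates inputs and reports no failed evaluations.
    client = boto3.client("config", region_name="us-west-2")
    return client.put_evaluations(
        Evaluations=[
            {
                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                "ComplianceResourceId": "test-api",
                "ComplianceType": "INSUFFICIENT_DATA",
                "OrderingTimestamp": datetime(2015, 1, 1),
            }
        ],
        ResultToken="test",
        TestMode=True,
    )


print(put_test_evaluation()["FailedEvaluations"])  # []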

moto/config/exceptions.py

@@ -366,3 +366,13 @@ class TooManyResourceKeys(JsonRESTError):
        message = str(message)
        super(TooManyResourceKeys, self).__init__("ValidationException", message)


class InvalidResultTokenException(JsonRESTError):
    code = 400

    def __init__(self):
        message = "The resultToken provided is invalid"
        super(InvalidResultTokenException, self).__init__(
            "InvalidResultTokenException", message
        )
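As the tests below assert, this new exception reaches a boto3 caller as an ordinary ClientError; a small sketch of catching it (the evaluation payload is the same shape used in the tests):

from datetime import datetime

import boto3
from botocore.exceptions import ClientError
from moto import mock_config


@mock_config
def show_invalid_token_error():
    client = boto3.client("config", region_name="us-west-2")
    try:
        # An empty ResultToken trips InvalidResultTokenException.
        client.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                    "ComplianceResourceId": "test-api",
                    "ComplianceType": "INSUFFICIENT_DATA",
                    "OrderingTimestamp": datetime(2015, 1, 1),
                }
            ],
            ResultToken="",
            TestMode=True,
        )
    except ClientError as err:
        print(err.response["Error"]["Code"])  # InvalidResultTokenException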

moto/config/models.py

@@ -40,6 +40,7 @@ from moto.config.exceptions import (
    TooManyResourceIds,
    ResourceNotDiscoveredException,
    TooManyResourceKeys,
    InvalidResultTokenException,
)
from moto.core import BaseBackend, BaseModel
@@ -1089,6 +1090,26 @@ class ConfigBackend(BaseBackend):
            "UnprocessedResourceIdentifiers": not_found,
        }

    def put_evaluations(self, evaluations=None, result_token=None, test_mode=False):
        if not evaluations:
            raise InvalidParameterValueException(
                "The Evaluations object in your request cannot be null. "
                "Add the required parameters and try again."
            )

        if not result_token:
            raise InvalidResultTokenException()

        # Moto currently only supports PutEvaluations in test mode
        # (rule and result-token support is still missing):
        if not test_mode:
            raise NotImplementedError(
                "PutEvaluations without TestMode is not yet implemented"
            )

        # At this time, moto does not model failed evaluations.
        return {"FailedEvaluations": []}

config_backends = {}
for region in Session().get_available_regions("config"):
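One consequence of the guard above: a call without TestMode surfaces as a plain NotImplementedError rather than a modeled AWS error, which the new test exercises. A sketch (the token value is a placeholder):

from datetime import datetime

import boto3
from moto import mock_config


@mock_config
def non_test_mode_call():
    client = boto3.client("config", region_name="us-west-2")
    try:
        client.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                    "ComplianceResourceId": "test-api",
                    "ComplianceType": "INSUFFICIENT_DATA",
                    "OrderingTimestamp": datetime(2015, 1, 1),
                }
            ],
            ResultToken="placeholder-rule-token",  # hypothetical value
        )
    except NotImplementedError as err:
        print(err)  # PutEvaluations without TestMode is not yet implemented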

moto/config/responses.py

@@ -151,3 +151,11 @@ class ConfigResponse(BaseResponse):
            self._get_param("ResourceIdentifiers"),
        )
        return json.dumps(schema)

    def put_evaluations(self):
        evaluations = self.config_backend.put_evaluations(
            self._get_param("Evaluations"),
            self._get_param("ResultToken"),
            self._get_param("TestMode"),
        )
        return json.dumps(evaluations)
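For reference, the three _get_param calls read top-level keys from the JSON document botocore posts for this action; roughly, the body looks like the sketch below (field names follow the public PutEvaluations API; the timestamp wire encoding shown is an assumption about botocore's JSON serialization):

import json

# Approximate request body as the handler above would receive it;
# _get_param() resolves these top-level keys.
request_body = json.dumps(
    {
        "Evaluations": [
            {
                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                "ComplianceResourceId": "test-api",
                "ComplianceType": "INSUFFICIENT_DATA",
                # Epoch-seconds form; exact timestamp encoding is assumed.
                "OrderingTimestamp": 1420070400,
            }
        ],
        "ResultToken": "test",
        "TestMode": True,
    }
)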

tests/test_config/test_config.py

@@ -1802,3 +1802,68 @@ def test_batch_get_aggregate_resource_config():
        len(result["UnprocessedResourceIdentifiers"]) == 1
        and result["UnprocessedResourceIdentifiers"][0]["SourceRegion"] == "eu-west-1"
    )


@mock_config
def test_put_evaluations():
    client = boto3.client("config", region_name="us-west-2")

    # Try without Evaluations supplied:
    with assert_raises(ClientError) as ce:
        client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True)
    assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException"
    assert (
        "The Evaluations object in your request cannot be null"
        in ce.exception.response["Error"]["Message"]
    )

    # Try without a ResultToken supplied:
    with assert_raises(ClientError) as ce:
        client.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                    "ComplianceResourceId": "test-api",
                    "ComplianceType": "INSUFFICIENT_DATA",
                    "OrderingTimestamp": datetime(2015, 1, 1),
                }
            ],
            ResultToken="",
            TestMode=True,
        )
    assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException"

    # Try without TestMode supplied:
    with assert_raises(NotImplementedError):
        client.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                    "ComplianceResourceId": "test-api",
                    "ComplianceType": "INSUFFICIENT_DATA",
                    "OrderingTimestamp": datetime(2015, 1, 1),
                }
            ],
            ResultToken="test",
        )

    # Now with proper params:
    response = client.put_evaluations(
        Evaluations=[
            {
                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                "ComplianceResourceId": "test-api",
                "ComplianceType": "INSUFFICIENT_DATA",
                "OrderingTimestamp": datetime(2015, 1, 1),
            }
        ],
        TestMode=True,
        ResultToken="test",
    )

    # These fields are hard to match against, so remove them:
    response["ResponseMetadata"].pop("HTTPHeaders", None)
    response["ResponseMetadata"].pop("RetryAttempts", None)

    response.should.equal(
        {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200}}
    )