Merge pull request #2767 from jrbeilke/feature-config-putevaluations

Feature: Add test mode support for Config PutEvaluations
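With this change, moto's Config backend accepts PutEvaluations calls when TestMode is set, so a mocked client can be exercised end to end. A minimal sketch of the intended usage (the resource identifiers and result token below are illustrative values, not part of this PR):

from datetime import datetime

import boto3
from moto import mock_config


@mock_config
def put_sample_evaluation():
    # Only the TestMode=True path is supported by moto after this PR;
    # the resource type, id, and token are made-up example values.
    client = boto3.client("config", region_name="us-west-2")
    return client.put_evaluations(
        Evaluations=[
            {
                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                "ComplianceResourceId": "example-api",
                "ComplianceType": "COMPLIANT",
                "OrderingTimestamp": datetime(2015, 1, 1),
            }
        ],
        ResultToken="example-token",
        TestMode=True,
    )


print(put_sample_evaluation()["FailedEvaluations"])  # -> []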
commit f01108f687
Steve Pulec, 2020-03-07 10:16:34 -06:00, committed by GitHub
4 changed files with 109 additions and 0 deletions

@@ -366,3 +366,13 @@ class TooManyResourceKeys(JsonRESTError):
        message = str(message)
        super(TooManyResourceKeys, self).__init__("ValidationException", message)

class InvalidResultTokenException(JsonRESTError):
    code = 400

    def __init__(self):
        message = "The resultToken provided is invalid"
        super(InvalidResultTokenException, self).__init__(
            "InvalidResultTokenException", message
        )


@@ -40,6 +40,7 @@ from moto.config.exceptions import (
    TooManyResourceIds,
    ResourceNotDiscoveredException,
    TooManyResourceKeys,
    InvalidResultTokenException,
)

from moto.core import BaseBackend, BaseModel
@@ -1089,6 +1090,26 @@ class ConfigBackend(BaseBackend):
            "UnprocessedResourceIdentifiers": not_found,
        }

    def put_evaluations(self, evaluations=None, result_token=None, test_mode=False):
        if not evaluations:
            raise InvalidParameterValueException(
                "The Evaluations object in your request cannot be null. "
                "Add the required parameters and try again."
            )

        if not result_token:
            raise InvalidResultTokenException()

        # Moto only supports PutEvaluations in test mode currently
        # (rule and token support is missing).
        if not test_mode:
            raise NotImplementedError(
                "PutEvaluations without TestMode is not yet implemented"
            )

        # At this time, moto does not add failed evaluations.
        return {
            "FailedEvaluations": [],
        }

config_backends = {}
for region in Session().get_available_regions("config"):
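For a quick sanity check of the backend method above without going through the HTTP layer, the region-keyed backend can be called directly, roughly like this (a sketch, assuming config_backends is importable from moto.config.models; the evaluation payload and token are arbitrary example values):

from moto.config.models import config_backends

# Direct backend call; test_mode must be truthy, otherwise the
# NotImplementedError branch above is hit.
backend = config_backends["us-west-2"]
result = backend.put_evaluations(
    evaluations=[{"ComplianceResourceType": "AWS::ApiGateway::RestApi"}],
    result_token="any-non-empty-token",
    test_mode=True,
)
assert result == {"FailedEvaluations": []}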


@@ -151,3 +151,11 @@ class ConfigResponse(BaseResponse):
            self._get_param("ResourceIdentifiers"),
        )
        return json.dumps(schema)

    def put_evaluations(self):
        evaluations = self.config_backend.put_evaluations(
            self._get_param("Evaluations"),
            self._get_param("ResultToken"),
            self._get_param("TestMode"),
        )
        return json.dumps(evaluations)
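For reference, the three parameters the handler pulls out of the request correspond to a body shaped roughly like the following (a sketch; all values are illustrative):

# Illustrative request body; self._get_param(...) reads these top-level keys.
request_body = {
    "Evaluations": [
        {
            "ComplianceResourceType": "AWS::ApiGateway::RestApi",
            "ComplianceResourceId": "test-api",
            "ComplianceType": "INSUFFICIENT_DATA",
            "OrderingTimestamp": "2015-01-01T00:00:00Z",
        }
    ],
    "ResultToken": "example-token",
    "TestMode": True,
}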


@@ -1,8 +1,10 @@
import json
import os
from datetime import datetime, timedelta

import boto3
from botocore.exceptions import ClientError
from nose import SkipTest
from nose.tools import assert_raises

from moto import mock_s3
@@ -1802,3 +1804,71 @@ def test_batch_get_aggregate_resource_config():
        len(result["UnprocessedResourceIdentifiers"]) == 1
        and result["UnprocessedResourceIdentifiers"][0]["SourceRegion"] == "eu-west-1"
    )

@mock_config
def test_put_evaluations():
    client = boto3.client("config", region_name="us-west-2")

    # Try without Evaluations supplied:
    with assert_raises(ClientError) as ce:
        client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True)
    assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException"
    assert (
        "The Evaluations object in your request cannot be null"
        in ce.exception.response["Error"]["Message"]
    )

    # Try without a ResultToken supplied:
    with assert_raises(ClientError) as ce:
        client.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                    "ComplianceResourceId": "test-api",
                    "ComplianceType": "INSUFFICIENT_DATA",
                    "OrderingTimestamp": datetime(2015, 1, 1),
                }
            ],
            ResultToken="",
            TestMode=True,
        )
    assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException"

    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
        raise SkipTest("Does not work in server mode due to an error in Werkzeug")
    else:
        # Try without TestMode supplied:
        with assert_raises(NotImplementedError):
            client.put_evaluations(
                Evaluations=[
                    {
                        "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                        "ComplianceResourceId": "test-api",
                        "ComplianceType": "INSUFFICIENT_DATA",
                        "OrderingTimestamp": datetime(2015, 1, 1),
                    }
                ],
                ResultToken="test",
            )

        # Now with proper params:
        response = client.put_evaluations(
            Evaluations=[
                {
                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
                    "ComplianceResourceId": "test-api",
                    "ComplianceType": "INSUFFICIENT_DATA",
                    "OrderingTimestamp": datetime(2015, 1, 1),
                }
            ],
            TestMode=True,
            ResultToken="test",
        )

        # The HTTP headers and retry count are hard to match against, so remove them:
        response["ResponseMetadata"].pop("HTTPHeaders", None)
        response["ResponseMetadata"].pop("RetryAttempts", None)

        response.should.equal(
            {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200}}
        )