diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py
index 09fe8ed91..8e6c3ec4c 100644
--- a/tests/test_config/test_config.py
+++ b/tests/test_config/test_config.py
@@ -3,6 +3,7 @@ from datetime import datetime, timedelta
 
 import boto3
 from botocore.exceptions import ClientError
+from nose import SkipTest
 from nose.tools import assert_raises
 
 from moto import mock_s3
@@ -1833,19 +1834,22 @@ def test_put_evaluations():
         )
     assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException"
 
-    # Try without TestMode supplied:
-    with assert_raises(NotImplementedError) as ce:
-        client.put_evaluations(
-            Evaluations=[
-                {
-                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
-                    "ComplianceResourceId": "test-api",
-                    "ComplianceType": "INSUFFICIENT_DATA",
-                    "OrderingTimestamp": datetime(2015, 1, 1),
-                }
-            ],
-            ResultToken="test",
-        )
+    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
+        raise SkipTest("Does not work in server mode due to error in Werkzeug")
+    else:
+        # Try without TestMode supplied:
+        with assert_raises(NotImplementedError):
+            client.put_evaluations(
+                Evaluations=[
+                    {
+                        "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                        "ComplianceResourceId": "test-api",
+                        "ComplianceType": "INSUFFICIENT_DATA",
+                        "OrderingTimestamp": datetime(2015, 1, 1),
+                    }
+                ],
+                ResultToken="test",
+            )
 
     # Now with proper params:
     response = client.put_evaluations(
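Note: the new branch reads `os.environ`, and the hunk context does not show an `import os` being added, so the module presumably already imports `os`; if it does not, that import is needed for the change to run. Below is a minimal, self-contained sketch of the skip pattern the diff applies. `TEST_SERVER_MODE` and `from nose import SkipTest` come from the diff itself; the helper name `skip_in_server_mode` and the example test body are illustrative, not part of the change.

import os

from nose import SkipTest


def skip_in_server_mode(reason):
    """Raise nose's SkipTest when the suite runs against a standalone moto server.

    moto's test suite sets TEST_SERVER_MODE=true when tests exercise a server
    over HTTP instead of in-process mocks; behaviour that only exists
    in-process (such as a NotImplementedError raised by the backend) cannot
    be asserted in that mode, so the test is skipped instead.
    """
    # Hypothetical helper; mirrors the env-var check used in the diff.
    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
        raise SkipTest(reason)


def test_example():
    # Hypothetical test: check the skip condition first, then run the
    # in-process-only assertion.
    skip_in_server_mode("NotImplementedError does not propagate in server mode")
    # ... in-process assertions would follow here ...

Raising SkipTest inside the test body marks it as skipped rather than failed, which is why the diff gates the NotImplementedError assertion: per the skip message, in server mode the exception surfaces as an error in the Werkzeug HTTP layer rather than reaching the boto3 client, so the in-process assertion cannot hold there.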