import json
import unittest

import pytest
from moto import mock_aws, settings
from moto.core.exceptions import InvalidNextTokenException
from moto.s3.config import s3_config_query

from tests import DEFAULT_ACCOUNT_ID

# Integration tests would use `boto3.client("config").list_discovered_resources`
# We're testing the backend here directly, because it is much quicker


@mock_aws
def test_s3_public_access_block_to_config_dict():
    """Verify a bucket's PublicAccessBlock serializes correctly to the Config dict."""
    if not settings.TEST_DECORATOR_MODE:
        raise unittest.SkipTest("Not using boto3 - no point in testing server-mode")
    s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]

    # With 1 bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")

    # Values arrive from the API as strings, not booleans:
    public_access_block = {
        "BlockPublicAcls": "True",
        "IgnorePublicAcls": "False",
        "BlockPublicPolicy": "True",
        "RestrictPublicBuckets": "False",
    }

    # Add a public access block:
    s3_config_query_backend.put_public_access_block("bucket1", public_access_block)

    result = s3_config_query_backend.buckets[
        "bucket1"
    ].public_access_block.to_config_dict()

    # Keys must be lowerCamelCased and the string values coerced to real booleans:
    for key, value in public_access_block.items():
        k = f"{key[0].lower()}{key[1:]}"
        assert result[k] is (value == "True")

    # Verify that this resides in the full bucket's to_config_dict:
    full_result = s3_config_query_backend.buckets["bucket1"].to_config_dict()
    assert (
        json.loads(
            full_result["supplementaryConfiguration"]["PublicAccessBlockConfiguration"]
        )
        == result
    )


@mock_aws
def test_list_config_discovered_resources():
    """Exercise listing of S3 buckets via the Config backend, including filters
    (name, region, resource ids) and pagination (next tokens, invalid tokens)."""
    if not settings.TEST_DECORATOR_MODE:
        raise unittest.SkipTest("Not using boto3 - no point in testing server-mode")
    s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]

    # Without any buckets:
    assert s3_config_query.list_config_service_resources(
        "global", "global", None, None, 100, None
    ) == ([], None)

    # With 10 buckets in us-west-2:
    for idx in range(0, 10):
        s3_config_query_backend.create_bucket(f"bucket{idx}", "us-west-2")

    # With 2 buckets in eu-west-1:
    for idx in range(10, 12):
        s3_config_query_backend.create_bucket(f"eu-bucket{idx}", "eu-west-1")

    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 100, None
    )
    assert not next_token
    assert len(result) == 12
    for idx in range(0, 10):
        assert result[idx] == {
            "type": "AWS::S3::Bucket",
            "id": f"bucket{idx}",
            "name": f"bucket{idx}",
            "region": "us-west-2",
        }
    for idx in range(10, 12):
        assert result[idx] == {
            "type": "AWS::S3::Bucket",
            "id": f"eu-bucket{idx}",
            "name": f"eu-bucket{idx}",
            "region": "eu-west-1",
        }

    # With a name:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, "bucket0", 100, None
    )
    assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token

    # With a region:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 100, None, resource_region="eu-west-1"
    )
    assert len(result) == 2 and not next_token and result[1]["name"] == "eu-bucket11"

    # With resource ids:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, ["bucket0", "bucket1"], None, 100, None
    )
    assert (
        len(result) == 2
        and result[0]["name"] == "bucket0"
        and result[1]["name"] == "bucket1"
        and not next_token
    )

    # With duplicated resource ids (must be de-duplicated):
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, ["bucket0", "bucket0"], None, 100, None
    )
    assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token

    # Pagination:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 1, None
    )
    assert (
        len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
    )

    # Last Page:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 1, "eu-bucket11", resource_region="eu-west-1"
    )
    assert len(result) == 1 and result[0]["name"] == "eu-bucket11" and not next_token

    # With a list of buckets:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, ["bucket0", "bucket1"], None, 1, None
    )
    assert (
        len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
    )

    # With an invalid page:
    with pytest.raises(InvalidNextTokenException) as inte:
        s3_config_query.list_config_service_resources(
            DEFAULT_ACCOUNT_ID, None, None, 1, "notabucket"
        )

    assert "The nextToken provided is invalid" in inte.value.message


@mock_aws
def test_s3_lifecycle_config_dict():
    """Verify lifecycle rules (prefix/and/empty filters, expiration, abort-multipart,
    transitions) serialize to the expected Config dict shapes."""
    if not settings.TEST_DECORATOR_MODE:
        raise unittest.SkipTest("Not using boto3 - no point in testing server-mode")
    s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]

    # With 1 bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")

    # And a lifecycle policy
    lifecycle = [
        {
            "ID": "rule1",
            "Status": "Enabled",
            "Filter": {"Prefix": ""},
            "Expiration": {"Days": 1},
        },
        {
            "ID": "rule2",
            "Status": "Enabled",
            "Filter": {
                "And": {
                    "Prefix": "some/path",
                    "Tag": [{"Key": "TheKey", "Value": "TheValue"}],
                }
            },
            "Expiration": {"Days": 1},
        },
        {"ID": "rule3", "Status": "Enabled", "Filter": {}, "Expiration": {"Days": 1}},
        {
            "ID": "rule4",
            "Status": "Enabled",
            "Filter": {"Prefix": ""},
            "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1},
        },
        {
            "ID": "rule5",
            "Status": "Enabled",
            "Filter": {"Prefix": ""},
            "Transition": [{"Days": 10, "StorageClass": "GLACIER"}],
            "NoncurrentVersionTransition": [
                {
                    "NoncurrentDays": 10,
                    "StorageClass": "GLACIER",
                }
            ],
        },
    ]
    s3_config_query_backend.put_bucket_lifecycle("bucket1", lifecycle)

    # Get the rules for this:
    lifecycles = [
        rule.to_config_dict()
        for rule in s3_config_query_backend.buckets["bucket1"].rules
    ]

    # Verify the first:
    assert lifecycles[0] == {
        "id": "rule1",
        "prefix": None,
        "status": "Enabled",
        "expirationInDays": 1,
        "expiredObjectDeleteMarker": None,
        "noncurrentVersionExpirationInDays": -1,
        "expirationDate": None,
        "transitions": None,
        "noncurrentVersionTransitions": None,
        "abortIncompleteMultipartUpload": None,
        "filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
    }

    # Verify the second:
    assert lifecycles[1] == {
        "id": "rule2",
        "prefix": None,
        "status": "Enabled",
        "expirationInDays": 1,
        "expiredObjectDeleteMarker": None,
        "noncurrentVersionExpirationInDays": -1,
        "expirationDate": None,
        "transitions": None,
        "noncurrentVersionTransitions": None,
        "abortIncompleteMultipartUpload": None,
        "filter": {
            "predicate": {
                "type": "LifecycleAndOperator",
                "operands": [
                    {"type": "LifecyclePrefixPredicate", "prefix": "some/path"},
                    {
                        "type": "LifecycleTagPredicate",
                        "tag": {"key": "TheKey", "value": "TheValue"},
                    },
                ],
            }
        },
    }

    # And the third:
    assert lifecycles[2] == {
        "id": "rule3",
        "prefix": None,
        "status": "Enabled",
        "expirationInDays": 1,
        "expiredObjectDeleteMarker": None,
        "noncurrentVersionExpirationInDays": -1,
        "expirationDate": None,
        "transitions": None,
        "noncurrentVersionTransitions": None,
        "abortIncompleteMultipartUpload": None,
        # An empty Filter serializes to a null predicate:
        "filter": {"predicate": None},
    }

    # And the last:
    assert lifecycles[3] == {
        "id": "rule4",
        "prefix": None,
        "status": "Enabled",
        "expirationInDays": None,
        "expiredObjectDeleteMarker": None,
        "noncurrentVersionExpirationInDays": -1,
        "expirationDate": None,
        "transitions": None,
        "noncurrentVersionTransitions": None,
        "abortIncompleteMultipartUpload": {"daysAfterInitiation": 1},
        "filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
    }

    assert lifecycles[4] == {
        "id": "rule5",
        "prefix": None,
        "status": "Enabled",
        "expirationInDays": None,
        "expiredObjectDeleteMarker": None,
        "noncurrentVersionExpirationInDays": -1,
        "expirationDate": None,
        "abortIncompleteMultipartUpload": None,
        "filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
        "transitions": [{"days": 10, "storageClass": "GLACIER"}],
        "noncurrentVersionTransitions": [
            {"noncurrentDays": 10, "storageClass": "GLACIER"}
        ],
    }


@mock_aws
def test_s3_notification_config_dict():
    """Verify topic/queue/lambda notification configurations serialize to the
    expected Config dict, including S3Key filter rules."""
    if not settings.TEST_DECORATOR_MODE:
        raise unittest.SkipTest("Not using boto3 - no point in testing server-mode")
    s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]

    # With 1 bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")

    # And some notifications:
    notifications = {
        "TopicConfiguration": [
            {
                "Id": "Topic",
                "Topic": "arn:aws:sns:us-west-2:012345678910:mytopic",
                "Event": [
                    "s3:ReducedRedundancyLostObject",
                    "s3:ObjectRestore:Completed",
                ],
            }
        ],
        "QueueConfiguration": [
            {
                "Id": "Queue",
                "Queue": "arn:aws:sqs:us-west-2:012345678910:myqueue",
                "Event": ["s3:ObjectRemoved:Delete"],
                "Filter": {
                    "S3Key": {
                        "FilterRule": [{"Name": "prefix", "Value": "stuff/here/"}]
                    }
                },
            }
        ],
        "CloudFunctionConfiguration": [
            {
                "Id": "Lambda",
                "CloudFunction": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
                "Event": [
                    "s3:ObjectCreated:Post",
                    "s3:ObjectCreated:Copy",
                    "s3:ObjectCreated:Put",
                ],
                "Filter": {
                    "S3Key": {"FilterRule": [{"Name": "suffix", "Value": ".png"}]}
                },
            }
        ],
    }

    s3_config_query.backends[DEFAULT_ACCOUNT_ID][
        "global"
    ].put_bucket_notification_configuration("bucket1", notifications)

    # Get the notifications for this:
    notifications = s3_config_query_backend.buckets[
        "bucket1"
    ].notification_configuration.to_config_dict()

    # Verify it all:
    assert notifications == {
        "configurations": {
            "Topic": {
                "events": [
                    "s3:ReducedRedundancyLostObject",
                    "s3:ObjectRestore:Completed",
                ],
                "filter": None,
                "objectPrefixes": [],
                "topicARN": "arn:aws:sns:us-west-2:012345678910:mytopic",
                "type": "TopicConfiguration",
            },
            "Queue": {
                "events": ["s3:ObjectRemoved:Delete"],
                "filter": {
                    "s3KeyFilter": {
                        "filterRules": [{"name": "prefix", "value": "stuff/here/"}]
                    }
                },
                "objectPrefixes": [],
                "queueARN": "arn:aws:sqs:us-west-2:012345678910:myqueue",
                "type": "QueueConfiguration",
            },
            "Lambda": {
                "events": [
                    "s3:ObjectCreated:Post",
                    "s3:ObjectCreated:Copy",
                    "s3:ObjectCreated:Put",
                ],
                "filter": {
                    "s3KeyFilter": {
                        "filterRules": [{"name": "suffix", "value": ".png"}]
                    }
                },
                "objectPrefixes": [],
                "queueARN": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
                "type": "LambdaConfiguration",
            },
        }
    }


@mock_aws
def test_s3_acl_to_config_dict():
    """Verify bucket ACLs (owner-only default, group grants, reduced owner
    permissions) serialize correctly to the Config dict."""
    if not settings.TEST_DECORATOR_MODE:
        raise unittest.SkipTest("Not using boto3 - no point in testing server-mode")
    s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]
    from moto.s3.models import OWNER, FakeAcl, FakeGrant, FakeGrantee

    # With 1 bucket in us-west-2:
    s3_config_query_backend.create_bucket("logbucket", "us-west-2")

    # Get the config dict with nothing other than the owner details:
    acls = s3_config_query_backend.buckets["logbucket"].acl.to_config_dict()
    owner_acl = {
        "grantee": {"id": OWNER, "displayName": None},
        "permission": "FullControl",
    }
    assert acls == {
        "grantSet": None,
        "owner": {"displayName": None, "id": OWNER},
        "grantList": [owner_acl],
    }

    # Add some Log Bucket ACLs:
    log_acls = FakeAcl(
        [
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "WRITE",
            ),
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "READ_ACP",
            ),
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "FULL_CONTROL"),
        ]
    )
    s3_config_query_backend.put_bucket_acl("logbucket", log_acls)

    acls = s3_config_query_backend.buckets["logbucket"].acl.to_config_dict()
    assert acls == {
        "grantSet": None,
        "grantList": [
            # Group grantees are flattened to the group's short name:
            {"grantee": "LogDelivery", "permission": "Write"},
            {"grantee": "LogDelivery", "permission": "ReadAcp"},
            {
                "grantee": {
                    "displayName": None,
                    "id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
                },
                "permission": "FullControl",
            },
        ],
        "owner": {"displayName": None, "id": OWNER},
    }

    # Give the owner less than full_control permissions:
    log_acls = FakeAcl(
        [
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "READ_ACP"),
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "WRITE_ACP"),
        ]
    )
    s3_config_query_backend.put_bucket_acl("logbucket", log_acls)
    acls = s3_config_query_backend.buckets["logbucket"].acl.to_config_dict()
    assert acls == {
        "grantSet": None,
        "grantList": [
            {"grantee": {"id": OWNER, "displayName": None}, "permission": "ReadAcp"},
            {"grantee": {"id": OWNER, "displayName": None}, "permission": "WriteAcp"},
        ],
        "owner": {"displayName": None, "id": OWNER},
    }


@mock_aws
def test_s3_config_dict():
    """End-to-end check of `get_config_resource` for a bucket with tags, logging,
    ACLs, and a bucket policy — including region/name/account filtering."""
    if not settings.TEST_DECORATOR_MODE:
        raise unittest.SkipTest("Not using boto3 - no point in testing server-mode")
    s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]
    from moto.s3.models import OWNER, FakeAcl, FakeGrant, FakeGrantee

    # Without any buckets:
    assert not s3_config_query.get_config_resource(DEFAULT_ACCOUNT_ID, "some_bucket")

    tags = {"someTag": "someValue", "someOtherTag": "someOtherValue"}

    # With 1 bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")
    s3_config_query_backend.put_bucket_tagging("bucket1", tags)

    # With a log bucket:
    s3_config_query_backend.create_bucket("logbucket", "us-west-2")
    log_acls = FakeAcl(
        [
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "WRITE",
            ),
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "READ_ACP",
            ),
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "FULL_CONTROL"),
        ]
    )

    s3_config_query_backend.put_bucket_acl("logbucket", log_acls)
    s3_config_query_backend.put_bucket_logging(
        "bucket1", {"TargetBucket": "logbucket", "TargetPrefix": ""}
    )

    policy = json.dumps(
        {
            "Statement": [
                {
                    "Effect": "Deny",
                    "Action": "s3:DeleteObject",
                    "Principal": "*",
                    "Resource": "arn:aws:s3:::bucket1/*",
                }
            ]
        }
    )

    # The policy is a byte array -- need to encode in Python 3
    pass_policy = bytes(policy, "utf-8")
    s3_config_query_backend.put_bucket_policy("bucket1", pass_policy)

    # Get the us-west-2 bucket and verify that it works properly:
    bucket1_result = s3_config_query.get_config_resource(DEFAULT_ACCOUNT_ID, "bucket1")

    # Just verify a few things:
    assert bucket1_result["arn"] == "arn:aws:s3:::bucket1"
    assert bucket1_result["awsRegion"] == "us-west-2"
    assert bucket1_result["resourceName"] == bucket1_result["resourceId"] == "bucket1"
    assert bucket1_result["tags"] == {
        "someTag": "someValue",
        "someOtherTag": "someOtherValue",
    }
    assert json.loads(
        bucket1_result["supplementaryConfiguration"]["BucketTaggingConfiguration"]
    ) == {"tagSets": [{"tags": bucket1_result["tags"]}]}
    assert isinstance(bucket1_result["configuration"], str)
    # All supplementary configurations are JSON-encoded strings:
    exist_list = [
        "AccessControlList",
        "BucketAccelerateConfiguration",
        "BucketLoggingConfiguration",
        "BucketPolicy",
        "IsRequesterPaysEnabled",
        "BucketNotificationConfiguration",
    ]
    for exist in exist_list:
        assert isinstance(bucket1_result["supplementaryConfiguration"][exist], str)

    # Verify the logging config:
    assert json.loads(
        bucket1_result["supplementaryConfiguration"]["BucketLoggingConfiguration"]
    ) == {"destinationBucketName": "logbucket", "logFilePrefix": ""}

    # Verify that the AccessControlList is a double-wrapped JSON string:
    assert json.loads(
        json.loads(bucket1_result["supplementaryConfiguration"]["AccessControlList"])
    ) == {
        "grantSet": None,
        "grantList": [
            {
                "grantee": {
                    "displayName": None,
                    "id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
                },
                "permission": "FullControl",
            },
        ],
        "owner": {
            "displayName": None,
            "id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
        },
    }

    # Verify the policy:
    assert json.loads(bucket1_result["supplementaryConfiguration"]["BucketPolicy"]) == {
        "policyText": policy
    }

    # Filter by correct region:
    assert bucket1_result == s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_region="us-west-2"
    )

    # By incorrect region:
    assert not s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_region="eu-west-1"
    )

    # With correct resource ID and name:
    assert bucket1_result == s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_name="bucket1"
    )

    # With an incorrect resource name:
    assert not s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_name="eu-bucket-1"
    )

    # With an incorrect account:
    assert not s3_config_query.get_config_resource(
        "unknown-accountid", "bucket1", resource_name="bucket-1"
    )

    # Verify that no bucket policy returns the proper value:
    logging_bucket = s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "logbucket"
    )
    assert json.loads(logging_bucket["supplementaryConfiguration"]["BucketPolicy"]) == {
        "policyText": None
    }
    assert not logging_bucket["tags"]
    assert not logging_bucket["supplementaryConfiguration"].get(
        "BucketTaggingConfiguration"
    )