moto/tests/test_s3/test_s3_config.py

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

594 lines
19 KiB
Python
Raw Normal View History

import json

import pytest
import sure  # noqa # pylint: disable=unused-import

from moto import mock_s3
from moto.core.exceptions import InvalidNextTokenException
from moto.s3.config import s3_config_query
from tests import DEFAULT_ACCOUNT_ID

# Shortcut to the per-account "global" S3 backend that every test below pokes at.
s3_config_query_backend = s3_config_query.backends[DEFAULT_ACCOUNT_ID]["global"]
@mock_s3
def test_s3_public_access_block_to_config_dict():
    """Public-access-block settings should render correctly as a Config dict."""
    # One bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")

    # The S3 API hands the four flags over as strings:
    pab_settings = {
        "BlockPublicAcls": "True",
        "IgnorePublicAcls": "False",
        "BlockPublicPolicy": "True",
        "RestrictPublicBuckets": "False",
    }
    s3_config_query_backend.put_bucket_public_access_block("bucket1", pab_settings)

    config_dict = s3_config_query_backend.buckets[
        "bucket1"
    ].public_access_block.to_config_dict()

    # In the Config dict the keys are camelCased and the values become real bools:
    for name, raw_value in pab_settings.items():
        camel_key = name[0].lower() + name[1:]
        assert config_dict[camel_key] is (raw_value == "True")

    # The same dict must also appear (JSON-encoded) inside the bucket's
    # full to_config_dict output:
    bucket_dict = s3_config_query_backend.buckets["bucket1"].to_config_dict()
    supplementary = bucket_dict["supplementaryConfiguration"]
    assert json.loads(supplementary["PublicAccessBlockConfiguration"]) == config_dict
@mock_s3
def test_list_config_discovered_resources():
    """Exercise list_config_service_resources end to end.

    Covers the empty listing, multi-region listing, name/region/resource-id
    filters, de-duplication, pagination, and invalid next-token handling.
    Positional arguments throughout are
    (account_id, resource_ids, resource_name, limit, next_token).
    """
    # Without any buckets:
    # NOTE: previously this call used the pre-account-id argument layout
    # ("global", "global", None, None, 100, None) — i.e. "global" as both
    # account_id and resource_ids, and 100 as next_token — which was missed
    # in the account-id refactor. Fixed to match every other call below.
    assert s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 100, None
    ) == ([], None)

    # With 10 buckets in us-west-2:
    for x in range(0, 10):
        s3_config_query_backend.create_bucket(f"bucket{x}", "us-west-2")

    # With 2 buckets in eu-west-1:
    for x in range(10, 12):
        s3_config_query_backend.create_bucket(f"eu-bucket{x}", "eu-west-1")

    # Unfiltered listing returns everything, sorted, with no continuation token:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 100, None
    )
    assert not next_token
    assert len(result) == 12
    for x in range(0, 10):
        assert result[x] == {
            "type": "AWS::S3::Bucket",
            "id": f"bucket{x}",
            "name": f"bucket{x}",
            "region": "us-west-2",
        }
    for x in range(10, 12):
        assert result[x] == {
            "type": "AWS::S3::Bucket",
            "id": f"eu-bucket{x}",
            "name": f"eu-bucket{x}",
            "region": "eu-west-1",
        }

    # With a name:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, "bucket0", 100, None
    )
    assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token

    # With a region:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 100, None, resource_region="eu-west-1"
    )
    assert len(result) == 2 and not next_token and result[1]["name"] == "eu-bucket11"

    # With resource ids:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, ["bucket0", "bucket1"], None, 100, None
    )
    assert (
        len(result) == 2
        and result[0]["name"] == "bucket0"
        and result[1]["name"] == "bucket1"
        and not next_token
    )

    # With duplicated resource ids (must be de-duplicated):
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, ["bucket0", "bucket0"], None, 100, None
    )
    assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token

    # Pagination: limit of 1 yields the first bucket plus a token for the next:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 1, None
    )
    assert (
        len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
    )

    # Last page: no further token is returned:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, None, None, 1, "eu-bucket11", resource_region="eu-west-1"
    )
    assert len(result) == 1 and result[0]["name"] == "eu-bucket11" and not next_token

    # With a list of buckets and a limit of 1:
    result, next_token = s3_config_query.list_config_service_resources(
        DEFAULT_ACCOUNT_ID, ["bucket0", "bucket1"], None, 1, None
    )
    assert (
        len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
    )

    # With an invalid page token:
    with pytest.raises(InvalidNextTokenException) as inte:
        s3_config_query.list_config_service_resources(
            DEFAULT_ACCOUNT_ID, None, None, 1, "notabucket"
        )
    assert "The nextToken provided is invalid" in inte.value.message
@mock_s3
def test_s3_lifecycle_config_dict():
    """Each lifecycle rule must serialize to the expected Config dict shape."""
    # One bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")

    # Attach a lifecycle policy exercising the various rule shapes:
    rules = [
        {
            "ID": "rule1",
            "Status": "Enabled",
            "Filter": {"Prefix": ""},
            "Expiration": {"Days": 1},
        },
        {
            "ID": "rule2",
            "Status": "Enabled",
            "Filter": {
                "And": {
                    "Prefix": "some/path",
                    "Tag": [{"Key": "TheKey", "Value": "TheValue"}],
                }
            },
            "Expiration": {"Days": 1},
        },
        {"ID": "rule3", "Status": "Enabled", "Filter": {}, "Expiration": {"Days": 1}},
        {
            "ID": "rule4",
            "Status": "Enabled",
            "Filter": {"Prefix": ""},
            "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1},
        },
        {
            "ID": "rule5",
            "Status": "Enabled",
            "Filter": {"Prefix": ""},
            "Transition": [{"Days": 10, "StorageClass": "GLACIER"}],
            "NoncurrentVersionTransition": [
                {
                    "NoncurrentDays": 10,
                    "StorageClass": "GLACIER",
                }
            ],
        },
    ]
    s3_config_query_backend.put_bucket_lifecycle("bucket1", rules)

    # Serialize every stored rule:
    rule_dicts = [
        rule.to_config_dict()
        for rule in s3_config_query_backend.buckets["bucket1"].rules
    ]

    # Fields shared by every expected rule dict (overridden per-rule below):
    common = {
        "prefix": None,
        "status": "Enabled",
        "expirationInDays": None,
        "expiredObjectDeleteMarker": None,
        "noncurrentVersionExpirationInDays": -1,
        "expirationDate": None,
        "transitions": None,
        "noncurrentVersionTransitions": None,
        "abortIncompleteMultipartUpload": None,
    }
    empty_prefix_filter = {
        "predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}
    }

    # rule1: plain prefix filter with a 1-day expiration:
    assert rule_dicts[0] == {
        **common,
        "id": "rule1",
        "expirationInDays": 1,
        "filter": empty_prefix_filter,
    }

    # rule2: And-operator filter combining a prefix and a tag:
    assert rule_dicts[1] == {
        **common,
        "id": "rule2",
        "expirationInDays": 1,
        "filter": {
            "predicate": {
                "type": "LifecycleAndOperator",
                "operands": [
                    {"type": "LifecyclePrefixPredicate", "prefix": "some/path"},
                    {
                        "type": "LifecycleTagPredicate",
                        "tag": {"key": "TheKey", "value": "TheValue"},
                    },
                ],
            }
        },
    }

    # rule3: empty filter serializes with a None predicate:
    assert rule_dicts[2] == {
        **common,
        "id": "rule3",
        "expirationInDays": 1,
        "filter": {"predicate": None},
    }

    # rule4: abort-incomplete-multipart-upload instead of an expiration:
    assert rule_dicts[3] == {
        **common,
        "id": "rule4",
        "abortIncompleteMultipartUpload": {"daysAfterInitiation": 1},
        "filter": empty_prefix_filter,
    }

    # rule5: current and noncurrent-version transitions to GLACIER:
    assert rule_dicts[4] == {
        **common,
        "id": "rule5",
        "filter": empty_prefix_filter,
        "transitions": [{"days": 10, "storageClass": "GLACIER"}],
        "noncurrentVersionTransitions": [
            {"noncurrentDays": 10, "storageClass": "GLACIER"}
        ],
    }
@mock_s3
def test_s3_notification_config_dict():
    """Topic/Queue/Lambda notifications must serialize into the Config shape."""
    # One bucket in us-west-2:
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")

    # Configure one notification of each flavor:
    notification_config = {
        "TopicConfiguration": [
            {
                "Id": "Topic",
                "Topic": "arn:aws:sns:us-west-2:012345678910:mytopic",
                "Event": [
                    "s3:ReducedRedundancyLostObject",
                    "s3:ObjectRestore:Completed",
                ],
            }
        ],
        "QueueConfiguration": [
            {
                "Id": "Queue",
                "Queue": "arn:aws:sqs:us-west-2:012345678910:myqueue",
                "Event": ["s3:ObjectRemoved:Delete"],
                "Filter": {
                    "S3Key": {
                        "FilterRule": [{"Name": "prefix", "Value": "stuff/here/"}]
                    }
                },
            }
        ],
        "CloudFunctionConfiguration": [
            {
                "Id": "Lambda",
                "CloudFunction": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
                "Event": [
                    "s3:ObjectCreated:Post",
                    "s3:ObjectCreated:Copy",
                    "s3:ObjectCreated:Put",
                ],
                "Filter": {
                    "S3Key": {"FilterRule": [{"Name": "suffix", "Value": ".png"}]}
                },
            }
        ],
    }
    s3_config_query_backend.put_bucket_notification_configuration(
        "bucket1", notification_config
    )

    # Serialize the stored notification configuration:
    config_dict = s3_config_query_backend.buckets[
        "bucket1"
    ].notification_configuration.to_config_dict()

    # Verify the full serialized structure in one shot:
    assert config_dict == {
        "configurations": {
            "Topic": {
                "events": [
                    "s3:ReducedRedundancyLostObject",
                    "s3:ObjectRestore:Completed",
                ],
                "filter": None,
                "objectPrefixes": [],
                "topicARN": "arn:aws:sns:us-west-2:012345678910:mytopic",
                "type": "TopicConfiguration",
            },
            "Queue": {
                "events": ["s3:ObjectRemoved:Delete"],
                "filter": {
                    "s3KeyFilter": {
                        "filterRules": [{"name": "prefix", "value": "stuff/here/"}]
                    }
                },
                "objectPrefixes": [],
                "queueARN": "arn:aws:sqs:us-west-2:012345678910:myqueue",
                "type": "QueueConfiguration",
            },
            "Lambda": {
                "events": [
                    "s3:ObjectCreated:Post",
                    "s3:ObjectCreated:Copy",
                    "s3:ObjectCreated:Put",
                ],
                "filter": {
                    "s3KeyFilter": {
                        "filterRules": [{"name": "suffix", "value": ".png"}]
                    }
                },
                "objectPrefixes": [],
                "queueARN": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
                "type": "LambdaConfiguration",
            },
        }
    }
@mock_s3
def test_s3_acl_to_config_dict():
    """Bucket ACLs (default, group-grant, and owner-only) serialize correctly."""
    from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, OWNER

    # One bucket in us-west-2:
    s3_config_query_backend.create_bucket("logbucket", "us-west-2")

    # A freshly created bucket only carries the owner's FULL_CONTROL grant:
    acl_dict = s3_config_query_backend.buckets["logbucket"].acl.to_config_dict()
    owner_grant = {
        "grantee": {"id": OWNER, "displayName": None},
        "permission": "FullControl",
    }
    assert acl_dict == {
        "grantSet": None,
        "owner": {"displayName": None, "id": OWNER},
        "grantList": [owner_grant],
    }

    # Now grant the LogDelivery group WRITE and READ_ACP alongside the owner:
    delivery_acl = FakeAcl(
        [
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "WRITE",
            ),
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "READ_ACP",
            ),
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "FULL_CONTROL"),
        ]
    )
    s3_config_query_backend.put_bucket_acl("logbucket", delivery_acl)

    # Group grantees serialize as a bare group name; users keep the id form:
    acl_dict = s3_config_query_backend.buckets["logbucket"].acl.to_config_dict()
    assert acl_dict == {
        "grantSet": None,
        "grantList": [
            {"grantee": "LogDelivery", "permission": "Write"},
            {"grantee": "LogDelivery", "permission": "ReadAcp"},
            {
                "grantee": {
                    "displayName": None,
                    "id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
                },
                "permission": "FullControl",
            },
        ],
        "owner": {"displayName": None, "id": OWNER},
    }

    # The owner can also hold less than FULL_CONTROL:
    restricted_acl = FakeAcl(
        [
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "READ_ACP"),
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "WRITE_ACP"),
        ]
    )
    s3_config_query_backend.put_bucket_acl("logbucket", restricted_acl)
    acl_dict = s3_config_query_backend.buckets["logbucket"].acl.to_config_dict()
    assert acl_dict == {
        "grantSet": None,
        "grantList": [
            {"grantee": {"id": OWNER, "displayName": None}, "permission": "ReadAcp"},
            {"grantee": {"id": OWNER, "displayName": None}, "permission": "WriteAcp"},
        ],
        "owner": {"displayName": None, "id": OWNER},
    }
@mock_s3
def test_s3_config_dict():
    """End-to-end check of get_config_resource for a fully-configured bucket."""
    from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, OWNER

    # Without any buckets, lookups come back empty:
    assert not s3_config_query.get_config_resource(DEFAULT_ACCOUNT_ID, "some_bucket")

    # One tagged bucket in us-west-2:
    tags = {"someTag": "someValue", "someOtherTag": "someOtherValue"}
    s3_config_query_backend.create_bucket("bucket1", "us-west-2")
    s3_config_query_backend.put_bucket_tagging("bucket1", tags)

    # Plus a log-delivery bucket that bucket1 logs into:
    s3_config_query_backend.create_bucket("logbucket", "us-west-2")
    delivery_acl = FakeAcl(
        [
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "WRITE",
            ),
            FakeGrant(
                [FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
                "READ_ACP",
            ),
            FakeGrant([FakeGrantee(grantee_id=OWNER)], "FULL_CONTROL"),
        ]
    )
    s3_config_query_backend.put_bucket_acl("logbucket", delivery_acl)
    s3_config_query_backend.put_bucket_logging(
        "bucket1", {"TargetBucket": "logbucket", "TargetPrefix": ""}
    )

    # And a deny-delete bucket policy:
    policy = json.dumps(
        {
            "Statement": [
                {
                    "Effect": "Deny",
                    "Action": "s3:DeleteObject",
                    "Principal": "*",
                    "Resource": "arn:aws:s3:::bucket1/*",
                }
            ]
        }
    )
    # put_bucket_policy expects the policy document as bytes:
    s3_config_query_backend.put_bucket_policy("bucket1", policy.encode("utf-8"))

    # Fetch the us-west-2 bucket and spot-check the result:
    bucket1_result = s3_config_query.get_config_resource(DEFAULT_ACCOUNT_ID, "bucket1")
    assert bucket1_result["arn"] == "arn:aws:s3:::bucket1"
    assert bucket1_result["awsRegion"] == "us-west-2"
    assert bucket1_result["resourceName"] == bucket1_result["resourceId"] == "bucket1"
    assert bucket1_result["tags"] == {
        "someTag": "someValue",
        "someOtherTag": "someOtherValue",
    }
    assert json.loads(
        bucket1_result["supplementaryConfiguration"]["BucketTaggingConfiguration"]
    ) == {"tagSets": [{"tags": bucket1_result["tags"]}]}
    assert isinstance(bucket1_result["configuration"], str)

    # Every supplementary section should be present as a JSON string:
    for section in (
        "AccessControlList",
        "BucketAccelerateConfiguration",
        "BucketLoggingConfiguration",
        "BucketPolicy",
        "IsRequesterPaysEnabled",
        "BucketNotificationConfiguration",
    ):
        assert isinstance(bucket1_result["supplementaryConfiguration"][section], str)

    # Verify the logging config:
    assert json.loads(
        bucket1_result["supplementaryConfiguration"]["BucketLoggingConfiguration"]
    ) == {"destinationBucketName": "logbucket", "logFilePrefix": ""}

    # The AccessControlList is a double-wrapped JSON string:
    assert json.loads(
        json.loads(bucket1_result["supplementaryConfiguration"]["AccessControlList"])
    ) == {
        "grantSet": None,
        "grantList": [
            {
                "grantee": {
                    "displayName": None,
                    "id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
                },
                "permission": "FullControl",
            },
        ],
        "owner": {
            "displayName": None,
            "id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
        },
    }

    # Verify the policy text round-trips:
    assert json.loads(bucket1_result["supplementaryConfiguration"]["BucketPolicy"]) == {
        "policyText": policy
    }

    # Region filter — correct region matches:
    assert bucket1_result == s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_region="us-west-2"
    )
    # ...incorrect region does not:
    assert not s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_region="eu-west-1"
    )
    # Name filter — matching resource name:
    assert bucket1_result == s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_name="bucket1"
    )
    # ...mismatched resource name:
    assert not s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "bucket1", resource_name="eu-bucket-1"
    )
    # ...unknown account:
    assert not s3_config_query.get_config_resource(
        "unknown-accountid", "bucket1", resource_name="bucket-1"
    )

    # A bucket without a policy reports policyText: None, and the untagged
    # log bucket carries no tagging configuration:
    logging_bucket = s3_config_query.get_config_resource(
        DEFAULT_ACCOUNT_ID, "logbucket"
    )
    assert json.loads(logging_bucket["supplementaryConfiguration"]["BucketPolicy"]) == {
        "policyText": None
    }
    assert not logging_bucket["tags"]
    assert not logging_bucket["supplementaryConfiguration"].get(
        "BucketTaggingConfiguration"
    )