S3 - Refactor tests

Bert Blommers 2022-02-24 20:42:38 -01:00
parent 6733947a8c
commit f15e451815
5 changed files with 2194 additions and 2193 deletions

File diff suppressed because it is too large


@@ -0,0 +1,577 @@
import json
import pytest
import sure # noqa # pylint: disable=unused-import
from moto import mock_s3
from moto.core.exceptions import InvalidNextTokenException
@mock_s3
def test_s3_public_access_block_to_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
public_access_block = {
"BlockPublicAcls": "True",
"IgnorePublicAcls": "False",
"BlockPublicPolicy": "True",
"RestrictPublicBuckets": "False",
}
# Add a public access block:
s3_config_query.backends["global"].put_bucket_public_access_block(
"bucket1", public_access_block
)
result = (
s3_config_query.backends["global"]
.buckets["bucket1"]
.public_access_block.to_config_dict()
)
convert_bool = lambda x: x == "True"
for key, value in public_access_block.items():
assert result[
"{lowercase}{rest}".format(lowercase=key[0].lower(), rest=key[1:])
] == convert_bool(value)
# Verify that this resides in the full bucket's to_config_dict:
full_result = s3_config_query.backends["global"].buckets["bucket1"].to_config_dict()
assert (
json.loads(
full_result["supplementaryConfiguration"]["PublicAccessBlockConfiguration"]
)
== result
)
@mock_s3
def test_list_config_discovered_resources():
from moto.s3.config import s3_config_query
# Without any buckets:
assert s3_config_query.list_config_service_resources(
None, None, 100, None
) == ([], None)
# With 10 buckets in us-west-2:
for x in range(0, 10):
s3_config_query.backends["global"].create_bucket(
"bucket{}".format(x), "us-west-2"
)
# With 2 buckets in eu-west-1:
for x in range(10, 12):
s3_config_query.backends["global"].create_bucket(
"eu-bucket{}".format(x), "eu-west-1"
)
result, next_token = s3_config_query.list_config_service_resources(
None, None, 100, None
)
assert not next_token
assert len(result) == 12
for x in range(0, 10):
assert result[x] == {
"type": "AWS::S3::Bucket",
"id": "bucket{}".format(x),
"name": "bucket{}".format(x),
"region": "us-west-2",
}
for x in range(10, 12):
assert result[x] == {
"type": "AWS::S3::Bucket",
"id": "eu-bucket{}".format(x),
"name": "eu-bucket{}".format(x),
"region": "eu-west-1",
}
# With a name:
result, next_token = s3_config_query.list_config_service_resources(
None, "bucket0", 100, None
)
assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token
# With a region:
result, next_token = s3_config_query.list_config_service_resources(
None, None, 100, None, resource_region="eu-west-1"
)
assert len(result) == 2 and not next_token and result[1]["name"] == "eu-bucket11"
# With resource ids:
result, next_token = s3_config_query.list_config_service_resources(
["bucket0", "bucket1"], None, 100, None
)
assert (
len(result) == 2
and result[0]["name"] == "bucket0"
and result[1]["name"] == "bucket1"
and not next_token
)
# With duplicated resource ids:
result, next_token = s3_config_query.list_config_service_resources(
["bucket0", "bucket0"], None, 100, None
)
assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token
# Pagination:
result, next_token = s3_config_query.list_config_service_resources(
None, None, 1, None
)
assert (
len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
)
# Last Page:
result, next_token = s3_config_query.list_config_service_resources(
None, None, 1, "eu-bucket11", resource_region="eu-west-1"
)
assert len(result) == 1 and result[0]["name"] == "eu-bucket11" and not next_token
# With a list of buckets:
result, next_token = s3_config_query.list_config_service_resources(
["bucket0", "bucket1"], None, 1, None
)
assert (
len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
)
# With an invalid page:
with pytest.raises(InvalidNextTokenException) as inte:
s3_config_query.list_config_service_resources(None, None, 1, "notabucket")
assert "The nextToken provided is invalid" in inte.value.message
@mock_s3
def test_s3_lifecycle_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
# And a lifecycle policy
lifecycle = [
{
"ID": "rule1",
"Status": "Enabled",
"Filter": {"Prefix": ""},
"Expiration": {"Days": 1},
},
{
"ID": "rule2",
"Status": "Enabled",
"Filter": {
"And": {
"Prefix": "some/path",
"Tag": [{"Key": "TheKey", "Value": "TheValue"}],
}
},
"Expiration": {"Days": 1},
},
{"ID": "rule3", "Status": "Enabled", "Filter": {}, "Expiration": {"Days": 1}},
{
"ID": "rule4",
"Status": "Enabled",
"Filter": {"Prefix": ""},
"AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1},
},
]
s3_config_query.backends["global"].put_bucket_lifecycle("bucket1", lifecycle)
# Get the rules for this:
lifecycles = [
rule.to_config_dict()
for rule in s3_config_query.backends["global"].buckets["bucket1"].rules
]
# Verify the first:
assert lifecycles[0] == {
"id": "rule1",
"prefix": None,
"status": "Enabled",
"expirationInDays": 1,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": None,
"filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
}
# Verify the second:
assert lifecycles[1] == {
"id": "rule2",
"prefix": None,
"status": "Enabled",
"expirationInDays": 1,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": None,
"filter": {
"predicate": {
"type": "LifecycleAndOperator",
"operands": [
{"type": "LifecyclePrefixPredicate", "prefix": "some/path"},
{
"type": "LifecycleTagPredicate",
"tag": {"key": "TheKey", "value": "TheValue"},
},
],
}
},
}
# And the third:
assert lifecycles[2] == {
"id": "rule3",
"prefix": None,
"status": "Enabled",
"expirationInDays": 1,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": None,
"filter": {"predicate": None},
}
# And the last:
assert lifecycles[3] == {
"id": "rule4",
"prefix": None,
"status": "Enabled",
"expirationInDays": None,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": {"daysAfterInitiation": 1},
"filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
}
@mock_s3
def test_s3_notification_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
# And some notifications:
notifications = {
"TopicConfiguration": [
{
"Id": "Topic",
"Topic": "arn:aws:sns:us-west-2:012345678910:mytopic",
"Event": [
"s3:ReducedRedundancyLostObject",
"s3:ObjectRestore:Completed",
],
}
],
"QueueConfiguration": [
{
"Id": "Queue",
"Queue": "arn:aws:sqs:us-west-2:012345678910:myqueue",
"Event": ["s3:ObjectRemoved:Delete"],
"Filter": {
"S3Key": {
"FilterRule": [{"Name": "prefix", "Value": "stuff/here/"}]
}
},
}
],
"CloudFunctionConfiguration": [
{
"Id": "Lambda",
"CloudFunction": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
"Event": [
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:Put",
],
"Filter": {
"S3Key": {"FilterRule": [{"Name": "suffix", "Value": ".png"}]}
},
}
],
}
s3_config_query.backends["global"].put_bucket_notification_configuration(
"bucket1", notifications
)
# Get the notifications for this:
notifications = (
s3_config_query.backends["global"]
.buckets["bucket1"]
.notification_configuration.to_config_dict()
)
# Verify it all:
assert notifications == {
"configurations": {
"Topic": {
"events": [
"s3:ReducedRedundancyLostObject",
"s3:ObjectRestore:Completed",
],
"filter": None,
"objectPrefixes": [],
"topicARN": "arn:aws:sns:us-west-2:012345678910:mytopic",
"type": "TopicConfiguration",
},
"Queue": {
"events": ["s3:ObjectRemoved:Delete"],
"filter": {
"s3KeyFilter": {
"filterRules": [{"name": "prefix", "value": "stuff/here/"}]
}
},
"objectPrefixes": [],
"queueARN": "arn:aws:sqs:us-west-2:012345678910:myqueue",
"type": "QueueConfiguration",
},
"Lambda": {
"events": [
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:Put",
],
"filter": {
"s3KeyFilter": {
"filterRules": [{"name": "suffix", "value": ".png"}]
}
},
"objectPrefixes": [],
"queueARN": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
"type": "LambdaConfiguration",
},
}
}
@mock_s3
def test_s3_acl_to_config_dict():
from moto.s3.config import s3_config_query
from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, OWNER
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2")
# Get the config dict with nothing other than the owner details:
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
owner_acl = {
"grantee": {"id": OWNER, "displayName": None},
"permission": "FullControl",
}
assert acls == {
"grantSet": None,
"owner": {"displayName": None, "id": OWNER},
"grantList": [owner_acl],
}
# Add some Log Bucket ACLs:
log_acls = FakeAcl(
[
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"WRITE",
),
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"READ_ACP",
),
FakeGrant([FakeGrantee(grantee_id=OWNER)], "FULL_CONTROL"),
]
)
s3_config_query.backends["global"].put_bucket_acl("logbucket", log_acls)
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {
"grantSet": None,
"grantList": [
{"grantee": "LogDelivery", "permission": "Write"},
{"grantee": "LogDelivery", "permission": "ReadAcp"},
{
"grantee": {
"displayName": None,
"id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
},
"permission": "FullControl",
},
],
"owner": {"displayName": None, "id": OWNER},
}
# Give the owner less than full_control permissions:
log_acls = FakeAcl(
[
FakeGrant([FakeGrantee(grantee_id=OWNER)], "READ_ACP"),
FakeGrant([FakeGrantee(grantee_id=OWNER)], "WRITE_ACP"),
]
)
s3_config_query.backends["global"].put_bucket_acl("logbucket", log_acls)
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {
"grantSet": None,
"grantList": [
{"grantee": {"id": OWNER, "displayName": None}, "permission": "ReadAcp"},
{"grantee": {"id": OWNER, "displayName": None}, "permission": "WriteAcp"},
],
"owner": {"displayName": None, "id": OWNER},
}
@mock_s3
def test_s3_config_dict():
from moto.s3.config import s3_config_query
from moto.s3.models import (
FakeAcl,
FakeGrant,
FakeGrantee,
OWNER,
)
# Without any buckets:
assert not s3_config_query.get_config_resource("some_bucket")
tags = {"someTag": "someValue", "someOtherTag": "someOtherValue"}
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
s3_config_query.backends["global"].put_bucket_tagging("bucket1", tags)
# With a log bucket:
s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2")
log_acls = FakeAcl(
[
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"WRITE",
),
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"READ_ACP",
),
FakeGrant([FakeGrantee(grantee_id=OWNER)], "FULL_CONTROL"),
]
)
s3_config_query.backends["global"].put_bucket_acl("logbucket", log_acls)
s3_config_query.backends["global"].put_bucket_logging(
"bucket1", {"TargetBucket": "logbucket", "TargetPrefix": ""}
)
policy = json.dumps(
{
"Statement": [
{
"Effect": "Deny",
"Action": "s3:DeleteObject",
"Principal": "*",
"Resource": "arn:aws:s3:::bucket1/*",
}
]
}
)
# The policy is a byte array -- need to encode in Python 3
pass_policy = bytes(policy, "utf-8")
s3_config_query.backends["global"].put_bucket_policy("bucket1", pass_policy)
# Get the us-west-2 bucket and verify that it works properly:
bucket1_result = s3_config_query.get_config_resource("bucket1")
# Just verify a few things:
assert bucket1_result["arn"] == "arn:aws:s3:::bucket1"
assert bucket1_result["awsRegion"] == "us-west-2"
assert bucket1_result["resourceName"] == bucket1_result["resourceId"] == "bucket1"
assert bucket1_result["tags"] == {
"someTag": "someValue",
"someOtherTag": "someOtherValue",
}
assert json.loads(
bucket1_result["supplementaryConfiguration"]["BucketTaggingConfiguration"]
) == {"tagSets": [{"tags": bucket1_result["tags"]}]}
assert isinstance(bucket1_result["configuration"], str)
exist_list = [
"AccessControlList",
"BucketAccelerateConfiguration",
"BucketLoggingConfiguration",
"BucketPolicy",
"IsRequesterPaysEnabled",
"BucketNotificationConfiguration",
]
for exist in exist_list:
assert isinstance(bucket1_result["supplementaryConfiguration"][exist], str)
# Verify the logging config:
assert json.loads(
bucket1_result["supplementaryConfiguration"]["BucketLoggingConfiguration"]
) == {"destinationBucketName": "logbucket", "logFilePrefix": ""}
# Verify that the AccessControlList is a double-wrapped JSON string:
assert json.loads(
json.loads(bucket1_result["supplementaryConfiguration"]["AccessControlList"])
) == {
"grantSet": None,
"grantList": [
{
"grantee": {
"displayName": None,
"id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
},
"permission": "FullControl",
},
],
"owner": {
"displayName": None,
"id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
},
}
# Verify the policy:
assert json.loads(bucket1_result["supplementaryConfiguration"]["BucketPolicy"]) == {
"policyText": policy
}
# Filter by correct region:
assert bucket1_result == s3_config_query.get_config_resource(
"bucket1", resource_region="us-west-2"
)
# By incorrect region:
assert not s3_config_query.get_config_resource(
"bucket1", resource_region="eu-west-1"
)
# With correct resource ID and name:
assert bucket1_result == s3_config_query.get_config_resource(
"bucket1", resource_name="bucket1"
)
# With an incorrect resource name:
assert not s3_config_query.get_config_resource(
"bucket1", resource_name="eu-bucket-1"
)
# Verify that no bucket policy returns the proper value:
logging_bucket = s3_config_query.get_config_resource("logbucket")
assert json.loads(logging_bucket["supplementaryConfiguration"]["BucketPolicy"]) == {
"policyText": None
}
assert not logging_bucket["tags"]
assert not logging_bucket["supplementaryConfiguration"].get(
"BucketTaggingConfiguration"
)


@@ -0,0 +1,383 @@
import boto3
from botocore.client import ClientError
from moto.s3.responses import DEFAULT_REGION_NAME
import pytest
import sure # noqa # pylint: disable=unused-import
from moto import mock_s3, mock_kms
@pytest.mark.parametrize(
"key_name",
[
"the-key",
"the-unicode-💩-key",
"key-with?question-mark",
"key-with%2Fembedded%2Furl%2Fencoding",
],
)
@mock_s3
def test_copy_key_boto3(key_name):
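# Copy an object within one bucket; both the source key and the new key should return the original body.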
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
key = s3.Object("foobar", key_name)
key.put(Body=b"some value")
key2 = s3.Object("foobar", "new-key")
key2.copy_from(CopySource="foobar/{}".format(key_name))
resp = client.get_object(Bucket="foobar", Key=key_name)
resp["Body"].read().should.equal(b"some value")
resp = client.get_object(Bucket="foobar", Key="new-key")
resp["Body"].read().should.equal(b"some value")
@mock_s3
def test_copy_key_with_version_boto3():
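# Copy from an explicit older version: the target gets the old content while the source still serves the latest.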
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
client.put_bucket_versioning(
Bucket="foobar", VersioningConfiguration={"Status": "Enabled"}
)
key = s3.Object("foobar", "the-key")
key.put(Body=b"some value")
key.put(Body=b"another value")
all_versions = client.list_object_versions(Bucket="foobar", Prefix="the-key")[
"Versions"
]
old_version = [v for v in all_versions if not v["IsLatest"]][0]
key2 = s3.Object("foobar", "new-key")
key2.copy_from(
CopySource="foobar/the-key?versionId={}".format(old_version["VersionId"])
)
resp = client.get_object(Bucket="foobar", Key="the-key")
resp["Body"].read().should.equal(b"another value")
resp = client.get_object(Bucket="foobar", Key="new-key")
resp["Body"].read().should.equal(b"some value")
@mock_s3
def test_copy_object_with_bucketkeyenabled_returns_the_value():
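# The encryption headers should only appear on the copy, which was written with SSE-KMS and BucketKeyEnabled.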
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "test-copy-object-with-bucketkeyenabled"
s3.create_bucket(Bucket=bucket_name)
key = s3.Object(bucket_name, "the-key")
key.put(Body=b"some value")
key2 = s3.Object(bucket_name, "new-key")
key2.copy_from(
CopySource=f"{bucket_name}/the-key",
BucketKeyEnabled=True,
ServerSideEncryption="aws:kms",
)
resp = client.get_object(Bucket=bucket_name, Key="the-key")
src_headers = resp["ResponseMetadata"]["HTTPHeaders"]
src_headers.shouldnt.have.key("x-amz-server-side-encryption")
src_headers.shouldnt.have.key("x-amz-server-side-encryption-aws-kms-key-id")
src_headers.shouldnt.have.key("x-amz-server-side-encryption-bucket-key-enabled")
resp = client.get_object(Bucket=bucket_name, Key="new-key")
target_headers = resp["ResponseMetadata"]["HTTPHeaders"]
target_headers.should.have.key("x-amz-server-side-encryption")
# AWS will also return the KMS default key id - not yet implemented
# target_headers.should.have.key("x-amz-server-side-encryption-aws-kms-key-id")
# This field is only returned if encryption is set to 'aws:kms'
target_headers.should.have.key("x-amz-server-side-encryption-bucket-key-enabled")
str(
target_headers["x-amz-server-side-encryption-bucket-key-enabled"]
).lower().should.equal("true")
@mock_s3
def test_copy_key_with_metadata():
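# A plain copy should carry over metadata, content type and ETag unchanged.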
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
key = s3.Object("foobar", "the-key")
metadata = {"md": "Metadatastring"}
content_type = "application/json"
initial = key.put(Body=b"{}", Metadata=metadata, ContentType=content_type)
client.copy_object(
Bucket="foobar", CopySource="foobar/the-key", Key="new-key",
)
resp = client.get_object(Bucket="foobar", Key="new-key")
resp["Metadata"].should.equal(metadata)
resp["ContentType"].should.equal(content_type)
resp["ETag"].should.equal(initial["ETag"])
@mock_s3
def test_copy_key_replace_metadata():
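# MetadataDirective=REPLACE swaps the metadata while the body (and therefore the ETag) stays the same.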
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
key = s3.Object("foobar", "the-key")
initial = key.put(Body=b"some value", Metadata={"md": "Metadatastring"})
client.copy_object(
Bucket="foobar",
CopySource="foobar/the-key",
Key="new-key",
Metadata={"momd": "Mometadatastring"},
MetadataDirective="REPLACE",
)
resp = client.get_object(Bucket="foobar", Key="new-key")
resp["Metadata"].should.equal({"momd": "Mometadatastring"})
resp["ETag"].should.equal(initial["ETag"])
@mock_s3
def test_copy_key_without_changes_should_error():
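# Copying an object onto itself without changing anything about it is rejected.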
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "my_bucket"
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
key_name = "my_key"
key = s3.Object(bucket_name, key_name)
s3.create_bucket(Bucket=bucket_name)
key.put(Body=b"some value")
with pytest.raises(ClientError) as e:
client.copy_object(
Bucket=bucket_name,
CopySource="{}/{}".format(bucket_name, key_name),
Key=key_name,
)
e.value.response["Error"]["Message"].should.equal(
"This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes."
)
@mock_s3
def test_copy_key_without_changes_should_not_error():
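# Copying an object onto itself is allowed when the metadata is replaced.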
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "my_bucket"
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
key_name = "my_key"
key = s3.Object(bucket_name, key_name)
s3.create_bucket(Bucket=bucket_name)
key.put(Body=b"some value")
client.copy_object(
Bucket=bucket_name,
CopySource="{}/{}".format(bucket_name, key_name),
Key=key_name,
Metadata={"some-key": "some-value"},
MetadataDirective="REPLACE",
)
new_object = client.get_object(Bucket=bucket_name, Key=key_name)
assert new_object["Metadata"] == {"some-key": "some-value"}
@mock_s3
def test_copy_key_reduced_redundancy():
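# The copy should get the requested storage class; the source stays STANDARD.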
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket = s3.Bucket("test_bucket")
bucket.create()
bucket.put_object(Key="the-key", Body=b"somedata")
client.copy_object(
Bucket="test_bucket",
CopySource="test_bucket/the-key",
Key="new-key",
StorageClass="REDUCED_REDUNDANCY",
)
keys = dict([(k.key, k) for k in bucket.objects.all()])
keys["new-key"].storage_class.should.equal("REDUCED_REDUNDANCY")
keys["the-key"].storage_class.should.equal("STANDARD")
@mock_s3
def test_copy_non_existing_file():
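# Copying a source key that does not exist should raise NoSuchKey.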
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
src = "srcbucket"
target = "target"
s3.create_bucket(Bucket=src)
s3.create_bucket(Bucket=target)
s3_client = boto3.client("s3")
with pytest.raises(ClientError) as exc:
s3_client.copy_object(
Bucket=target, CopySource={"Bucket": src, "Key": "foofoofoo"}, Key="newkey"
)
err = exc.value.response["Error"]
err["Code"].should.equal("NoSuchKey")
err["Message"].should.equal("The specified key does not exist.")
err["Key"].should.equal("foofoofoo")
@mock_s3
def test_copy_object_with_versioning():
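# In a versioned bucket every copy gets a fresh VersionId, and an old version can serve as the copy source.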
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(
Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.put_bucket_versioning(
Bucket="blah", VersioningConfiguration={"Status": "Enabled"}
)
client.put_object(Bucket="blah", Key="test1", Body=b"test1")
client.put_object(Bucket="blah", Key="test2", Body=b"test2")
client.get_object(Bucket="blah", Key="test1")["VersionId"]
obj2_version = client.get_object(Bucket="blah", Key="test2")["VersionId"]
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test1"}, Bucket="blah", Key="test2"
)
obj2_version_new = client.get_object(Bucket="blah", Key="test2")["VersionId"]
# Version should be different from the previous version
obj2_version_new.should_not.equal(obj2_version)
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test2", "VersionId": obj2_version},
Bucket="blah",
Key="test3",
)
obj3_version_new = client.get_object(Bucket="blah", Key="test3")["VersionId"]
obj3_version_new.should_not.equal(obj2_version_new)
# Copy file that doesn't exist
with pytest.raises(ClientError) as e:
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test4", "VersionId": obj2_version},
Bucket="blah",
Key="test5",
)
e.value.response["Error"]["Code"].should.equal("NoSuchKey")
response = client.create_multipart_upload(Bucket="blah", Key="test4")
upload_id = response["UploadId"]
response = client.upload_part_copy(
Bucket="blah",
Key="test4",
CopySource={"Bucket": "blah", "Key": "test3", "VersionId": obj3_version_new},
UploadId=upload_id,
PartNumber=1,
)
etag = response["CopyPartResult"]["ETag"]
client.complete_multipart_upload(
Bucket="blah",
Key="test4",
UploadId=upload_id,
MultipartUpload={"Parts": [{"ETag": etag, "PartNumber": 1}]},
)
response = client.get_object(Bucket="blah", Key="test4")
data = response["Body"].read()
data.should.equal(b"test2")
@mock_s3
def test_copy_object_from_unversioned_to_versioned_bucket():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(
Bucket="src", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.create_bucket(
Bucket="dest", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.put_bucket_versioning(
Bucket="dest", VersioningConfiguration={"Status": "Enabled"}
)
client.put_object(Bucket="src", Key="test", Body=b"content")
obj2_version_new = client.copy_object(
CopySource={"Bucket": "src", "Key": "test"}, Bucket="dest", Key="test"
).get("VersionId")
# VersionId should be present in the response
obj2_version_new.should_not.equal(None)
@mock_s3
def test_copy_object_with_replacement_tagging():
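# TaggingDirective=REPLACE sets new tags (system aws: tags are rejected), while COPY keeps the source tags.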
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="mybucket")
client.put_object(
Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old"
)
# using system tags will fail
with pytest.raises(ClientError) as err:
client.copy_object(
CopySource={"Bucket": "mybucket", "Key": "original"},
Bucket="mybucket",
Key="copy1",
TaggingDirective="REPLACE",
Tagging="aws:tag=invalid_key",
)
e = err.value
e.response["Error"]["Code"].should.equal("InvalidTag")
client.copy_object(
CopySource={"Bucket": "mybucket", "Key": "original"},
Bucket="mybucket",
Key="copy1",
TaggingDirective="REPLACE",
Tagging="tag=new",
)
client.copy_object(
CopySource={"Bucket": "mybucket", "Key": "original"},
Bucket="mybucket",
Key="copy2",
TaggingDirective="COPY",
)
tags1 = client.get_object_tagging(Bucket="mybucket", Key="copy1")["TagSet"]
tags1.should.equal([{"Key": "tag", "Value": "new"}])
tags2 = client.get_object_tagging(Bucket="mybucket", Key="copy2")["TagSet"]
tags2.should.equal([{"Key": "tag", "Value": "old"}])
@mock_s3
@mock_kms
def test_copy_object_with_kms_encryption():
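# head_object on the copy should report the supplied KMS key id and aws:kms encryption.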
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
kms_client = boto3.client("kms", region_name=DEFAULT_REGION_NAME)
kms_key = kms_client.create_key()["KeyMetadata"]["KeyId"]
client.create_bucket(
Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.put_object(Bucket="blah", Key="test1", Body=b"test1")
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test1"},
Bucket="blah",
Key="test2",
SSEKMSKeyId=kms_key,
ServerSideEncryption="aws:kms",
)
result = client.head_object(Bucket="blah", Key="test2")
assert result["SSEKMSKeyId"] == kms_key
assert result["ServerSideEncryption"] == "aws:kms"


@@ -1,11 +1,410 @@
import os
from functools import wraps
from io import BytesIO
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.client import ClientError
from moto import settings, mock_s3
from moto.s3.responses import DEFAULT_REGION_NAME
import moto.s3.models as s3model
from moto.settings import get_s3_default_key_buffer_size, S3_UPLOAD_PART_MIN_SIZE
if settings.TEST_SERVER_MODE:
REDUCED_PART_SIZE = S3_UPLOAD_PART_MIN_SIZE
EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"'
else:
REDUCED_PART_SIZE = 256
EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"'
def reduced_min_part_size(f):
"""speed up tests by temporarily making the multipart minimum part size
small
"""
orig_size = S3_UPLOAD_PART_MIN_SIZE
@wraps(f)
def wrapped(*args, **kwargs):
try:
s3model.S3_UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
return f(*args, **kwargs)
finally:
s3model.S3_UPLOAD_PART_MIN_SIZE = orig_size
return wrapped
@mock_s3
def test_default_key_buffer_size():
# save original DEFAULT_KEY_BUFFER_SIZE environment variable content
original_default_key_buffer_size = os.environ.get(
"MOTO_S3_DEFAULT_KEY_BUFFER_SIZE", None
)
os.environ["MOTO_S3_DEFAULT_KEY_BUFFER_SIZE"] = "2" # 2 bytes
assert get_s3_default_key_buffer_size() == 2
fk = s3model.FakeKey("a", os.urandom(1)) # 1 byte string
assert fk._value_buffer._rolled is False
os.environ["MOTO_S3_DEFAULT_KEY_BUFFER_SIZE"] = "1" # 1 byte
assert get_s3_default_key_buffer_size() == 1
fk = s3model.FakeKey("a", os.urandom(3)) # 3 byte string
assert fk._value_buffer._rolled is True
# if no MOTO_S3_DEFAULT_KEY_BUFFER_SIZE env variable is present, the buffer size should be less than
# S3_UPLOAD_PART_MIN_SIZE to prevent in-memory caching of multipart uploads
del os.environ["MOTO_S3_DEFAULT_KEY_BUFFER_SIZE"]
assert get_s3_default_key_buffer_size() < S3_UPLOAD_PART_MIN_SIZE
# restore original environment variable content
if original_default_key_buffer_size:
os.environ["MOTO_S3_DEFAULT_KEY_BUFFER_SIZE"] = original_default_key_buffer_size
@mock_s3
def test_multipart_upload_too_small():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
mp = client.create_multipart_upload(Bucket="foobar", Key="the-key")
up1 = client.upload_part(
Body=BytesIO(b"hello"),
PartNumber=1,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
up2 = client.upload_part(
Body=BytesIO(b"world"),
PartNumber=2,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
# Multipart with total size under 5MB is refused
with pytest.raises(ClientError) as ex:
client.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={
"Parts": [
{"ETag": up1["ETag"], "PartNumber": 1},
{"ETag": up2["ETag"], "PartNumber": 2},
]
},
UploadId=mp["UploadId"],
)
ex.value.response["Error"]["Code"].should.equal("EntityTooSmall")
ex.value.response["Error"]["Message"].should.equal(
"Your proposed upload is smaller than the minimum allowed object size."
)
@mock_s3
@reduced_min_part_size
def test_multipart_upload():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
part1 = b"0" * REDUCED_PART_SIZE
part2 = b"1"
mp = client.create_multipart_upload(Bucket="foobar", Key="the-key")
up1 = client.upload_part(
Body=BytesIO(part1),
PartNumber=1,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
up2 = client.upload_part(
Body=BytesIO(part2),
PartNumber=2,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
client.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={
"Parts": [
{"ETag": up1["ETag"], "PartNumber": 1},
{"ETag": up2["ETag"], "PartNumber": 2},
]
},
UploadId=mp["UploadId"],
)
# we should get both parts as the key contents
response = client.get_object(Bucket="foobar", Key="the-key")
response["Body"].read().should.equal(part1 + part2)
@mock_s3
@reduced_min_part_size
def test_multipart_upload_out_of_order():
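# Parts may be uploaded under arbitrary, non-consecutive part numbers.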
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
part1 = b"0" * REDUCED_PART_SIZE
part2 = b"1"
mp = client.create_multipart_upload(Bucket="foobar", Key="the-key")
up1 = client.upload_part(
Body=BytesIO(part1),
PartNumber=4,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
up2 = client.upload_part(
Body=BytesIO(part2),
PartNumber=2,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
client.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={
"Parts": [
{"ETag": up1["ETag"], "PartNumber": 4},
{"ETag": up2["ETag"], "PartNumber": 2},
]
},
UploadId=mp["UploadId"],
)
# we should get both parts as the key contents
response = client.get_object(Bucket="foobar", Key="the-key")
response["Body"].read().should.equal(part1 + part2)
@mock_s3
@reduced_min_part_size
def test_multipart_upload_with_headers():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
part1 = b"0" * REDUCED_PART_SIZE
mp = client.create_multipart_upload(
Bucket="foobar", Key="the-key", Metadata={"meta": "data"}
)
up1 = client.upload_part(
Body=BytesIO(part1),
PartNumber=1,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
client.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={"Parts": [{"ETag": up1["ETag"], "PartNumber": 1}]},
UploadId=mp["UploadId"],
)
# the metadata should be set on the resulting key
response = client.get_object(Bucket="foobar", Key="the-key")
response["Metadata"].should.equal({"meta": "data"})
@pytest.mark.parametrize(
"original_key_name",
[
"original-key",
"the-unicode-💩-key",
"key-with?question-mark",
"key-with%2Fembedded%2Furl%2Fencoding",
],
)
@mock_s3
@reduced_min_part_size
def test_multipart_upload_with_copy_key(original_key_name):
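# Combine a freshly uploaded part with a part copied from an existing key, limited to a byte range.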
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
s3.put_object(Bucket="foobar", Key=original_key_name, Body="key_value")
mpu = s3.create_multipart_upload(Bucket="foobar", Key="the-key")
part1 = b"0" * REDUCED_PART_SIZE
up1 = s3.upload_part(
Bucket="foobar",
Key="the-key",
PartNumber=1,
UploadId=mpu["UploadId"],
Body=BytesIO(part1),
)
up2 = s3.upload_part_copy(
Bucket="foobar",
Key="the-key",
CopySource={"Bucket": "foobar", "Key": original_key_name},
CopySourceRange="0-3",
PartNumber=2,
UploadId=mpu["UploadId"],
)
s3.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={
"Parts": [
{"ETag": up1["ETag"], "PartNumber": 1},
{"ETag": up2["CopyPartResult"]["ETag"], "PartNumber": 2},
]
},
UploadId=mpu["UploadId"],
)
response = s3.get_object(Bucket="foobar", Key="the-key")
response["Body"].read().should.equal(part1 + b"key_")
@mock_s3
@reduced_min_part_size
def test_multipart_upload_cancel():
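# Aborting an in-progress upload should remove it from list_multipart_uploads.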
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
mpu = s3.create_multipart_upload(Bucket="foobar", Key="the-key")
part1 = b"0" * REDUCED_PART_SIZE
s3.upload_part(
Bucket="foobar",
Key="the-key",
PartNumber=1,
UploadId=mpu["UploadId"],
Body=BytesIO(part1),
)
uploads = s3.list_multipart_uploads(Bucket="foobar")["Uploads"]
uploads.should.have.length_of(1)
uploads[0]["Key"].should.equal("the-key")
s3.abort_multipart_upload(Bucket="foobar", Key="the-key", UploadId=mpu["UploadId"])
s3.list_multipart_uploads(Bucket="foobar").shouldnt.have.key("Uploads")
@mock_s3
@reduced_min_part_size
def test_multipart_etag_quotes_stripped():
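# complete_multipart_upload should accept part ETags with the surrounding quotes stripped.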
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
s3.put_object(Bucket="foobar", Key="original-key", Body="key_value")
mpu = s3.create_multipart_upload(Bucket="foobar", Key="the-key")
part1 = b"0" * REDUCED_PART_SIZE
up1 = s3.upload_part(
Bucket="foobar",
Key="the-key",
PartNumber=1,
UploadId=mpu["UploadId"],
Body=BytesIO(part1),
)
etag1 = up1["ETag"].replace('"', "")
up2 = s3.upload_part_copy(
Bucket="foobar",
Key="the-key",
CopySource={"Bucket": "foobar", "Key": "original-key"},
CopySourceRange="0-3",
PartNumber=2,
UploadId=mpu["UploadId"],
)
etag2 = up2["CopyPartResult"]["ETag"].replace('"', "")
s3.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={
"Parts": [
{"ETag": etag1, "PartNumber": 1},
{"ETag": etag2, "PartNumber": 2},
]
},
UploadId=mpu["UploadId"],
)
response = s3.get_object(Bucket="foobar", Key="the-key")
response["Body"].read().should.equal(part1 + b"key_")
@mock_s3
@reduced_min_part_size
def test_multipart_duplicate_upload():
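# Re-uploading the same part number should replace the earlier part rather than add a new one.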
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
part1 = b"0" * REDUCED_PART_SIZE
part2 = b"1"
mp = client.create_multipart_upload(Bucket="foobar", Key="the-key")
client.upload_part(
Body=BytesIO(part1),
PartNumber=1,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
# same part again
up1 = client.upload_part(
Body=BytesIO(part1),
PartNumber=1,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
up2 = client.upload_part(
Body=BytesIO(part2),
PartNumber=2,
Bucket="foobar",
Key="the-key",
UploadId=mp["UploadId"],
)
client.complete_multipart_upload(
Bucket="foobar",
Key="the-key",
MultipartUpload={
"Parts": [
{"ETag": up1["ETag"], "PartNumber": 1},
{"ETag": up2["ETag"], "PartNumber": 2},
]
},
UploadId=mp["UploadId"],
)
# we should get both parts as the key contents
response = client.get_object(Bucket="foobar", Key="the-key")
response["Body"].read().should.equal(part1 + part2)
@mock_s3
def test_list_multiparts():
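# list_multipart_uploads should track in-progress uploads and drop aborted ones.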
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="foobar")
mpu1 = s3.create_multipart_upload(Bucket="foobar", Key="one-key")
mpu2 = s3.create_multipart_upload(Bucket="foobar", Key="two-key")
uploads = s3.list_multipart_uploads(Bucket="foobar")["Uploads"]
uploads.should.have.length_of(2)
{u["Key"]: u["UploadId"] for u in uploads}.should.equal(
{"one-key": mpu1["UploadId"], "two-key": mpu2["UploadId"]}
)
s3.abort_multipart_upload(Bucket="foobar", Key="the-key", UploadId=mpu2["UploadId"])
uploads = s3.list_multipart_uploads(Bucket="foobar")["Uploads"]
uploads.should.have.length_of(1)
uploads[0]["Key"].should.equal("one-key")
s3.abort_multipart_upload(Bucket="foobar", Key="the-key", UploadId=mpu1["UploadId"])
res = s3.list_multipart_uploads(Bucket="foobar")
res.shouldnt.have.key("Uploads")
@mock_s3
@@ -30,7 +429,7 @@ def test_multipart_should_throw_nosuchupload_if_there_are_no_parts():
@mock_s3
def test_multipart_wrong_partnumber():
bucket_name = "mputest-3593"
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
@@ -145,3 +544,298 @@ def test_s3_multipart_upload_cannot_upload_part_over_10000(part_nr):
)
err["ArgumentName"].should.equal("partNumber")
err["ArgumentValue"].should.equal(f"{part_nr}")
@mock_s3
def test_s3_abort_multipart_data_with_invalid_upload_and_key():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="blah")
with pytest.raises(Exception) as err:
client.abort_multipart_upload(
Bucket="blah", Key="foobar", UploadId="dummy_upload_id"
)
err = err.value.response["Error"]
err["Code"].should.equal("NoSuchUpload")
err["Message"].should.equal(
"The specified upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed."
)
err["UploadId"].should.equal("dummy_upload_id")
@mock_s3
@reduced_min_part_size
def test_multipart_etag():
# Create Bucket so that test can run
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
upload_id = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")["UploadId"]
part1 = b"0" * REDUCED_PART_SIZE
etags = []
etags.append(
s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=1,
UploadId=upload_id,
Body=part1,
)["ETag"]
)
# last part, can be less than 5 MB
part2 = b"1"
etags.append(
s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=2,
UploadId=upload_id,
Body=part2,
)["ETag"]
)
s3.complete_multipart_upload(
Bucket="mybucket",
Key="the-key",
UploadId=upload_id,
MultipartUpload={
"Parts": [
{"ETag": etag, "PartNumber": i} for i, etag in enumerate(etags, 1)
]
},
)
# we should get both parts as the key contents
resp = s3.get_object(Bucket="mybucket", Key="the-key")
resp["ETag"].should.equal(EXPECTED_ETAG)
@mock_s3
@reduced_min_part_size
def test_multipart_version():
# Create Bucket so that test can run
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_bucket_versioning(
Bucket="mybucket", VersioningConfiguration={"Status": "Enabled"}
)
upload_id = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")["UploadId"]
part1 = b"0" * REDUCED_PART_SIZE
etags = []
etags.append(
s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=1,
UploadId=upload_id,
Body=part1,
)["ETag"]
)
# last part, can be less than 5 MB
part2 = b"1"
etags.append(
s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=2,
UploadId=upload_id,
Body=part2,
)["ETag"]
)
response = s3.complete_multipart_upload(
Bucket="mybucket",
Key="the-key",
UploadId=upload_id,
MultipartUpload={
"Parts": [
{"ETag": etag, "PartNumber": i} for i, etag in enumerate(etags, 1)
]
},
)
response["VersionId"].should.should_not.be.none
@mock_s3
@pytest.mark.parametrize(
"part_nr,msg,msg2",
[
(
-42,
"Argument max-parts must be an integer between 0 and 2147483647",
"Argument part-number-marker must be an integer between 0 and 2147483647",
),
(
2147483647 + 42,
"Provided max-parts not an integer or within integer range",
"Provided part-number-marker not an integer or within integer range",
),
],
)
def test_multipart_list_parts_invalid_argument(part_nr, msg, msg2):
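# Out-of-range MaxParts and PartNumberMarker values should be rejected with InvalidArgument.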
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucketasdfljoqwerasdfas"
s3.create_bucket(Bucket=bucket_name)
mpu = s3.create_multipart_upload(Bucket=bucket_name, Key="the-key")
mpu_id = mpu["UploadId"]
def get_parts(**kwarg):
s3.list_parts(Bucket=bucket_name, Key="the-key", UploadId=mpu_id, **kwarg)
with pytest.raises(ClientError) as err:
get_parts(**{"MaxParts": part_nr})
e = err.value.response["Error"]
e["Code"].should.equal("InvalidArgument")
e["Message"].should.equal(msg)
with pytest.raises(ClientError) as err:
get_parts(**{"PartNumberMarker": part_nr})
e = err.value.response["Error"]
e["Code"].should.equal("InvalidArgument")
e["Message"].should.equal(msg2)
@mock_s3
@reduced_min_part_size
def test_multipart_list_parts():
s3 = boto3.client("s3", region_name="us-east-1")
bucket_name = "mybucketasdfljoqwerasdfas"
s3.create_bucket(Bucket=bucket_name)
mpu = s3.create_multipart_upload(Bucket=bucket_name, Key="the-key")
mpu_id = mpu["UploadId"]
parts = []
n_parts = 10
def get_parts_all(i):
# Get uploaded parts using default values
uploaded_parts = []
uploaded = s3.list_parts(Bucket=bucket_name, Key="the-key", UploadId=mpu_id)
assert uploaded["PartNumberMarker"] == 0
# Parts content check
if i > 0:
for part in uploaded["Parts"]:
uploaded_parts.append(
{"ETag": part["ETag"], "PartNumber": part["PartNumber"]}
)
assert uploaded_parts == parts
next_part_number_marker = uploaded["Parts"][-1]["PartNumber"]
else:
next_part_number_marker = 0
assert uploaded["NextPartNumberMarker"] == next_part_number_marker
assert not uploaded["IsTruncated"]
def get_parts_by_batch(i):
# Get uploaded parts in batches of 2
part_number_marker = 0
uploaded_parts = []
while "there are parts":
uploaded = s3.list_parts(
Bucket=bucket_name,
Key="the-key",
UploadId=mpu_id,
PartNumberMarker=part_number_marker,
MaxParts=2,
)
assert uploaded["PartNumberMarker"] == part_number_marker
if i > 0:
# We should receive at most 2 parts
assert len(uploaded["Parts"]) <= 2
# Store parts content for the final check
for part in uploaded["Parts"]:
uploaded_parts.append(
{"ETag": part["ETag"], "PartNumber": part["PartNumber"]}
)
# No more parts, get out of the loop
if not uploaded["IsTruncated"]:
break
# The next batch of parts will start at that number
part_number_marker = uploaded["NextPartNumberMarker"]
assert part_number_marker == i + 1 if len(parts) > i else i
# Final check: we received all uploaded parts
assert uploaded_parts == parts
# Check ListParts API parameters when no part was uploaded
get_parts_all(0)
get_parts_by_batch(0)
for i in range(1, n_parts + 1):
part_size = REDUCED_PART_SIZE + i
body = b"1" * part_size
part = s3.upload_part(
Bucket=bucket_name,
Key="the-key",
PartNumber=i,
UploadId=mpu_id,
Body=body,
ContentLength=len(body),
)
parts.append({"PartNumber": i, "ETag": part["ETag"]})
# Check ListParts API parameters while there are uploaded parts
get_parts_all(i)
get_parts_by_batch(i)
# Check ListParts API parameters when all parts were uploaded
get_parts_all(11)
get_parts_by_batch(11)
s3.complete_multipart_upload(
Bucket=bucket_name,
Key="the-key",
UploadId=mpu_id,
MultipartUpload={"Parts": parts},
)
@mock_s3
@reduced_min_part_size
def test_multipart_part_size():
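# head_object with PartNumber should report the size of each individual part.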
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
mpu = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")
mpu_id = mpu["UploadId"]
parts = []
n_parts = 10
for i in range(1, n_parts + 1):
part_size = REDUCED_PART_SIZE + i
body = b"1" * part_size
part = s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=i,
UploadId=mpu_id,
Body=body,
ContentLength=len(body),
)
parts.append({"PartNumber": i, "ETag": part["ETag"]})
s3.complete_multipart_upload(
Bucket="mybucket",
Key="the-key",
UploadId=mpu_id,
MultipartUpload={"Parts": parts},
)
for i in range(1, n_parts + 1):
obj = s3.head_object(Bucket="mybucket", Key="the-key", PartNumber=i)
assert obj["ContentLength"] == REDUCED_PART_SIZE + i


@@ -0,0 +1,457 @@
import boto3
import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.client import ClientError
from moto.s3.responses import DEFAULT_REGION_NAME
from moto import mock_s3
@mock_s3
def test_get_bucket_tagging_unknown_bucket():
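# get_bucket_tagging on a bucket that does not exist should raise NoSuchBucket.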
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
with pytest.raises(ClientError) as ex:
client.get_bucket_tagging(Bucket="foobar")
ex.value.response["Error"]["Code"].should.equal("NoSuchBucket")
ex.value.response["Error"]["Message"].should.equal(
"The specified bucket does not exist"
)
@mock_s3
def test_put_object_with_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
# using system tags will fail
with pytest.raises(ClientError) as err:
s3.put_object(Bucket=bucket_name, Key=key, Body="test", Tagging="aws:foo=bar")
e = err.value
e.response["Error"]["Code"].should.equal("InvalidTag")
s3.put_object(Bucket=bucket_name, Key=key, Body="test", Tagging="foo=bar")
s3.get_object_tagging(Bucket=bucket_name, Key=key)["TagSet"].should.contain(
{"Key": "foo", "Value": "bar"}
)
resp = s3.get_object(Bucket=bucket_name, Key=key)
resp.should.have.key("TagCount").equals(1)
s3.delete_object_tagging(Bucket=bucket_name, Key=key)
s3.get_object_tagging(Bucket=bucket_name, Key=key)["TagSet"].should.equal([])
@mock_s3
def test_put_bucket_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
# With 1 tag:
resp = s3.put_bucket_tagging(
Bucket=bucket_name, Tagging={"TagSet": [{"Key": "TagOne", "Value": "ValueOne"}]}
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# With multiple tags:
resp = s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagTwo", "Value": "ValueTwo"},
]
},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# No tags is also OK:
resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": []})
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# With duplicate tag keys:
with pytest.raises(ClientError) as err:
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagOne", "Value": "ValueOneAgain"},
]
},
)
e = err.value
e.response["Error"]["Code"].should.equal("InvalidTag")
e.response["Error"]["Message"].should.equal(
"Cannot provide multiple Tags with the same key"
)
# Cannot put tags that are "system" tags - i.e. tags that start with "aws:"
with pytest.raises(ClientError) as ce:
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={"TagSet": [{"Key": "aws:sometag", "Value": "nope"}]},
)
e = ce.value
e.response["Error"]["Code"].should.equal("InvalidTag")
e.response["Error"]["Message"].should.equal(
"System tags cannot be added/updated by requester"
)
# This is OK though:
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={"TagSet": [{"Key": "something:aws:stuff", "Value": "this is fine"}]},
)
@mock_s3
def test_get_bucket_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagTwo", "Value": "ValueTwo"},
]
},
)
# Get the tags for the bucket:
resp = s3.get_bucket_tagging(Bucket=bucket_name)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
len(resp["TagSet"]).should.equal(2)
# With no tags:
s3.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": []})
with pytest.raises(ClientError) as err:
s3.get_bucket_tagging(Bucket=bucket_name)
e = err.value
e.response["Error"]["Code"].should.equal("NoSuchTagSet")
e.response["Error"]["Message"].should.equal("The TagSet does not exist")
@mock_s3
def test_delete_bucket_tagging():
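# Once the bucket tagging is deleted, fetching it should raise NoSuchTagSet.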
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagTwo", "Value": "ValueTwo"},
]
},
)
resp = s3.delete_bucket_tagging(Bucket=bucket_name)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204)
with pytest.raises(ClientError) as err:
s3.get_bucket_tagging(Bucket=bucket_name)
e = err.value
e.response["Error"]["Code"].should.equal("NoSuchTagSet")
e.response["Error"]["Message"].should.equal("The TagSet does not exist")
@mock_s3
def test_put_object_tagging():
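# Tagging a key that does not exist should fail with NoSuchKey.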
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
with pytest.raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
e = err.value
e.response["Error"].should.equal(
{
"Code": "NoSuchKey",
"Message": "The specified key does not exist.",
"Key": "key-with-tags",
"RequestID": "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE",
}
)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
# using system tags will fail
with pytest.raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={"TagSet": [{"Key": "aws:item1", "Value": "foo"},]},
)
e = err.value
e.response["Error"]["Code"].should.equal("InvalidTag")
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
@mock_s3
def test_put_object_tagging_on_earliest_version():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3_resource = boto3.resource("s3")
bucket_versioning = s3_resource.BucketVersioning(bucket_name)
bucket_versioning.enable()
bucket_versioning.status.should.equal("Enabled")
with pytest.raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
e = err.value
e.response["Error"].should.equal(
{
"Code": "NoSuchKey",
"Message": "The specified key does not exist.",
"Key": "key-with-tags",
"RequestID": "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE",
}
)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
s3.put_object(Bucket=bucket_name, Key=key, Body="test_updated")
object_versions = list(s3_resource.Bucket(bucket_name).object_versions.all())
first_object = object_versions[0]
second_object = object_versions[1]
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
VersionId=first_object.id,
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# Older version has tags while the most recent does not
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id)
resp["VersionId"].should.equal(first_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}]
)
resp = s3.get_object_tagging(
Bucket=bucket_name, Key=key, VersionId=second_object.id
)
resp["VersionId"].should.equal(second_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp["TagSet"].should.equal([])
@mock_s3
def test_put_object_tagging_on_both_version():
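# Each object version can carry its own, independent tag set.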
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3_resource = boto3.resource("s3")
bucket_versioning = s3_resource.BucketVersioning(bucket_name)
bucket_versioning.enable()
bucket_versioning.status.should.equal("Enabled")
with pytest.raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
e = err.value
e.response["Error"].should.equal(
{
"Code": "NoSuchKey",
"Message": "The specified key does not exist.",
"Key": "key-with-tags",
"RequestID": "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE",
}
)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
s3.put_object(Bucket=bucket_name, Key=key, Body="test_updated")
object_versions = list(s3_resource.Bucket(bucket_name).object_versions.all())
first_object = object_versions[0]
second_object = object_versions[1]
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
VersionId=first_object.id,
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "baz"},
{"Key": "item2", "Value": "bin"},
]
},
VersionId=second_object.id,
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}]
)
resp = s3.get_object_tagging(
Bucket=bucket_name, Key=key, VersionId=second_object.id
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "baz"}, {"Key": "item2", "Value": "bin"}]
)
@mock_s3
def test_put_object_tagging_with_single_tag():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={"TagSet": [{"Key": "item1", "Value": "foo"}]},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
@mock_s3
def test_get_object_tagging():
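# The TagSet starts out empty and should reflect the tags added via put_object_tagging.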
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp["TagSet"].should.have.length_of(0)
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp["TagSet"].should.have.length_of(2)
resp["TagSet"].should.contain({"Key": "item1", "Value": "foo"})
resp["TagSet"].should.contain({"Key": "item2", "Value": "bar"})
@mock_s3
def test_objects_tagging_with_same_key_name():
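# Tag sets are scoped to the bucket, even when two buckets hold the same key name.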
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
key_name = "file.txt"
bucket1 = "bucket-1"
s3.create_bucket(Bucket=bucket1)
tagging = "variable=one"
s3.put_object(Bucket=bucket1, Body=b"test", Key=key_name, Tagging=tagging)
bucket2 = "bucket-2"
s3.create_bucket(Bucket=bucket2)
tagging2 = "variable=two"
s3.put_object(Bucket=bucket2, Body=b"test", Key=key_name, Tagging=tagging2)
variable1 = s3.get_object_tagging(Bucket=bucket1, Key=key_name)["TagSet"][0][
"Value"
]
variable2 = s3.get_object_tagging(Bucket=bucket2, Key=key_name)["TagSet"][0][
"Value"
]
assert variable1 == "one"
assert variable2 == "two"