# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import os
import sys
from boto3 import Session
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
from functools import wraps
from gzip import GzipFile
from io import BytesIO
import mimetypes
import zlib
import pickle
import json
import boto
import boto3
from botocore.client import ClientError
import botocore.exceptions
from boto.exception import S3CreateError, S3ResponseError
from botocore.handlers import disable_signing
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from freezegun import freeze_time
from parameterized import parameterized
import six
import requests
import tests.backport_assert_raises # noqa
from moto.s3.responses import DEFAULT_REGION_NAME
from nose import SkipTest
from nose.tools import assert_raises
import sure # noqa
from moto import settings, mock_s3, mock_s3_deprecated, mock_config
import moto.s3.models as s3model
from moto.core.exceptions import InvalidNextTokenException
from moto.core.utils import py2_strip_unicode_keys
if settings.TEST_SERVER_MODE:
REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE
EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"'
else:
REDUCED_PART_SIZE = 256
EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"'
def reduced_min_part_size(f):
""" speed up tests by temporarily making the multipart minimum part size
small
"""
orig_size = s3model.UPLOAD_PART_MIN_SIZE
@wraps(f)
def wrapped(*args, **kwargs):
try:
s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
return f(*args, **kwargs)
finally:
s3model.UPLOAD_PART_MIN_SIZE = orig_size
return wrapped
class MyModel(object):
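    """Minimal model-style object that persists itself to S3 via put_object."""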
def __init__(self, name, value):
self.name = name
self.value = value
def save(self):
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.put_object(Bucket="mybucket", Key=self.name, Body=self.value)
@mock_s3
def test_keys_are_pickleable():
"""Keys must be pickleable due to boto3 implementation details."""
key = s3model.FakeKey("name", b"data!")
assert key.value == b"data!"
pickled = pickle.dumps(key)
loaded = pickle.loads(pickled)
assert loaded.value == key.value
@mock_s3
def test_append_to_value__basic():
key = s3model.FakeKey("name", b"data!")
assert key.value == b"data!"
assert key.size == 5
key.append_to_value(b" And even more data")
assert key.value == b"data! And even more data"
assert key.size == 24
@mock_s3
def test_append_to_value__nothing_added():
key = s3model.FakeKey("name", b"data!")
assert key.value == b"data!"
assert key.size == 5
key.append_to_value(b"")
assert key.value == b"data!"
assert key.size == 5
@mock_s3
def test_append_to_value__empty_key():
key = s3model.FakeKey("name", b"")
assert key.value == b""
assert key.size == 0
key.append_to_value(b"stuff")
assert key.value == b"stuff"
assert key.size == 5
@mock_s3
def test_my_model_save():
# Create Bucket so that test can run
conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
conn.create_bucket(Bucket="mybucket")
####################################
model_instance = MyModel("steve", "is awesome")
model_instance.save()
body = conn.Object("mybucket", "steve").get()["Body"].read().decode()
assert body == "is awesome"
@mock_s3
def test_key_etag():
conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
conn.create_bucket(Bucket="mybucket")
model_instance = MyModel("steve", "is awesome")
model_instance.save()
conn.Bucket("mybucket").Object("steve").e_tag.should.equal(
'"d32bda93738f7e03adb22e66c90fbc04"'
)
@mock_s3_deprecated
def test_multipart_upload_too_small():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
multipart.upload_part_from_file(BytesIO(b"hello"), 1)
multipart.upload_part_from_file(BytesIO(b"world"), 2)
# Multipart with total size under 5MB is refused
multipart.complete_upload.should.throw(S3ResponseError)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# last part, can be less than 5 MB
part2 = b"1"
multipart.upload_part_from_file(BytesIO(part2), 2)
multipart.complete_upload()
# we should get both parts as the key contents
bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_out_of_order():
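    # Part numbers may be sparse and uploaded in any order; on completion the
    # parts are assembled in ascending part-number order.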
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
# last part, can be less than 5 MB
part2 = b"1"
multipart.upload_part_from_file(BytesIO(part2), 4)
part1 = b"0" * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 2)
multipart.complete_upload()
# we should get both parts as the key contents
bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_with_headers():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key", metadata={"foo": "bar"})
part1 = b"0" * 10
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.complete_upload()
key = bucket.get_key("the-key")
key.metadata.should.equal({"foo": "bar"})
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_with_copy_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "original-key"
key.set_contents_from_string("key_value")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3)
multipart.complete_upload()
bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + b"key_")
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_upload_cancel():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
multipart.cancel_upload()
    # TODO: we really need some sort of assertion here, but we don't currently
    # have the ability to list multipart uploads for a bucket.
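    # (With boto3, client.list_multipart_uploads(Bucket=...) could be used to
    # assert that the cancelled upload is gone.)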
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_etag():
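    # The completed key should carry a multipart-style ETag with a "-2"
    # part-count suffix.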
# Create Bucket so that test can run
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("mybucket")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# last part, can be less than 5 MB
part2 = b"1"
multipart.upload_part_from_file(BytesIO(part2), 2)
multipart.complete_upload()
# we should get both parts as the key contents
bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_invalid_order():
# Create Bucket so that test can run
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("mybucket")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * 5242880
etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
# last part, can be less than 5 MB
part2 = b"1"
etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
xml = xml.format(2, etag2) + xml.format(1, etag1)
xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
bucket.complete_multipart_upload.when.called_with(
multipart.key_name, multipart.id, xml
).should.throw(S3ResponseError)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_etag_quotes_stripped():
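    # CompleteMultipartUpload should accept part ETags with or without their
    # surrounding quotes.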
# Create Bucket so that test can run
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("mybucket")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * REDUCED_PART_SIZE
etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
# last part, can be less than 5 MB
part2 = b"1"
etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
# Strip quotes from etags
etag1 = etag1.replace('"', "")
etag2 = etag2.replace('"', "")
xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
xml = xml.format(1, etag1) + xml.format(2, etag2)
xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
bucket.complete_multipart_upload.when.called_with(
multipart.key_name, multipart.id, xml
).should_not.throw(S3ResponseError)
# we should get both parts as the key contents
bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
@mock_s3_deprecated
@reduced_min_part_size
def test_multipart_duplicate_upload():
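    # Re-uploading an existing part number replaces that part rather than
    # appending a duplicate.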
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
multipart = bucket.initiate_multipart_upload("the-key")
part1 = b"0" * REDUCED_PART_SIZE
multipart.upload_part_from_file(BytesIO(part1), 1)
# same part again
multipart.upload_part_from_file(BytesIO(part1), 1)
part2 = b"1" * 1024
multipart.upload_part_from_file(BytesIO(part2), 2)
multipart.complete_upload()
# We should get only one copy of part 1.
bucket.get_key("the-key").get_contents_as_string().should.equal(part1 + part2)
@mock_s3_deprecated
def test_list_multiparts():
# Create Bucket so that test can run
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("mybucket")
multipart1 = bucket.initiate_multipart_upload("one-key")
multipart2 = bucket.initiate_multipart_upload("two-key")
uploads = bucket.get_all_multipart_uploads()
uploads.should.have.length_of(2)
dict([(u.key_name, u.id) for u in uploads]).should.equal(
{"one-key": multipart1.id, "two-key": multipart2.id}
)
multipart2.cancel_upload()
uploads = bucket.get_all_multipart_uploads()
uploads.should.have.length_of(1)
uploads[0].key_name.should.equal("one-key")
multipart1.cancel_upload()
uploads = bucket.get_all_multipart_uploads()
uploads.should.be.empty
@mock_s3_deprecated
def test_key_save_to_missing_bucket():
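    # Saving a key into a bucket that was never created should raise
    # S3ResponseError.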
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.get_bucket("mybucket", validate=False)
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string.when.called_with("foobar").should.throw(
S3ResponseError
)
@mock_s3_deprecated
def test_missing_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
bucket.get_key("the-key").should.equal(None)
@mock_s3_deprecated
def test_missing_key_urllib2():
conn = boto.connect_s3("the_key", "the_secret")
conn.create_bucket("foobar")
urlopen.when.called_with("http://foobar.s3.amazonaws.com/the-key").should.throw(
HTTPError
)
@mock_s3_deprecated
def test_empty_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("")
key = bucket.get_key("the-key")
key.size.should.equal(0)
key.get_contents_as_string().should.equal(b"")
@mock_s3_deprecated
def test_empty_key_set_on_existing_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar")
key = bucket.get_key("the-key")
key.size.should.equal(6)
key.get_contents_as_string().should.equal(b"foobar")
key.set_contents_from_string("")
bucket.get_key("the-key").get_contents_as_string().should.equal(b"")
@mock_s3_deprecated
def test_large_key_save():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("foobar" * 100000)
bucket.get_key("the-key").get_contents_as_string().should.equal(b"foobar" * 100000)
@mock_s3_deprecated
def test_copy_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
bucket.copy_key("new-key", "foobar", "the-key")
bucket.get_key("the-key").get_contents_as_string().should.equal(b"some value")
bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value")
@parameterized([("the-unicode-💩-key",), ("key-with?question-mark",)])
@mock_s3_deprecated
def test_copy_key_with_special_chars(key_name):
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = key_name
key.set_contents_from_string("some value")
bucket.copy_key("new-key", "foobar", key_name)
bucket.get_key(key_name).get_contents_as_string().should.equal(b"some value")
bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value")
@mock_s3_deprecated
def test_copy_key_with_version():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
bucket.configure_versioning(versioning=True)
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
key.set_contents_from_string("another value")
    old_version_id = [
        v.version_id for v in bucket.get_all_versions() if not v.is_latest
    ][0]
    bucket.copy_key("new-key", "foobar", "the-key", src_version_id=old_version_id)
bucket.get_key("the-key").get_contents_as_string().should.equal(b"another value")
bucket.get_key("new-key").get_contents_as_string().should.equal(b"some value")
@mock_s3_deprecated
def test_set_metadata():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_metadata("md", "Metadatastring")
key.set_contents_from_string("Testval")
bucket.get_key("the-key").get_metadata("md").should.equal("Metadatastring")
@mock_s3_deprecated
def test_copy_key_replace_metadata():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_metadata("md", "Metadatastring")
key.set_contents_from_string("some value")
bucket.copy_key(
"new-key", "foobar", "the-key", metadata={"momd": "Mometadatastring"}
)
bucket.get_key("new-key").get_metadata("md").should.be.none
bucket.get_key("new-key").get_metadata("momd").should.equal("Mometadatastring")
@freeze_time("2012-01-01 12:00:00")
@mock_s3_deprecated
def test_last_modified():
# See https://github.com/boto/boto/issues/466
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
rs = bucket.get_all_keys()
rs[0].last_modified.should.equal("2012-01-01T12:00:00.000Z")
bucket.get_key("the-key").last_modified.should.equal(
"Sun, 01 Jan 2012 12:00:00 GMT"
)
@mock_s3_deprecated
def test_missing_bucket():
conn = boto.connect_s3("the_key", "the_secret")
conn.get_bucket.when.called_with("mybucket").should.throw(S3ResponseError)
@mock_s3_deprecated
def test_bucket_with_dash():
conn = boto.connect_s3("the_key", "the_secret")
conn.get_bucket.when.called_with("mybucket-test").should.throw(S3ResponseError)
@mock_s3_deprecated
def test_create_existing_bucket():
"Trying to create a bucket that already exists should raise an Error"
conn = boto.s3.connect_to_region("us-west-2")
conn.create_bucket("foobar", location="us-west-2")
with assert_raises(S3CreateError):
conn.create_bucket("foobar", location="us-west-2")
@mock_s3_deprecated
def test_create_existing_bucket_in_us_east_1():
"Trying to create a bucket that already exists in us-east-1 returns the bucket"
""""
http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
Your previous request to create the named bucket succeeded and you already
own it. You get this error in all AWS regions except US Standard,
us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if
bucket exists it Amazon S3 will not do anything).
"""
conn = boto.s3.connect_to_region(DEFAULT_REGION_NAME)
conn.create_bucket("foobar")
bucket = conn.create_bucket("foobar")
bucket.name.should.equal("foobar")
@mock_s3_deprecated
def test_other_region():
conn = S3Connection("key", "secret", host="s3-website-ap-southeast-2.amazonaws.com")
conn.create_bucket("foobar", location="ap-southeast-2")
list(conn.get_bucket("foobar").get_all_keys()).should.equal([])
@mock_s3_deprecated
def test_bucket_deletion():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
# Try to delete a bucket that still has keys
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
bucket.delete_key("the-key")
conn.delete_bucket("foobar")
# Get non-existing bucket
conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError)
# Delete non-existent bucket
conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError)
@mock_s3_deprecated
def test_get_all_buckets():
conn = boto.connect_s3("the_key", "the_secret")
conn.create_bucket("foobar")
conn.create_bucket("foobar2")
buckets = conn.get_all_buckets()
buckets.should.have.length_of(2)
@mock_s3
@mock_s3_deprecated
def test_post_to_bucket():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
requests.post(
"https://foobar.s3.amazonaws.com/", {"key": "the-key", "file": "nothing"}
)
bucket.get_key("the-key").get_contents_as_string().should.equal(b"nothing")
@mock_s3
@mock_s3_deprecated
def test_post_with_metadata_to_bucket():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
requests.post(
"https://foobar.s3.amazonaws.com/",
{"key": "the-key", "file": "nothing", "x-amz-meta-test": "metadata"},
)
bucket.get_key("the-key").get_metadata("test").should.equal("metadata")
@mock_s3_deprecated
def test_delete_missing_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
deleted_key = bucket.delete_key("foobar")
deleted_key.key.should.equal("foobar")
@mock_s3_deprecated
def test_delete_keys():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
Key(bucket=bucket, name="file1").set_contents_from_string("abc")
Key(bucket=bucket, name="file2").set_contents_from_string("abc")
Key(bucket=bucket, name="file3").set_contents_from_string("abc")
Key(bucket=bucket, name="file4").set_contents_from_string("abc")
result = bucket.delete_keys(["file2", "file3"])
result.deleted.should.have.length_of(2)
result.errors.should.have.length_of(0)
keys = bucket.get_all_keys()
keys.should.have.length_of(2)
keys[0].name.should.equal("file1")
@mock_s3_deprecated
def test_delete_keys_invalid():
2019-10-31 15:44:26 +00:00
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
Key(bucket=bucket, name="file1").set_contents_from_string("abc")
Key(bucket=bucket, name="file2").set_contents_from_string("abc")
Key(bucket=bucket, name="file3").set_contents_from_string("abc")
Key(bucket=bucket, name="file4").set_contents_from_string("abc")
# non-existing key case
result = bucket.delete_keys(["abc", "file3"])
result.deleted.should.have.length_of(1)
result.errors.should.have.length_of(1)
keys = bucket.get_all_keys()
keys.should.have.length_of(3)
keys[0].name.should.equal("file1")
# empty keys
result = bucket.delete_keys([])
result.deleted.should.have.length_of(0)
result.errors.should.have.length_of(0)
@mock_s3
def test_boto3_delete_empty_keys_list():
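    # DeleteObjects requires at least one object; an empty list is rejected as
    # MalformedXML.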
with assert_raises(ClientError) as err:
boto3.client("s3").delete_objects(Bucket="foobar", Delete={"Objects": []})
assert err.exception.response["Error"]["Code"] == "MalformedXML"
@mock_s3_deprecated
def test_bucket_name_with_dot():
conn = boto.connect_s3()
bucket = conn.create_bucket("firstname.lastname")
k = Key(bucket, "somekey")
k.set_contents_from_string("somedata")
@mock_s3_deprecated
def test_key_with_special_characters():
conn = boto.connect_s3()
bucket = conn.create_bucket("test_bucket_name")
key = Key(bucket, "test_list_keys_2/x?y")
key.set_contents_from_string("value1")
key_list = bucket.list("test_list_keys_2/", "/")
keys = [x for x in key_list]
keys[0].name.should.equal("test_list_keys_2/x?y")
@mock_s3_deprecated
def test_unicode_key_with_slash():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "/the-key-unîcode/test"
key.set_contents_from_string("value")
key = bucket.get_key("/the-key-unîcode/test")
key.get_contents_as_string().should.equal(b"value")
@mock_s3_deprecated
def test_bucket_key_listing_order():
conn = boto.connect_s3()
bucket = conn.create_bucket("test_bucket")
prefix = "toplevel/"
def store(name):
k = Key(bucket, prefix + name)
k.set_contents_from_string("somedata")
names = ["x/key", "y.key1", "y.key2", "y.key3", "x/y/key", "x/y/z/key"]
for name in names:
store(name)
delimiter = None
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal(
[
"toplevel/x/key",
"toplevel/x/y/key",
"toplevel/x/y/z/key",
"toplevel/y.key1",
"toplevel/y.key2",
"toplevel/y.key3",
]
)
delimiter = "/"
keys = [x.name for x in bucket.list(prefix, delimiter)]
keys.should.equal(
["toplevel/y.key1", "toplevel/y.key2", "toplevel/y.key3", "toplevel/x/"]
)
# Test delimiter with no prefix
delimiter = "/"
keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
keys.should.equal(["toplevel/"])
delimiter = None
keys = [x.name for x in bucket.list(prefix + "x", delimiter)]
keys.should.equal(["toplevel/x/key", "toplevel/x/y/key", "toplevel/x/y/z/key"])
delimiter = "/"
keys = [x.name for x in bucket.list(prefix + "x", delimiter)]
keys.should.equal(["toplevel/x/"])
@mock_s3_deprecated
def test_key_with_reduced_redundancy():
conn = boto.connect_s3()
bucket = conn.create_bucket("test_bucket_name")
key = Key(bucket, "test_rr_key")
key.set_contents_from_string("value1", reduced_redundancy=True)
    # we use the bucket iterator because of:
    # https://github.com/boto/boto/issues/1173
list(bucket)[0].storage_class.should.equal("REDUCED_REDUNDANCY")
@mock_s3_deprecated
def test_copy_key_reduced_redundancy():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
bucket.copy_key("new-key", "foobar", "the-key", storage_class="REDUCED_REDUNDANCY")
    # we use the bucket iterator because of:
    # https://github.com/boto/boto/issues/1173
keys = dict([(k.name, k) for k in bucket])
keys["new-key"].storage_class.should.equal("REDUCED_REDUNDANCY")
keys["the-key"].storage_class.should.equal("STANDARD")
@freeze_time("2012-01-01 12:00:00")
@mock_s3_deprecated
def test_restore_key():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
list(bucket)[0].ongoing_restore.should.be.none
key.restore(1)
key = bucket.get_key("the-key")
key.ongoing_restore.should_not.be.none
key.ongoing_restore.should.be.false
key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
key.restore(2)
key = bucket.get_key("the-key")
key.ongoing_restore.should_not.be.none
key.ongoing_restore.should.be.false
key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT")
@freeze_time("2012-01-01 12:00:00")
@mock_s3_deprecated
def test_restore_key_headers():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
key = Key(bucket)
key.key = "the-key"
key.set_contents_from_string("some value")
key.restore(1, headers={"foo": "bar"})
key = bucket.get_key("the-key")
key.ongoing_restore.should_not.be.none
key.ongoing_restore.should.be.false
key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT")
@mock_s3_deprecated
def test_get_versioning_status():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
d = bucket.get_versioning_status()
d.should.be.empty
bucket.configure_versioning(versioning=True)
d = bucket.get_versioning_status()
d.shouldnt.be.empty
d.should.have.key("Versioning").being.equal("Enabled")
bucket.configure_versioning(versioning=False)
d = bucket.get_versioning_status()
d.should.have.key("Versioning").being.equal("Suspended")
@mock_s3_deprecated
def test_key_version():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
bucket.configure_versioning(versioning=True)
versions = []
key = Key(bucket)
key.key = "the-key"
key.version_id.should.be.none
key.set_contents_from_string("some string")
versions.append(key.version_id)
key.set_contents_from_string("some string")
versions.append(key.version_id)
set(versions).should.have.length_of(2)
key = bucket.get_key("the-key")
key.version_id.should.equal(versions[-1])
@mock_s3_deprecated
def test_list_versions():
conn = boto.connect_s3("the_key", "the_secret")
bucket = conn.create_bucket("foobar")
bucket.configure_versioning(versioning=True)
key_versions = []
key = Key(bucket, "the-key")
key.version_id.should.be.none
key.set_contents_from_string("Version 1")
key_versions.append(key.version_id)
key.set_contents_from_string("Version 2")
key_versions.append(key.version_id)
key_versions.should.have.length_of(2)
versions = list(bucket.list_versions())
versions.should.have.length_of(2)
versions[0].name.should.equal("the-key")
versions[0].version_id.should.equal(key_versions[0])
versions[0].get_contents_as_string().should.equal(b"Version 1")
versions[1].name.should.equal("the-key")
versions[1].version_id.should.equal(key_versions[1])
versions[1].get_contents_as_string().should.equal(b"Version 2")
key = Key(bucket, "the2-key")
key.set_contents_from_string("Version 1")
keys = list(bucket.list())
keys.should.have.length_of(2)
versions = list(bucket.list_versions(prefix="the2-"))
versions.should.have.length_of(1)
@mock_s3_deprecated
def test_acl_setting():
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
content = b"imafile"
keyname = "test.txt"
key = Key(bucket, name=keyname)
key.content_type = "text/plain"
key.set_contents_from_string(content)
key.make_public()
key = bucket.get_key(keyname)
assert key.get_contents_as_string() == content
grants = key.get_acl().acl.grants
assert any(
g.uri == "http://acs.amazonaws.com/groups/global/AllUsers"
and g.permission == "READ"
for g in grants
), grants
@mock_s3_deprecated
def test_acl_setting_via_headers():
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
content = b"imafile"
keyname = "test.txt"
key = Key(bucket, name=keyname)
key.content_type = "text/plain"
key.set_contents_from_string(
content,
headers={
"x-amz-grant-full-control": 'uri="http://acs.amazonaws.com/groups/global/AllUsers"'
},
)
key = bucket.get_key(keyname)
assert key.get_contents_as_string() == content
grants = key.get_acl().acl.grants
assert any(
g.uri == "http://acs.amazonaws.com/groups/global/AllUsers"
and g.permission == "FULL_CONTROL"
for g in grants
), grants
@mock_s3_deprecated
def test_acl_switching():
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
content = b"imafile"
keyname = "test.txt"
key = Key(bucket, name=keyname)
key.content_type = "text/plain"
key.set_contents_from_string(content, policy="public-read")
key.set_acl("private")
grants = key.get_acl().acl.grants
assert not any(
g.uri == "http://acs.amazonaws.com/groups/global/AllUsers"
and g.permission == "READ"
for g in grants
), grants
@mock_s3_deprecated
def test_bucket_acl_setting():
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
bucket.make_public()
grants = bucket.get_acl().acl.grants
assert any(
g.uri == "http://acs.amazonaws.com/groups/global/AllUsers"
and g.permission == "READ"
for g in grants
), grants
@mock_s3_deprecated
def test_bucket_acl_switching():
conn = boto.connect_s3()
bucket = conn.create_bucket("foobar")
bucket.make_public()
bucket.set_acl("private")
grants = bucket.get_acl().acl.grants
assert not any(
g.uri == "http://acs.amazonaws.com/groups/global/AllUsers"
and g.permission == "READ"
for g in grants
), grants
@mock_s3
def test_s3_object_in_public_bucket():
s3 = boto3.resource("s3")
bucket = s3.Bucket("test-bucket")
bucket.create(
ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"}
)
bucket.put_object(Body=b"ABCD", Key="file.txt")
s3_anonymous = boto3.resource("s3")
s3_anonymous.meta.client.meta.events.register("choose-signer.s3.*", disable_signing)
contents = (
s3_anonymous.Object(key="file.txt", bucket_name="test-bucket")
.get()["Body"]
.read()
)
contents.should.equal(b"ABCD")
bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt")
with assert_raises(ClientError) as exc:
s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get()
exc.exception.response["Error"]["Code"].should.equal("403")
@mock_s3
def test_s3_object_in_public_bucket_using_multiple_presigned_urls():
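    # A presigned GET URL should remain usable for repeated requests until it
    # expires.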
s3 = boto3.resource("s3")
bucket = s3.Bucket("test-bucket")
bucket.create(
ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"}
)
bucket.put_object(Body=b"ABCD", Key="file.txt")
params = {"Bucket": "test-bucket", "Key": "file.txt"}
presigned_url = boto3.client("s3").generate_presigned_url(
"get_object", params, ExpiresIn=900
)
for i in range(1, 10):
response = requests.get(presigned_url)
assert response.status_code == 200, "Failed on req number {}".format(i)
@mock_s3
def test_s3_object_in_private_bucket():
s3 = boto3.resource("s3")
bucket = s3.Bucket("test-bucket")
bucket.create(
ACL="private", CreateBucketConfiguration={"LocationConstraint": "us-west-1"}
)
bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt")
s3_anonymous = boto3.resource("s3")
s3_anonymous.meta.client.meta.events.register("choose-signer.s3.*", disable_signing)
with assert_raises(ClientError) as exc:
s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get()
exc.exception.response["Error"]["Code"].should.equal("403")
bucket.put_object(ACL="public-read", Body=b"ABCD", Key="file.txt")
contents = (
s3_anonymous.Object(key="file.txt", bucket_name="test-bucket")
.get()["Body"]
.read()
)
contents.should.equal(b"ABCD")
@mock_s3_deprecated
def test_unicode_key():
conn = boto.connect_s3()
bucket = conn.create_bucket("mybucket")
key = Key(bucket)
key.key = "こんにちは.jpg"
key.set_contents_from_string("Hello world!")
assert [listed_key.key for listed_key in bucket.list()] == [key.key]
fetched_key = bucket.get_key(key.key)
assert fetched_key.key == key.key
assert fetched_key.get_contents_as_string().decode("utf-8") == "Hello world!"
@mock_s3_deprecated
def test_unicode_value():
conn = boto.connect_s3()
bucket = conn.create_bucket("mybucket")
key = Key(bucket)
key.key = "some_key"
key.set_contents_from_string("こんにちは.jpg")
list(bucket.list())
key = bucket.get_key(key.key)
assert key.get_contents_as_string().decode("utf-8") == "こんにちは.jpg"
@mock_s3_deprecated
def test_setting_content_encoding():
conn = boto.connect_s3()
bucket = conn.create_bucket("mybucket")
key = bucket.new_key("keyname")
key.set_metadata("Content-Encoding", "gzip")
compressed_data = "abcdef"
key.set_contents_from_string(compressed_data)
key = bucket.get_key("keyname")
key.content_encoding.should.equal("gzip")
@mock_s3_deprecated
def test_bucket_location():
conn = boto.s3.connect_to_region("us-west-2")
bucket = conn.create_bucket("mybucket", location="us-west-2")
bucket.get_location().should.equal("us-west-2")
@mock_s3
def test_bucket_location_default():
cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
# No LocationConstraint ==> us-east-1
cli.create_bucket(Bucket=bucket_name)
cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal(None)
@mock_s3
def test_bucket_location_nondefault():
cli = boto3.client("s3", region_name="eu-central-1")
bucket_name = "mybucket"
# LocationConstraint set for non default regions
resp = cli.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
)
cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal(
"eu-central-1"
)
# These tests use the current region to decide whether to raise an error.
# The region is derived from the request URL, and in server mode that URL is
# always localhost, so the test cannot run there.
if not settings.TEST_SERVER_MODE:
@mock_s3
def test_s3_location_should_error_outside_useast1():
s3 = boto3.client("s3", region_name="eu-west-1")
bucket_name = "asdfasdfsdfdsfasda"
with assert_raises(ClientError) as e:
s3.create_bucket(Bucket=bucket_name)
e.exception.response["Error"]["Message"].should.equal(
"The unspecified location constraint is incompatible for the region specific endpoint this request was sent to."
)
# The s3-control tests cannot be run against the moto server without modifying
# the hosts file on your system, because the endpoint URL has the form
# ACCOUNT_ID.s3-control.amazonaws.com -- the account-ID prefix is the problem.
# To use the moto server anyway, add a hosts entry for
# `THE_ACCOUNT_ID_FOR_MOTO.localhost` and these tests will work fine.
@mock_s3
def test_get_public_access_block_for_account():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
# With an invalid account ID:
with assert_raises(ClientError) as ce:
client.get_public_access_block(AccountId="111111111111")
assert ce.exception.response["Error"]["Code"] == "AccessDenied"
# Without one defined:
with assert_raises(ClientError) as ce:
client.get_public_access_block(AccountId=ACCOUNT_ID)
assert (
ce.exception.response["Error"]["Code"]
== "NoSuchPublicAccessBlockConfiguration"
)
    # Put with an invalid account ID:
with assert_raises(ClientError) as ce:
client.put_public_access_block(
AccountId="111111111111",
PublicAccessBlockConfiguration={"BlockPublicAcls": True},
)
assert ce.exception.response["Error"]["Code"] == "AccessDenied"
# Put with an invalid PAB:
with assert_raises(ClientError) as ce:
client.put_public_access_block(
AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={}
)
assert ce.exception.response["Error"]["Code"] == "InvalidRequest"
assert (
"Must specify at least one configuration."
in ce.exception.response["Error"]["Message"]
)
# Correct PAB:
client.put_public_access_block(
AccountId=ACCOUNT_ID,
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
)
# Get the correct PAB (for all regions):
for region in Session().get_available_regions("s3control"):
region_client = boto3.client("s3control", region_name=region)
assert region_client.get_public_access_block(AccountId=ACCOUNT_ID)[
"PublicAccessBlockConfiguration"
] == {
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
}
# Delete with an invalid account ID:
with assert_raises(ClientError) as ce:
client.delete_public_access_block(AccountId="111111111111")
assert ce.exception.response["Error"]["Code"] == "AccessDenied"
# Delete successfully:
client.delete_public_access_block(AccountId=ACCOUNT_ID)
# Confirm that it's deleted:
with assert_raises(ClientError) as ce:
client.get_public_access_block(AccountId=ACCOUNT_ID)
assert (
ce.exception.response["Error"]["Code"]
== "NoSuchPublicAccessBlockConfiguration"
)
@mock_s3
@mock_config
def test_config_list_account_pab():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
config_client = boto3.client("config", region_name="us-west-2")
# Create the aggregator:
account_aggregation_source = {
"AccountIds": [ACCOUNT_ID],
"AllAwsRegions": True,
}
config_client.put_configuration_aggregator(
ConfigurationAggregatorName="testing",
AccountAggregationSources=[account_aggregation_source],
)
# Without a PAB in place:
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock"
)
assert not result["resourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
)
assert not result["ResourceIdentifiers"]
# Create a PAB:
client.put_public_access_block(
AccountId=ACCOUNT_ID,
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
)
# Test that successful queries work (non-aggregated):
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock"
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock",
resourceIds=[ACCOUNT_ID, "nope"],
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceName=""
)
assert result["resourceIdentifiers"] == [
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
# Test that successful queries work (aggregated):
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
)
regions = {region for region in Session().get_available_regions("config")}
for r in result["ResourceIdentifiers"]:
regions.remove(r.pop("SourceRegion"))
assert r == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
}
    # Just check that the lengths are the same -- this should be a reasonable check.
regions = {region for region in Session().get_available_regions("config")}
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": ""},
)
assert len(regions) == len(result["ResourceIdentifiers"])
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID},
)
assert len(regions) == len(result["ResourceIdentifiers"])
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={
"ResourceName": "",
"ResourceId": ACCOUNT_ID,
"Region": "us-west-2",
},
)
assert (
result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2"
and len(result["ResourceIdentifiers"]) == 1
)
# Test aggregator pagination:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Limit=1,
)
regions = sorted(
[region for region in Session().get_available_regions("config")]
)
assert result["ResourceIdentifiers"][0] == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
"SourceRegion": regions[0],
}
assert result["NextToken"] == regions[1]
# Get the next region:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Limit=1,
NextToken=regions[1],
)
assert result["ResourceIdentifiers"][0] == {
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"SourceAccountId": ACCOUNT_ID,
"ResourceId": ACCOUNT_ID,
"SourceRegion": regions[1],
}
# Non-aggregated with incorrect info:
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope"
)
assert not result["resourceIdentifiers"]
result = config_client.list_discovered_resources(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"]
)
assert not result["resourceIdentifiers"]
# Aggregated with incorrect info:
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceName": "nope"},
)
assert not result["ResourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"ResourceId": "nope"},
)
assert not result["ResourceIdentifiers"]
result = config_client.list_aggregate_discovered_resources(
ResourceType="AWS::S3::AccountPublicAccessBlock",
ConfigurationAggregatorName="testing",
Filters={"Region": "Nope"},
)
assert not result["ResourceIdentifiers"]
@mock_s3
@mock_config
def test_config_get_account_pab():
from moto.s3.models import ACCOUNT_ID
client = boto3.client("s3control", region_name="us-west-2")
config_client = boto3.client("config", region_name="us-west-2")
# Create the aggregator:
account_aggregation_source = {
"AccountIds": [ACCOUNT_ID],
"AllAwsRegions": True,
}
config_client.put_configuration_aggregator(
ConfigurationAggregatorName="testing",
AccountAggregationSources=[account_aggregation_source],
)
# Without a PAB in place:
with assert_raises(ClientError) as ce:
config_client.get_resource_config_history(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
)
assert (
ce.exception.response["Error"]["Code"] == "ResourceNotDiscoveredException"
)
    # Batch queries (plain and aggregated) should also return nothing yet:
result = config_client.batch_get_resource_config(
resourceKeys=[
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": "ACCOUNT_ID",
}
]
)
assert not result["baseConfigurationItems"]
result = config_client.batch_get_aggregate_resource_config(
ConfigurationAggregatorName="testing",
ResourceIdentifiers=[
{
"SourceAccountId": ACCOUNT_ID,
"SourceRegion": "us-west-2",
"ResourceId": ACCOUNT_ID,
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"ResourceName": "",
}
],
)
assert not result["BaseConfigurationItems"]
# Create a PAB:
client.put_public_access_block(
AccountId=ACCOUNT_ID,
PublicAccessBlockConfiguration={
"BlockPublicAcls": True,
"IgnorePublicAcls": True,
"BlockPublicPolicy": True,
"RestrictPublicBuckets": True,
},
)
# Get the proper config:
proper_config = {
"blockPublicAcls": True,
"ignorePublicAcls": True,
"blockPublicPolicy": True,
"restrictPublicBuckets": True,
}
result = config_client.get_resource_config_history(
resourceType="AWS::S3::AccountPublicAccessBlock", resourceId=ACCOUNT_ID
)
assert (
json.loads(result["configurationItems"][0]["configuration"])
== proper_config
)
assert (
result["configurationItems"][0]["accountId"]
== result["configurationItems"][0]["resourceId"]
== ACCOUNT_ID
)
result = config_client.batch_get_resource_config(
resourceKeys=[
{
"resourceType": "AWS::S3::AccountPublicAccessBlock",
"resourceId": ACCOUNT_ID,
}
]
)
assert len(result["baseConfigurationItems"]) == 1
assert (
json.loads(result["baseConfigurationItems"][0]["configuration"])
== proper_config
)
assert (
result["baseConfigurationItems"][0]["accountId"]
== result["baseConfigurationItems"][0]["resourceId"]
== ACCOUNT_ID
)
for region in Session().get_available_regions("s3control"):
result = config_client.batch_get_aggregate_resource_config(
ConfigurationAggregatorName="testing",
ResourceIdentifiers=[
{
"SourceAccountId": ACCOUNT_ID,
"SourceRegion": region,
"ResourceId": ACCOUNT_ID,
"ResourceType": "AWS::S3::AccountPublicAccessBlock",
"ResourceName": "",
}
],
)
assert len(result["BaseConfigurationItems"]) == 1
assert (
json.loads(result["BaseConfigurationItems"][0]["configuration"])
== proper_config
)
@mock_s3_deprecated
def test_ranged_get():
conn = boto.connect_s3()
bucket = conn.create_bucket("mybucket")
key = Key(bucket)
key.key = "bigkey"
rep = b"0123456789"
key.set_contents_from_string(rep * 10)
# Implicitly bounded range requests.
key.get_contents_as_string(headers={"Range": "bytes=0-"}).should.equal(rep * 10)
key.get_contents_as_string(headers={"Range": "bytes=50-"}).should.equal(rep * 5)
key.get_contents_as_string(headers={"Range": "bytes=99-"}).should.equal(b"9")
# Explicitly bounded range requests starting from the first byte.
key.get_contents_as_string(headers={"Range": "bytes=0-0"}).should.equal(b"0")
key.get_contents_as_string(headers={"Range": "bytes=0-49"}).should.equal(rep * 5)
key.get_contents_as_string(headers={"Range": "bytes=0-99"}).should.equal(rep * 10)
key.get_contents_as_string(headers={"Range": "bytes=0-100"}).should.equal(rep * 10)
key.get_contents_as_string(headers={"Range": "bytes=0-700"}).should.equal(rep * 10)
    # Explicitly bounded range requests starting from a middle byte.
key.get_contents_as_string(headers={"Range": "bytes=50-54"}).should.equal(rep[:5])
key.get_contents_as_string(headers={"Range": "bytes=50-99"}).should.equal(rep * 5)
key.get_contents_as_string(headers={"Range": "bytes=50-100"}).should.equal(rep * 5)
key.get_contents_as_string(headers={"Range": "bytes=50-700"}).should.equal(rep * 5)
# Explicitly bounded range requests starting from the last byte.
key.get_contents_as_string(headers={"Range": "bytes=99-99"}).should.equal(b"9")
key.get_contents_as_string(headers={"Range": "bytes=99-100"}).should.equal(b"9")
key.get_contents_as_string(headers={"Range": "bytes=99-700"}).should.equal(b"9")
# Suffix range requests.
key.get_contents_as_string(headers={"Range": "bytes=-1"}).should.equal(b"9")
key.get_contents_as_string(headers={"Range": "bytes=-60"}).should.equal(rep * 6)
key.get_contents_as_string(headers={"Range": "bytes=-100"}).should.equal(rep * 10)
key.get_contents_as_string(headers={"Range": "bytes=-101"}).should.equal(rep * 10)
key.get_contents_as_string(headers={"Range": "bytes=-700"}).should.equal(rep * 10)
key.size.should.equal(100)
@mock_s3_deprecated
def test_policy():
conn = boto.connect_s3()
bucket_name = "mybucket"
bucket = conn.create_bucket(bucket_name)
policy = json.dumps(
{
"Version": "2012-10-17",
"Id": "PutObjPolicy",
"Statement": [
{
"Sid": "DenyUnEncryptedObjectUploads",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::{bucket_name}/*".format(
bucket_name=bucket_name
),
"Condition": {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": "aws:kms"
}
},
}
],
}
)
with assert_raises(S3ResponseError) as err:
bucket.get_policy()
ex = err.exception
ex.box_usage.should.be.none
ex.error_code.should.equal("NoSuchBucketPolicy")
ex.message.should.equal("The bucket policy does not exist")
ex.reason.should.equal("Not Found")
ex.resource.should.be.none
ex.status.should.equal(404)
ex.body.should.contain(bucket_name)
ex.request_id.should_not.be.none
bucket.set_policy(policy).should.be.true
bucket = conn.get_bucket(bucket_name)
bucket.get_policy().decode("utf-8").should.equal(policy)
bucket.delete_policy()
with assert_raises(S3ResponseError) as err:
bucket.get_policy()


@mock_s3_deprecated
def test_website_configuration_xml():
conn = boto.connect_s3()
bucket = conn.create_bucket("test-bucket")
bucket.set_website_configuration_xml(TEST_XML)
bucket.get_website_configuration_xml().should.equal(TEST_XML)


@mock_s3_deprecated
def test_key_with_trailing_slash_in_ordinary_calling_format():
conn = boto.connect_s3(
"access_key",
"secret_key",
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.create_bucket("test_bucket_name")
key_name = "key_with_slash/"
key = Key(bucket, key_name)
key.set_contents_from_string("some value")
[k.name for k in bucket.get_all_keys()].should.contain(key_name)


@mock_s3
def test_boto3_key_etag():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome")
resp = s3.get_object(Bucket="mybucket", Key="steve")
resp["ETag"].should.equal('"d32bda93738f7e03adb22e66c90fbc04"')
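

# For a non-multipart PUT, S3 (and moto) derive the ETag from the MD5 of the
# object body, so the literal above can be sanity-checked without moto at all:
import hashlib

assert hashlib.md5(b"is awesome").hexdigest() == "d32bda93738f7e03adb22e66c90fbc04"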


@mock_s3
def test_website_redirect_location():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome")
resp = s3.get_object(Bucket="mybucket", Key="steve")
resp.get("WebsiteRedirectLocation").should.be.none
url = "https://github.com/spulec/moto"
s3.put_object(
Bucket="mybucket", Key="steve", Body=b"is awesome", WebsiteRedirectLocation=url
)
resp = s3.get_object(Bucket="mybucket", Key="steve")
resp["WebsiteRedirectLocation"].should.equal(url)


@mock_s3
def test_boto3_list_objects_truncated_response():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="one", Body=b"1")
s3.put_object(Bucket="mybucket", Key="two", Body=b"22")
s3.put_object(Bucket="mybucket", Key="three", Body=b"333")
# First list
resp = s3.list_objects(Bucket="mybucket", MaxKeys=1)
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "one"
assert resp["MaxKeys"] == 1
assert resp["IsTruncated"] == True
assert resp.get("Prefix") is None
assert resp["Delimiter"] == "None"
assert "NextMarker" in resp
next_marker = resp["NextMarker"]
# Second list
resp = s3.list_objects(Bucket="mybucket", MaxKeys=1, Marker=next_marker)
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "three"
assert resp["MaxKeys"] == 1
assert resp["IsTruncated"] == True
assert resp.get("Prefix") is None
assert resp["Delimiter"] == "None"
assert "NextMarker" in resp
next_marker = resp["NextMarker"]
# Third list
resp = s3.list_objects(Bucket="mybucket", MaxKeys=1, Marker=next_marker)
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "two"
assert resp["MaxKeys"] == 1
assert resp["IsTruncated"] == False
assert resp.get("Prefix") is None
assert resp["Delimiter"] == "None"
assert "NextMarker" not in resp
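

# The Marker/NextMarker handshake walked through manually above is what boto3's
# built-in paginator automates; a sketch of the equivalent call (helper name is
# ours, same mocked bucket assumed):
def _list_all_keys(client, bucket):
    paginator = client.get_paginator("list_objects")
    keys = []
    for page in paginator.paginate(Bucket=bucket, PaginationConfig={"PageSize": 1}):
        keys.extend(obj["Key"] for obj in page.get("Contents", []))
    return keys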


@mock_s3
def test_boto3_list_keys_xml_escaped():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
key_name = "Q&A.txt"
s3.put_object(Bucket="mybucket", Key=key_name, Body=b"is awesome")
resp = s3.list_objects_v2(Bucket="mybucket", Prefix=key_name)
assert resp["Contents"][0]["Key"] == key_name
assert resp["KeyCount"] == 1
assert resp["MaxKeys"] == 1000
assert resp["Prefix"] == key_name
assert resp["IsTruncated"] == False
assert "Delimiter" not in resp
assert "StartAfter" not in resp
assert "NextContinuationToken" not in resp
assert "Owner" not in resp["Contents"][0]


@mock_s3
def test_boto3_list_objects_v2_common_prefix_pagination():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
max_keys = 1
keys = ["test/{i}/{i}".format(i=i) for i in range(3)]
for key in keys:
s3.put_object(Bucket="mybucket", Key=key, Body=b"v")
prefixes = []
args = {
"Bucket": "mybucket",
"Delimiter": "/",
"Prefix": "test/",
"MaxKeys": max_keys,
}
resp = {"IsTruncated": True}
while resp.get("IsTruncated", False):
if "NextContinuationToken" in resp:
args["ContinuationToken"] = resp["NextContinuationToken"]
resp = s3.list_objects_v2(**args)
if "CommonPrefixes" in resp:
assert len(resp["CommonPrefixes"]) == max_keys
prefixes.extend(i["Prefix"] for i in resp["CommonPrefixes"])
assert prefixes == [k[: k.rindex("/") + 1] for k in keys]


@mock_s3
def test_boto3_list_objects_v2_truncated_response():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="one", Body=b"1")
s3.put_object(Bucket="mybucket", Key="two", Body=b"22")
s3.put_object(Bucket="mybucket", Key="three", Body=b"333")
# First list
resp = s3.list_objects_v2(Bucket="mybucket", MaxKeys=1)
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "one"
assert resp["MaxKeys"] == 1
assert resp["Prefix"] == ""
assert resp["KeyCount"] == 1
assert resp["IsTruncated"] == True
assert "Delimiter" not in resp
assert "StartAfter" not in resp
assert "Owner" not in listed_object # owner info was not requested
next_token = resp["NextContinuationToken"]
# Second list
resp = s3.list_objects_v2(
Bucket="mybucket", MaxKeys=1, ContinuationToken=next_token
)
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "three"
assert resp["MaxKeys"] == 1
assert resp["Prefix"] == ""
assert resp["KeyCount"] == 1
assert resp["IsTruncated"] == True
assert "Delimiter" not in resp
assert "StartAfter" not in resp
assert "Owner" not in listed_object
next_token = resp["NextContinuationToken"]
# Third list
resp = s3.list_objects_v2(
Bucket="mybucket", MaxKeys=1, ContinuationToken=next_token
)
listed_object = resp["Contents"][0]
2019-10-31 15:44:26 +00:00
assert listed_object["Key"] == "two"
assert resp["MaxKeys"] == 1
assert resp["Prefix"] == ""
assert resp["KeyCount"] == 1
assert resp["IsTruncated"] == False
assert "Delimiter" not in resp
assert "Owner" not in listed_object
assert "StartAfter" not in resp
assert "NextContinuationToken" not in resp


@mock_s3
def test_boto3_list_objects_v2_truncated_response_start_after():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="one", Body=b"1")
s3.put_object(Bucket="mybucket", Key="two", Body=b"22")
s3.put_object(Bucket="mybucket", Key="three", Body=b"333")
# First list
resp = s3.list_objects_v2(Bucket="mybucket", MaxKeys=1, StartAfter="one")
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "three"
assert resp["MaxKeys"] == 1
assert resp["Prefix"] == ""
assert resp["KeyCount"] == 1
assert resp["IsTruncated"] == True
assert resp["StartAfter"] == "one"
assert "Delimiter" not in resp
assert "Owner" not in listed_object
next_token = resp["NextContinuationToken"]
# Second list
# The ContinuationToken must take precedence over StartAfter.
resp = s3.list_objects_v2(
Bucket="mybucket", MaxKeys=1, StartAfter="one", ContinuationToken=next_token
)
listed_object = resp["Contents"][0]
assert listed_object["Key"] == "two"
assert resp["MaxKeys"] == 1
assert resp["Prefix"] == ""
assert resp["KeyCount"] == 1
assert resp["IsTruncated"] == False
# When ContinuationToken is given, StartAfter is ignored. This also means
# AWS does not return it in the response.
assert "StartAfter" not in resp
assert "Delimiter" not in resp
assert "Owner" not in listed_object


@mock_s3
def test_boto3_list_objects_v2_fetch_owner():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="one", Body=b"11")
resp = s3.list_objects_v2(Bucket="mybucket", FetchOwner=True)
owner = resp["Contents"][0]["Owner"]
assert "ID" in owner
assert "DisplayName" in owner
assert len(owner.keys()) == 2


@mock_s3
def test_boto3_list_objects_v2_truncate_combined_keys_and_folders():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
s3.put_object(Bucket="mybucket", Key="1/2", Body="")
s3.put_object(Bucket="mybucket", Key="2", Body="")
s3.put_object(Bucket="mybucket", Key="3/4", Body="")
s3.put_object(Bucket="mybucket", Key="4", Body="")
resp = s3.list_objects_v2(Bucket="mybucket", Prefix="", MaxKeys=2, Delimiter="/")
assert "Delimiter" in resp
assert resp["IsTruncated"] is True
assert resp["KeyCount"] == 2
assert len(resp["Contents"]) == 1
assert resp["Contents"][0]["Key"] == "2"
assert len(resp["CommonPrefixes"]) == 1
assert resp["CommonPrefixes"][0]["Prefix"] == "1/"
last_tail = resp["NextContinuationToken"]
resp = s3.list_objects_v2(
Bucket="mybucket", MaxKeys=2, Prefix="", Delimiter="/", StartAfter=last_tail
)
assert resp["KeyCount"] == 2
assert resp["IsTruncated"] is False
assert len(resp["Contents"]) == 1
assert resp["Contents"][0]["Key"] == "4"
assert len(resp["CommonPrefixes"]) == 1
assert resp["CommonPrefixes"][0]["Prefix"] == "3/"


@mock_s3
def test_boto3_bucket_create():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="blah")
s3.Object("blah", "hello.txt").put(Body="some text")
s3.Object("blah", "hello.txt").get()["Body"].read().decode("utf-8").should.equal(
"some text"
)


@mock_s3
def test_bucket_create_duplicate():
s3 = boto3.resource("s3", region_name="us-west-2")
s3.create_bucket(
Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}
)
with assert_raises(ClientError) as exc:
s3.create_bucket(
Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}
)
exc.exception.response["Error"]["Code"].should.equal("BucketAlreadyExists")


@mock_s3
def test_bucket_create_force_us_east_1():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
with assert_raises(ClientError) as exc:
s3.create_bucket(
Bucket="blah",
CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME},
)
exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint")


@mock_s3
def test_boto3_bucket_create_eu_central():
s3 = boto3.resource("s3", region_name="eu-central-1")
s3.create_bucket(
Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}
)
s3.Object("blah", "hello.txt").put(Body="some text")
s3.Object("blah", "hello.txt").get()["Body"].read().decode("utf-8").should.equal(
"some text"
)


@mock_s3
def test_boto3_head_object():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="blah")
s3.Object("blah", "hello.txt").put(Body="some text")
s3.Object("blah", "hello.txt").meta.client.head_object(
Bucket="blah", Key="hello.txt"
)
with assert_raises(ClientError) as e:
s3.Object("blah", "hello2.txt").meta.client.head_object(
Bucket="blah", Key="hello_bad.txt"
)
e.exception.response["Error"]["Code"].should.equal("404")


@mock_s3
def test_boto3_bucket_deletion():
cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
cli.create_bucket(Bucket="foobar")
cli.put_object(Bucket="foobar", Key="the-key", Body="some value")
# Try to delete a bucket that still has keys
cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(
cli.exceptions.ClientError,
(
"An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: "
"The bucket you tried to delete is not empty"
),
)
cli.delete_object(Bucket="foobar", Key="the-key")
cli.delete_bucket(Bucket="foobar")
# Get non-existing bucket
cli.head_bucket.when.called_with(Bucket="foobar").should.throw(
cli.exceptions.ClientError,
"An error occurred (404) when calling the HeadBucket operation: Not Found",
)
# Delete non-existing bucket
cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(
cli.exceptions.NoSuchBucket
)


@mock_s3
def test_boto3_get_object():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="blah")
s3.Object("blah", "hello.txt").put(Body="some text")
s3.Object("blah", "hello.txt").meta.client.head_object(
Bucket="blah", Key="hello.txt"
)
with assert_raises(ClientError) as e:
s3.Object("blah", "hello2.txt").get()
e.exception.response["Error"]["Code"].should.equal("NoSuchKey")


@mock_s3
def test_boto3_s3_content_type():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
my_bucket = s3.Bucket("my-cool-bucket")
my_bucket.create()
s3_path = "test_s3.py"
s3 = boto3.resource("s3", verify=False)
content_type = "text/python-x"
s3.Object(my_bucket.name, s3_path).put(
ContentType=content_type, Body=b"some python code", ACL="public-read"
)
s3.Object(my_bucket.name, s3_path).content_type.should.equal(content_type)


@mock_s3
def test_boto3_get_missing_object_with_part_number():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="blah")
with assert_raises(ClientError) as e:
s3.Object("blah", "hello.txt").meta.client.head_object(
Bucket="blah", Key="hello.txt", PartNumber=123
)
e.exception.response["Error"]["Code"].should.equal("404")


@mock_s3
def test_boto3_head_object_with_versioning():
s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
bucket = s3.create_bucket(Bucket="blah")
bucket.Versioning().enable()
old_content = "some text"
new_content = "some new text"
s3.Object("blah", "hello.txt").put(Body=old_content)
s3.Object("blah", "hello.txt").put(Body=new_content)
versions = list(s3.Bucket("blah").object_versions.all())
latest = list(filter(lambda item: item.is_latest, versions))[0]
oldest = list(filter(lambda item: not item.is_latest, versions))[0]
head_object = s3.Object("blah", "hello.txt").meta.client.head_object(
Bucket="blah", Key="hello.txt"
)
head_object["VersionId"].should.equal(latest.id)
head_object["ContentLength"].should.equal(len(new_content))
old_head_object = s3.Object("blah", "hello.txt").meta.client.head_object(
Bucket="blah", Key="hello.txt", VersionId=oldest.id
)
old_head_object["VersionId"].should.equal(oldest.id)
old_head_object["ContentLength"].should.equal(len(old_content))
old_head_object["VersionId"].should_not.equal(head_object["VersionId"])


@mock_s3
def test_boto3_copy_object_with_versioning():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(
Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.put_bucket_versioning(
Bucket="blah", VersioningConfiguration={"Status": "Enabled"}
)
client.put_object(Bucket="blah", Key="test1", Body=b"test1")
client.put_object(Bucket="blah", Key="test2", Body=b"test2")
obj1_version = client.get_object(Bucket="blah", Key="test1")["VersionId"]
obj2_version = client.get_object(Bucket="blah", Key="test2")["VersionId"]
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test1"}, Bucket="blah", Key="test2"
)
obj2_version_new = client.get_object(Bucket="blah", Key="test2")["VersionId"]
# Version should be different from the previous version
obj2_version_new.should_not.equal(obj2_version)
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test2", "VersionId": obj2_version},
Bucket="blah",
Key="test3",
)
obj3_version_new = client.get_object(Bucket="blah", Key="test3")["VersionId"]
obj3_version_new.should_not.equal(obj2_version_new)
# Copy file that doesn't exist
with assert_raises(ClientError) as e:
client.copy_object(
CopySource={"Bucket": "blah", "Key": "test4", "VersionId": obj2_version},
Bucket="blah",
Key="test5",
)
e.exception.response["Error"]["Code"].should.equal("404")
response = client.create_multipart_upload(Bucket="blah", Key="test4")
upload_id = response["UploadId"]
response = client.upload_part_copy(
Bucket="blah",
Key="test4",
CopySource={"Bucket": "blah", "Key": "test3", "VersionId": obj3_version_new},
UploadId=upload_id,
PartNumber=1,
)
etag = response["CopyPartResult"]["ETag"]
client.complete_multipart_upload(
Bucket="blah",
Key="test4",
UploadId=upload_id,
MultipartUpload={"Parts": [{"ETag": etag, "PartNumber": 1}]},
)
response = client.get_object(Bucket="blah", Key="test4")
data = response["Body"].read()
data.should.equal(b"test2")


@mock_s3
def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(
Bucket="src", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.create_bucket(
Bucket="dest", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}
)
client.put_bucket_versioning(
Bucket="dest", VersioningConfiguration={"Status": "Enabled"}
)
client.put_object(Bucket="src", Key="test", Body=b"content")
obj2_version_new = client.copy_object(
CopySource={"Bucket": "src", "Key": "test"}, Bucket="dest", Key="test"
).get("VersionId")
# VersionId should be present in the response
obj2_version_new.should_not.equal(None)


@mock_s3
def test_boto3_copy_object_with_replacement_tagging():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="mybucket")
client.put_object(
Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old"
)
client.copy_object(
CopySource={"Bucket": "mybucket", "Key": "original"},
Bucket="mybucket",
Key="copy1",
TaggingDirective="REPLACE",
Tagging="tag=new",
)
client.copy_object(
CopySource={"Bucket": "mybucket", "Key": "original"},
Bucket="mybucket",
Key="copy2",
TaggingDirective="COPY",
)
tags1 = client.get_object_tagging(Bucket="mybucket", Key="copy1")["TagSet"]
tags1.should.equal([{"Key": "tag", "Value": "new"}])
tags2 = client.get_object_tagging(Bucket="mybucket", Key="copy2")["TagSet"]
tags2.should.equal([{"Key": "tag", "Value": "old"}])
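# Note: COPY is AWS's documented default TaggingDirective, so the second
# copy_object call above would behave the same with the directive omitted.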


@mock_s3
def test_boto3_deleted_versionings_list():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="blah")
client.put_bucket_versioning(
Bucket="blah", VersioningConfiguration={"Status": "Enabled"}
)
client.put_object(Bucket="blah", Key="test1", Body=b"test1")
client.put_object(Bucket="blah", Key="test2", Body=b"test2")
client.delete_objects(Bucket="blah", Delete={"Objects": [{"Key": "test1"}]})
listed = client.list_objects_v2(Bucket="blah")
assert len(listed["Contents"]) == 1


@mock_s3
def test_boto3_delete_versioned_bucket():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="blah")
client.put_bucket_versioning(
Bucket="blah", VersioningConfiguration={"Status": "Enabled"}
)
resp = client.put_object(Bucket="blah", Key="test1", Body=b"test1")
client.delete_object(Bucket="blah", Key="test1", VersionId=resp["VersionId"])
client.delete_bucket(Bucket="blah")


@mock_s3
def test_boto3_get_object_if_modified_since():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "blah"
s3.create_bucket(Bucket=bucket_name)
key = "hello.txt"
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
with assert_raises(botocore.exceptions.ClientError) as err:
s3.get_object(
Bucket=bucket_name,
Key=key,
IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
)
e = err.exception
e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"})
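# botocore surfaces the HTTP 304 here as a generic ClientError with code
# "304" because GetObject has no modeled NotModified exception.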


@mock_s3
def test_boto3_head_object_if_modified_since():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "blah"
s3.create_bucket(Bucket=bucket_name)
key = "hello.txt"
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
with assert_raises(botocore.exceptions.ClientError) as err:
s3.head_object(
Bucket=bucket_name,
Key=key,
IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
)
e = err.exception
e.response["Error"].should.equal({"Code": "304", "Message": "Not Modified"})


@mock_s3
@reduced_min_part_size
def test_boto3_multipart_etag():
# Create a bucket so that the test can run
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
upload_id = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")["UploadId"]
part1 = b"0" * REDUCED_PART_SIZE
etags = []
etags.append(
s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=1,
UploadId=upload_id,
Body=part1,
)["ETag"]
)
# last part, can be less than 5 MB
part2 = b"1"
etags.append(
s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=2,
UploadId=upload_id,
Body=part2,
)["ETag"]
)
s3.complete_multipart_upload(
Bucket="mybucket",
Key="the-key",
UploadId=upload_id,
MultipartUpload={
"Parts": [
{"ETag": etag, "PartNumber": i} for i, etag in enumerate(etags, 1)
]
},
)
# we should get both parts as the key contents
resp = s3.get_object(Bucket="mybucket", Key="the-key")
resp["ETag"].should.equal(EXPECTED_ETAG)
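

# The "-2" suffix in EXPECTED_ETAG follows S3's multipart ETag convention,
# which moto mirrors: MD5 the concatenation of each part's binary MD5 digest,
# then append "-<number of parts>". A sketch of that rule (helper name is ours):
import hashlib


def _multipart_etag(parts):
    combined = b"".join(hashlib.md5(part).digest() for part in parts)
    return '"{}-{}"'.format(hashlib.md5(combined).hexdigest(), len(parts))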


@mock_s3
@reduced_min_part_size
def test_boto3_multipart_part_size():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="mybucket")
mpu = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")
mpu_id = mpu["UploadId"]
parts = []
n_parts = 10
for i in range(1, n_parts + 1):
part_size = REDUCED_PART_SIZE + i
body = b"1" * part_size
part = s3.upload_part(
Bucket="mybucket",
Key="the-key",
PartNumber=i,
UploadId=mpu_id,
Body=body,
ContentLength=len(body),
)
parts.append({"PartNumber": i, "ETag": part["ETag"]})
s3.complete_multipart_upload(
Bucket="mybucket",
Key="the-key",
UploadId=mpu_id,
MultipartUpload={"Parts": parts},
)
for i in range(1, n_parts + 1):
obj = s3.head_object(Bucket="mybucket", Key="the-key", PartNumber=i)
assert obj["ContentLength"] == REDUCED_PART_SIZE + i


@mock_s3
def test_boto3_put_object_with_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key, Body="test", Tagging="foo=bar")
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp["TagSet"].should.contain({"Key": "foo", "Value": "bar"})


@mock_s3
def test_boto3_put_bucket_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
# With 1 tag:
resp = s3.put_bucket_tagging(
Bucket=bucket_name, Tagging={"TagSet": [{"Key": "TagOne", "Value": "ValueOne"}]}
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# With multiple tags:
resp = s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagTwo", "Value": "ValueTwo"},
]
},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# No tags is also OK:
resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": []})
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# With duplicate tag keys:
with assert_raises(ClientError) as err:
resp = s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagOne", "Value": "ValueOneAgain"},
]
},
)
e = err.exception
e.response["Error"]["Code"].should.equal("InvalidTag")
e.response["Error"]["Message"].should.equal(
"Cannot provide multiple Tags with the same key"
)
# Cannot put tags that are "system" tags - i.e. tags that start with "aws:"
with assert_raises(ClientError) as ce:
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={"TagSet": [{"Key": "aws:sometag", "Value": "nope"}]},
)
e = ce.exception
e.response["Error"]["Code"].should.equal("InvalidTag")
e.response["Error"]["Message"].should.equal(
"System tags cannot be added/updated by requester"
)
# This is OK though:
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={"TagSet": [{"Key": "something:aws:stuff", "Value": "this is fine"}]},
)


@mock_s3
def test_boto3_get_bucket_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagTwo", "Value": "ValueTwo"},
]
},
)
# Get the tags for the bucket:
resp = s3.get_bucket_tagging(Bucket=bucket_name)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
len(resp["TagSet"]).should.equal(2)
# With no tags:
s3.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": []})
with assert_raises(ClientError) as err:
s3.get_bucket_tagging(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchTagSet")
e.response["Error"]["Message"].should.equal("The TagSet does not exist")


@mock_s3
def test_boto3_delete_bucket_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_tagging(
Bucket=bucket_name,
Tagging={
"TagSet": [
{"Key": "TagOne", "Value": "ValueOne"},
{"Key": "TagTwo", "Value": "ValueTwo"},
]
},
)
resp = s3.delete_bucket_tagging(Bucket=bucket_name)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204)
with assert_raises(ClientError) as err:
s3.get_bucket_tagging(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchTagSet")
e.response["Error"]["Message"].should.equal("The TagSet does not exist")


@mock_s3
def test_boto3_put_bucket_cors():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_cors(
Bucket=bucket_name,
CORSConfiguration={
"CORSRules": [
{
"AllowedOrigins": ["*"],
"AllowedMethods": ["GET", "POST"],
"AllowedHeaders": ["Authorization"],
"ExposeHeaders": ["x-amz-request-id"],
"MaxAgeSeconds": 123,
},
{
"AllowedOrigins": ["*"],
"AllowedMethods": ["PUT"],
"AllowedHeaders": ["Authorization"],
"ExposeHeaders": ["x-amz-request-id"],
"MaxAgeSeconds": 123,
},
]
},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
with assert_raises(ClientError) as err:
s3.put_bucket_cors(
Bucket=bucket_name,
CORSConfiguration={
"CORSRules": [
{"AllowedOrigins": ["*"], "AllowedMethods": ["NOTREAL", "POST"]}
]
},
)
e = err.exception
e.response["Error"]["Code"].should.equal("InvalidRequest")
e.response["Error"]["Message"].should.equal(
    "Found unsupported HTTP method in CORS config. Unsupported method is NOTREAL"
)
with assert_raises(ClientError) as err:
s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={"CORSRules": []})
e = err.exception
e.response["Error"]["Code"].should.equal("MalformedXML")
# And with more than the 100-rule maximum (101 rules):
many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101
with assert_raises(ClientError) as err:
s3.put_bucket_cors(
Bucket=bucket_name, CORSConfiguration={"CORSRules": many_rules}
)
e = err.exception
e.response["Error"]["Code"].should.equal("MalformedXML")


@mock_s3
def test_boto3_get_bucket_cors():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
# Without CORS:
with assert_raises(ClientError) as err:
s3.get_bucket_cors(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration")
e.response["Error"]["Message"].should.equal("The CORS configuration does not exist")
s3.put_bucket_cors(
Bucket=bucket_name,
CORSConfiguration={
"CORSRules": [
{
"AllowedOrigins": ["*"],
"AllowedMethods": ["GET", "POST"],
"AllowedHeaders": ["Authorization"],
"ExposeHeaders": ["x-amz-request-id"],
"MaxAgeSeconds": 123,
},
{
"AllowedOrigins": ["*"],
"AllowedMethods": ["PUT"],
"AllowedHeaders": ["Authorization"],
"ExposeHeaders": ["x-amz-request-id"],
"MaxAgeSeconds": 123,
},
]
},
)
resp = s3.get_bucket_cors(Bucket=bucket_name)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
len(resp["CORSRules"]).should.equal(2)


@mock_s3
def test_boto3_delete_bucket_cors():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_cors(
Bucket=bucket_name,
CORSConfiguration={
"CORSRules": [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}]
},
)
resp = s3.delete_bucket_cors(Bucket=bucket_name)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(204)
# Verify deletion:
with assert_raises(ClientError) as err:
s3.get_bucket_cors(Bucket=bucket_name)
e = err.exception
e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration")
e.response["Error"]["Message"].should.equal("The CORS configuration does not exist")


@mock_s3
def test_put_bucket_acl_body():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="bucket")
bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"]
s3.put_bucket_acl(
Bucket="bucket",
AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "WRITE",
},
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "READ_ACP",
},
],
"Owner": bucket_owner,
},
)
result = s3.get_bucket_acl(Bucket="bucket")
assert len(result["Grants"]) == 2
for g in result["Grants"]:
assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery"
assert g["Grantee"]["Type"] == "Group"
assert g["Permission"] in ["WRITE", "READ_ACP"]
# With one:
s3.put_bucket_acl(
Bucket="bucket",
AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "WRITE",
}
],
"Owner": bucket_owner,
},
)
result = s3.get_bucket_acl(Bucket="bucket")
assert len(result["Grants"]) == 1
# With no owner:
with assert_raises(ClientError) as err:
s3.put_bucket_acl(
Bucket="bucket",
AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "WRITE",
}
]
},
)
assert err.exception.response["Error"]["Code"] == "MalformedACLError"
# With incorrect permission:
with assert_raises(ClientError) as err:
s3.put_bucket_acl(
Bucket="bucket",
AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "lskjflkasdjflkdsjfalisdjflkdsjf",
}
],
"Owner": bucket_owner,
},
)
assert err.exception.response["Error"]["Code"] == "MalformedACLError"
# Clear the ACLs:
result = s3.put_bucket_acl(
Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}
)
assert not result.get("Grants")


@mock_s3
def test_put_bucket_notification():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="bucket")
# With no configuration:
result = s3.get_bucket_notification(Bucket="bucket")
assert not result.get("TopicConfigurations")
assert not result.get("QueueConfigurations")
assert not result.get("LambdaFunctionConfigurations")
# Place proper topic configuration:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"TopicConfigurations": [
{
"TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic",
"Events": ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
},
{
"TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic",
"Events": ["s3:ObjectCreated:*"],
"Filter": {
"Key": {
"FilterRules": [
{"Name": "prefix", "Value": "images/"},
{"Name": "suffix", "Value": "png"},
]
}
},
},
]
},
)
# Verify to completion:
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["TopicConfigurations"]) == 2
assert not result.get("QueueConfigurations")
assert not result.get("LambdaFunctionConfigurations")
assert (
result["TopicConfigurations"][0]["TopicArn"]
== "arn:aws:sns:us-east-1:012345678910:mytopic"
)
assert (
result["TopicConfigurations"][1]["TopicArn"]
== "arn:aws:sns:us-east-1:012345678910:myothertopic"
)
assert len(result["TopicConfigurations"][0]["Events"]) == 2
assert len(result["TopicConfigurations"][1]["Events"]) == 1
assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
assert result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*"
assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*"
assert result["TopicConfigurations"][0]["Id"]
assert result["TopicConfigurations"][1]["Id"]
assert not result["TopicConfigurations"][0].get("Filter")
assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2
assert (
result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"]
== "prefix"
)
assert (
result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"]
== "images/"
)
assert (
result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"]
== "suffix"
)
assert (
result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"]
== "png"
)
# Place proper queue configuration:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"QueueConfigurations": [
{
"Id": "SomeID",
"QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue",
"Events": ["s3:ObjectCreated:*"],
"Filter": {
"Key": {"FilterRules": [{"Name": "prefix", "Value": "images/"}]}
},
}
]
},
)
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["QueueConfigurations"]) == 1
assert not result.get("TopicConfigurations")
assert not result.get("LambdaFunctionConfigurations")
assert result["QueueConfigurations"][0]["Id"] == "SomeID"
assert (
result["QueueConfigurations"][0]["QueueArn"]
== "arn:aws:sqs:us-east-1:012345678910:myQueue"
)
assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
assert len(result["QueueConfigurations"][0]["Events"]) == 1
assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1
assert (
result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"]
== "prefix"
)
assert (
result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"]
== "images/"
)
# Place proper Lambda configuration:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"LambdaFunctionConfigurations": [
{
"LambdaFunctionArn": "arn:aws:lambda:us-east-1:012345678910:function:lambda",
"Events": ["s3:ObjectCreated:*"],
"Filter": {
"Key": {"FilterRules": [{"Name": "prefix", "Value": "images/"}]}
},
}
]
},
)
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["LambdaFunctionConfigurations"]) == 1
assert not result.get("TopicConfigurations")
assert not result.get("QueueConfigurations")
assert result["LambdaFunctionConfigurations"][0]["Id"]
assert (
result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"]
== "arn:aws:lambda:us-east-1:012345678910:function:lambda"
)
assert (
result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
)
assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1
assert (
len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"])
== 1
)
assert (
result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0][
"Name"
]
== "prefix"
)
assert (
result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0][
"Value"
]
== "images/"
)
# And with all 3 set:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"TopicConfigurations": [
{
"TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic",
"Events": ["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
}
],
"LambdaFunctionConfigurations": [
{
"LambdaFunctionArn": "arn:aws:lambda:us-east-1:012345678910:function:lambda",
"Events": ["s3:ObjectCreated:*"],
}
],
"QueueConfigurations": [
{
"QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue",
"Events": ["s3:ObjectCreated:*"],
}
],
},
)
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert len(result["LambdaFunctionConfigurations"]) == 1
assert len(result["TopicConfigurations"]) == 1
assert len(result["QueueConfigurations"]) == 1
# And clear it out:
s3.put_bucket_notification_configuration(
Bucket="bucket", NotificationConfiguration={}
)
result = s3.get_bucket_notification_configuration(Bucket="bucket")
assert not result.get("TopicConfigurations")
assert not result.get("QueueConfigurations")
assert not result.get("LambdaFunctionConfigurations")


@mock_s3
def test_put_bucket_notification_errors():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket="bucket")
# With incorrect ARNs:
for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]:
with assert_raises(ClientError) as err:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"{}Configurations".format(tech): [
{
"{}Arn".format(
tech
): "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj",
"Events": ["s3:ObjectCreated:*"],
}
]
},
)
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert (
err.exception.response["Error"]["Message"] == "The ARN is not well formed"
)
# Region not the same as the bucket:
with assert_raises(ClientError) as err:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"QueueConfigurations": [
{
"QueueArn": "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj",
"Events": ["s3:ObjectCreated:*"],
}
]
},
)
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert (
err.exception.response["Error"]["Message"]
== "The notification destination service region is not valid for the bucket location constraint"
)
# Invalid event name:
with assert_raises(ClientError) as err:
s3.put_bucket_notification_configuration(
Bucket="bucket",
NotificationConfiguration={
"QueueConfigurations": [
{
"QueueArn": "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj",
"Events": ["notarealeventname"],
}
]
},
)
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert (
err.exception.response["Error"]["Message"]
== "The event is not supported for notifications"
)


@mock_s3
def test_boto3_put_bucket_logging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
log_bucket = "logbucket"
wrong_region_bucket = "wrongregionlogbucket"
s3.create_bucket(Bucket=bucket_name)
s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later...
s3.create_bucket(
Bucket=wrong_region_bucket,
CreateBucketConfiguration={"LocationConstraint": "us-west-2"},
)
# No logging config:
result = s3.get_bucket_logging(Bucket=bucket_name)
assert not result.get("LoggingEnabled")
# A log-bucket that doesn't exist:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {"TargetBucket": "IAMNOTREAL", "TargetPrefix": ""}
},
)
assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
# A log-bucket that's missing the proper ACLs for LogDelivery:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {"TargetBucket": log_bucket, "TargetPrefix": ""}
},
)
assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging"
assert "log-delivery" in err.exception.response["Error"]["Message"]
# Add the proper "log-delivery" ACL to the log buckets:
bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"]
for bucket in [log_bucket, wrong_region_bucket]:
s3.put_bucket_acl(
Bucket=bucket,
AccessControlPolicy={
"Grants": [
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "WRITE",
},
{
"Grantee": {
"URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
"Type": "Group",
},
"Permission": "READ_ACP",
},
{
"Grantee": {"Type": "CanonicalUser", "ID": bucket_owner["ID"]},
"Permission": "FULL_CONTROL",
},
],
"Owner": bucket_owner,
},
)
# A log-bucket that's in the wrong region:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": wrong_region_bucket,
"TargetPrefix": "",
}
},
)
assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted"
# Correct logging:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
}
},
)
result = s3.get_bucket_logging(Bucket=bucket_name)
assert result["LoggingEnabled"]["TargetBucket"] == log_bucket
assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name)
assert not result["LoggingEnabled"].get("TargetGrants")
# And disabling:
s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={})
assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled")
# And enabling with multiple target grants:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
"TargetGrants": [
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser",
},
"Permission": "READ",
},
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser",
},
"Permission": "WRITE",
},
],
}
},
)
result = s3.get_bucket_logging(Bucket=bucket_name)
assert len(result["LoggingEnabled"]["TargetGrants"]) == 2
assert (
result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"]
== "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274"
)
# Test with just 1 grant:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
"TargetGrants": [
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser",
},
"Permission": "READ",
}
],
}
},
)
result = s3.get_bucket_logging(Bucket=bucket_name)
assert len(result["LoggingEnabled"]["TargetGrants"]) == 1
# With an invalid grant:
with assert_raises(ClientError) as err:
s3.put_bucket_logging(
Bucket=bucket_name,
BucketLoggingStatus={
"LoggingEnabled": {
"TargetBucket": log_bucket,
"TargetPrefix": "{}/".format(bucket_name),
"TargetGrants": [
{
"Grantee": {
"ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274",
"Type": "CanonicalUser",
},
"Permission": "NOTAREALPERM",
}
],
}
},
)
assert err.exception.response["Error"]["Code"] == "MalformedXML"


@mock_s3
def test_boto3_put_object_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
with assert_raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
e = err.exception
e.response["Error"].should.equal(
{
"Code": "NoSuchKey",
"Message": "The specified key does not exist.",
"RequestID": "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE",
}
)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)


@mock_s3
def test_boto3_put_object_tagging_on_earliest_version():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3_resource = boto3.resource("s3")
bucket_versioning = s3_resource.BucketVersioning(bucket_name)
bucket_versioning.enable()
bucket_versioning.status.should.equal("Enabled")
with assert_raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
e = err.exception
e.response["Error"].should.equal(
{
"Code": "NoSuchKey",
"Message": "The specified key does not exist.",
"RequestID": "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE",
}
)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
s3.put_object(Bucket=bucket_name, Key=key, Body="test_updated")
object_versions = list(s3_resource.Bucket(bucket_name).object_versions.all())
first_object = object_versions[0]
second_object = object_versions[1]
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
VersionId=first_object.id,
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# Older version has tags while the most recent does not
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}]
)
resp = s3.get_object_tagging(
Bucket=bucket_name, Key=key, VersionId=second_object.id
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp["TagSet"].should.equal([])


@mock_s3
def test_boto3_put_object_tagging_on_both_version():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3_resource = boto3.resource("s3")
bucket_versioning = s3_resource.BucketVersioning(bucket_name)
bucket_versioning.enable()
bucket_versioning.status.should.equal("Enabled")
with assert_raises(ClientError) as err:
s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
)
e = err.exception
e.response["Error"].should.equal(
{
"Code": "NoSuchKey",
"Message": "The specified key does not exist.",
"RequestID": "7a62c49f-347e-4fc4-9331-6e8eEXAMPLE",
}
)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
s3.put_object(Bucket=bucket_name, Key=key, Body="test_updated")
object_versions = list(s3_resource.Bucket(bucket_name).object_versions.all())
first_object = object_versions[0]
second_object = object_versions[1]
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
VersionId=first_object.id,
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "baz"},
{"Key": "item2", "Value": "bin"},
]
},
VersionId=second_object.id,
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}]
)
resp = s3.get_object_tagging(
Bucket=bucket_name, Key=key, VersionId=second_object.id
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "baz"}, {"Key": "item2", "Value": "bin"}]
)


@mock_s3
def test_boto3_put_object_tagging_with_single_tag():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={"TagSet": [{"Key": "item1", "Value": "foo"}]},
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)


@mock_s3
def test_boto3_get_object_tagging():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-tags"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key, Body="test")
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp["TagSet"].should.have.length_of(0)
resp = s3.put_object_tagging(
Bucket=bucket_name,
Key=key,
Tagging={
"TagSet": [
{"Key": "item1", "Value": "foo"},
{"Key": "item2", "Value": "bar"},
]
},
2017-07-16 02:36:12 +00:00
)
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key)
resp["TagSet"].should.have.length_of(2)
resp["TagSet"].should.contain({"Key": "item1", "Value": "foo"})
resp["TagSet"].should.contain({"Key": "item2", "Value": "bar"})


@mock_s3
def test_boto3_list_object_versions():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-versions"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
)
items = (six.b("v1"), six.b("v2"))
for body in items:
s3.put_object(Bucket=bucket_name, Key=key, Body=body)
response = s3.list_object_versions(Bucket=bucket_name)
# Two object versions should be returned
len(response["Versions"]).should.equal(2)
keys = set([item["Key"] for item in response["Versions"]])
keys.should.equal({key})
# Test latest object version is returned
response = s3.get_object(Bucket=bucket_name, Key=key)
response["Body"].read().should.equal(items[-1])
@mock_s3
def test_boto3_list_object_versions_with_versioning_disabled():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-versions"
s3.create_bucket(Bucket=bucket_name)
items = (six.b("v1"), six.b("v2"))
for body in items:
s3.put_object(Bucket=bucket_name, Key=key, Body=body)
response = s3.list_object_versions(Bucket=bucket_name)
# One object version should be returned
len(response["Versions"]).should.equal(1)
response["Versions"][0]["Key"].should.equal(key)
# The version id should be the string null
response["Versions"][0]["VersionId"].should.equal("null")
# Test latest object version is returned
response = s3.get_object(Bucket=bucket_name, Key=key)
response["Body"].read().should.equal(items[-1])
@mock_s3
def test_boto3_list_object_versions_with_versioning_enabled_late():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-versions"
s3.create_bucket(Bucket=bucket_name)
items = (six.b("v1"), six.b("v2"))
s3.put_object(Bucket=bucket_name, Key=key, Body=six.b("v1"))
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
)
s3.put_object(Bucket=bucket_name, Key=key, Body=six.b("v2"))
response = s3.list_object_versions(Bucket=bucket_name)
# Two object versions should be returned
len(response["Versions"]).should.equal(2)
keys = set([item["Key"] for item in response["Versions"]])
keys.should.equal({key})
# There should still be a null version id.
versionsId = set([item["VersionId"] for item in response["Versions"]])
versionsId.should.contain("null")
# Test latest object version is returned
response = s3.get_object(Bucket=bucket_name, Key=key)
response["Body"].read().should.equal(items[-1])
@mock_s3
def test_boto3_bad_prefix_list_object_versions():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-versions"
bad_prefix = "key-that-does-not-exist"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
)
items = (six.b("v1"), six.b("v2"))
for body in items:
s3.put_object(Bucket=bucket_name, Key=key, Body=body)
response = s3.list_object_versions(Bucket=bucket_name, Prefix=bad_prefix)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response.should_not.contain("Versions")
response.should_not.contain("DeleteMarkers")
2017-05-14 17:00:26 +00:00
@mock_s3
def test_boto3_delete_markers():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-versions-and-unicode-ó"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
2017-05-14 17:00:26 +00:00
)
items = (six.b("v1"), six.b("v2"))
for body in items:
s3.put_object(Bucket=bucket_name, Key=key, Body=body)
s3.delete_objects(Bucket=bucket_name, Delete={"Objects": [{"Key": key}]})
with assert_raises(ClientError) as e:
s3.get_object(Bucket=bucket_name, Key=key)
e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
response = s3.list_object_versions(Bucket=bucket_name)
response["Versions"].should.have.length_of(2)
response["DeleteMarkers"].should.have.length_of(1)
s3.delete_object(
Bucket=bucket_name, Key=key, VersionId=response["DeleteMarkers"][0]["VersionId"]
2017-05-14 17:00:26 +00:00
)
response = s3.get_object(Bucket=bucket_name, Key=key)
response["Body"].read().should.equal(items[-1])
response = s3.list_object_versions(Bucket=bucket_name)
response["Versions"].should.have.length_of(2)
    # We've asserted there are only 2 records, so one is newest and one is oldest
    latest = list(filter(lambda item: item["IsLatest"], response["Versions"]))[0]
    oldest = list(filter(lambda item: not item["IsLatest"], response["Versions"]))[0]
    # Double check the ordering of the version IDs
    latest["VersionId"].should_not.equal(oldest["VersionId"])
    # Double check the name is still unicode
    latest["Key"].should.equal("key-with-versions-and-unicode-ó")
    oldest["Key"].should.equal("key-with-versions-and-unicode-ó")


@mock_s3
def test_boto3_multiple_delete_markers():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
key = "key-with-versions-and-unicode-ó"
s3.create_bucket(Bucket=bucket_name)
s3.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": "Enabled"}
)
items = (six.b("v1"), six.b("v2"))
for body in items:
s3.put_object(Bucket=bucket_name, Key=key, Body=body)
# Delete the object twice to add multiple delete markers
s3.delete_object(Bucket=bucket_name, Key=key)
s3.delete_object(Bucket=bucket_name, Key=key)
response = s3.list_object_versions(Bucket=bucket_name)
response["DeleteMarkers"].should.have.length_of(2)
    with assert_raises(ClientError) as e:
        s3.get_object(Bucket=bucket_name, Key=key)

    e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
# Remove both delete markers to restore the object
s3.delete_object(
Bucket=bucket_name, Key=key, VersionId=response["DeleteMarkers"][0]["VersionId"]
)
s3.delete_object(
Bucket=bucket_name, Key=key, VersionId=response["DeleteMarkers"][1]["VersionId"]
)
response = s3.get_object(Bucket=bucket_name, Key=key)
response["Body"].read().should.equal(items[-1])
response = s3.list_object_versions(Bucket=bucket_name)
response["Versions"].should.have.length_of(2)
    # We've asserted there are only 2 records, so one is newest and one is oldest
    latest = list(filter(lambda item: item["IsLatest"], response["Versions"]))[0]
    oldest = list(filter(lambda item: not item["IsLatest"], response["Versions"]))[0]
    # Double check the ordering of the version IDs
    latest["VersionId"].should_not.equal(oldest["VersionId"])
    # Double check the name is still unicode
    latest["Key"].should.equal("key-with-versions-and-unicode-ó")
    oldest["Key"].should.equal("key-with-versions-and-unicode-ó")
@mock_s3
def test_get_stream_gzipped():
payload = b"this is some stuff here"
s3_client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3_client.create_bucket(Bucket="moto-tests")
buffer_ = BytesIO()
with GzipFile(fileobj=buffer_, mode="w") as f:
f.write(payload)
payload_gz = buffer_.getvalue()
s3_client.put_object(
Bucket="moto-tests", Key="keyname", Body=payload_gz, ContentEncoding="gzip"
)
obj = s3_client.get_object(Bucket="moto-tests", Key="keyname")
res = zlib.decompress(obj["Body"].read(), 16 + zlib.MAX_WBITS)
assert res == payload
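

# For reference, an equivalent way to decode the body without zlib's
# 16 + MAX_WBITS window trick is to wrap it in GzipFile. This helper is an
# illustrative sketch only; the test above keeps its original zlib call.
def _gunzip(data):
    # GzipFile consumes the gzip header that raw zlib.decompress() otherwise
    # needs the 16 + zlib.MAX_WBITS flag to skip.
    with GzipFile(fileobj=BytesIO(data), mode="rb") as f:
        return f.read()
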
TEST_XML = """\
<?xml version="1.0" encoding="UTF-8"?>
<ns0:WebsiteConfiguration xmlns:ns0="http://s3.amazonaws.com/doc/2006-03-01/">
<ns0:IndexDocument>
<ns0:Suffix>index.html</ns0:Suffix>
</ns0:IndexDocument>
<ns0:RoutingRules>
<ns0:RoutingRule>
<ns0:Condition>
<ns0:KeyPrefixEquals>test/testing</ns0:KeyPrefixEquals>
</ns0:Condition>
<ns0:Redirect>
<ns0:ReplaceKeyWith>test.txt</ns0:ReplaceKeyWith>
</ns0:Redirect>
</ns0:RoutingRule>
</ns0:RoutingRules>
</ns0:WebsiteConfiguration>
"""
@mock_s3
def test_boto3_bucket_name_too_long():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket="x" * 64)
    exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName")


@mock_s3
def test_boto3_bucket_name_too_short():
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket="x" * 2)
    exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName")


@mock_s3
def test_accelerated_none_when_unspecified():
bucket_name = "some_bucket"
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
    resp.shouldnt.have.key("Status")


@mock_s3
def test_can_enable_bucket_acceleration():
bucket_name = "some_bucket"
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"}
)
resp.keys().should.have.length_of(
1
) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.should.have.key("Status")
resp["Status"].should.equal("Enabled")
@mock_s3
def test_can_suspend_bucket_acceleration():
bucket_name = "some_bucket"
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"}
)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name, AccelerateConfiguration={"Status": "Suspended"}
)
resp.keys().should.have.length_of(
1
) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.should.have.key("Status")
resp["Status"].should.equal("Suspended")
@mock_s3
def test_suspending_acceleration_on_not_configured_bucket_does_nothing():
bucket_name = "some_bucket"
s3 = boto3.client("s3")
s3.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={"LocationConstraint": "us-west-1"},
)
resp = s3.put_bucket_accelerate_configuration(
Bucket=bucket_name, AccelerateConfiguration={"Status": "Suspended"}
)
resp.keys().should.have.length_of(
1
) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
    resp.shouldnt.have.key("Status")


@mock_s3
def test_accelerate_configuration_status_validation():
bucket_name = "some_bucket"
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
with assert_raises(ClientError) as exc:
s3.put_bucket_accelerate_configuration(
Bucket=bucket_name, AccelerateConfiguration={"Status": "bad_status"}
)
    exc.exception.response["Error"]["Code"].should.equal("MalformedXML")


@mock_s3
def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots():
bucket_name = "some.bucket.with.dots"
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
with assert_raises(ClientError) as exc:
s3.put_bucket_accelerate_configuration(
Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"}
)
    exc.exception.response["Error"]["Code"].should.equal("InvalidRequest")


def store_and_read_back_a_key(key):
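    """Round-trip `key` through a fresh bucket; assumes an active @mock_s3 context."""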
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
body = b"Some body"
s3.create_bucket(Bucket=bucket_name)
s3.put_object(Bucket=bucket_name, Key=key, Body=body)
response = s3.get_object(Bucket=bucket_name, Key=key)
response["Body"].read().should.equal(body)
@mock_s3
def test_paths_with_leading_slashes_work():
store_and_read_back_a_key("/a-key")
@mock_s3
def test_root_dir_with_empty_name_works():
if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
        raise SkipTest("Does not work in server mode due to an error in Werkzeug")
store_and_read_back_a_key("/")
2019-10-31 15:44:26 +00:00
@parameterized(
[("foo/bar/baz",), ("foo",), ("foo/run_dt%3D2019-01-01%252012%253A30%253A00",)]
)
@mock_s3
def test_delete_objects_with_url_encoded_key(key):
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
bucket_name = "mybucket"
body = b"Some body"
    s3.create_bucket(Bucket=bucket_name)

    def put_object():
        s3.put_object(Bucket=bucket_name, Key=key, Body=body)

    def assert_deleted():
with assert_raises(ClientError) as e:
s3.get_object(Bucket=bucket_name, Key=key)
        e.exception.response["Error"]["Code"].should.equal("NoSuchKey")

    put_object()
s3.delete_object(Bucket=bucket_name, Key=key)
assert_deleted()
put_object()
s3.delete_objects(Bucket=bucket_name, Delete={"Objects": [{"Key": key}]})
    assert_deleted()


@mock_s3
@mock_config
def test_public_access_block():
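    """Exercise get/put/delete of the S3 Public Access Block and its AWS Config view."""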
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="mybucket")
# Try to get the public access block (should not exist by default)
with assert_raises(ClientError) as ce:
client.get_public_access_block(Bucket="mybucket")
assert (
ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
)
assert (
ce.exception.response["Error"]["Message"]
== "The public access block configuration was not found"
)
assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 404
# Put a public block in place:
test_map = {
"BlockPublicAcls": False,
"IgnorePublicAcls": False,
"BlockPublicPolicy": False,
"RestrictPublicBuckets": False,
}
for field in test_map.keys():
# Toggle:
test_map[field] = True
client.put_public_access_block(
Bucket="mybucket", PublicAccessBlockConfiguration=test_map
)
# Test:
assert (
test_map
== client.get_public_access_block(Bucket="mybucket")[
"PublicAccessBlockConfiguration"
]
)
# Assume missing values are default False:
client.put_public_access_block(
Bucket="mybucket", PublicAccessBlockConfiguration={"BlockPublicAcls": True}
)
assert client.get_public_access_block(Bucket="mybucket")[
"PublicAccessBlockConfiguration"
] == {
"BlockPublicAcls": True,
"IgnorePublicAcls": False,
"BlockPublicPolicy": False,
"RestrictPublicBuckets": False,
}
# Test with a blank PublicAccessBlockConfiguration:
with assert_raises(ClientError) as ce:
client.put_public_access_block(
Bucket="mybucket", PublicAccessBlockConfiguration={}
)
assert ce.exception.response["Error"]["Code"] == "InvalidRequest"
assert (
ce.exception.response["Error"]["Message"]
== "Must specify at least one configuration."
)
assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 400
# Test that things work with AWS Config:
config_client = boto3.client("config", region_name=DEFAULT_REGION_NAME)
result = config_client.get_resource_config_history(
resourceType="AWS::S3::Bucket", resourceId="mybucket"
)
pub_block_config = json.loads(
result["configurationItems"][0]["supplementaryConfiguration"][
"PublicAccessBlockConfiguration"
]
)
assert pub_block_config == {
"blockPublicAcls": True,
"ignorePublicAcls": False,
"blockPublicPolicy": False,
"restrictPublicBuckets": False,
}
# Delete:
client.delete_public_access_block(Bucket="mybucket")
with assert_raises(ClientError) as ce:
client.get_public_access_block(Bucket="mybucket")
assert (
ce.exception.response["Error"]["Code"] == "NoSuchPublicAccessBlockConfiguration"
    )


@mock_s3
def test_s3_public_access_block_to_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
public_access_block = {
"BlockPublicAcls": "True",
"IgnorePublicAcls": "False",
"BlockPublicPolicy": "True",
"RestrictPublicBuckets": "False",
}
# Python 2 unicode issues:
if sys.version_info[0] < 3:
public_access_block = py2_strip_unicode_keys(public_access_block)
# Add a public access block:
s3_config_query.backends["global"].put_bucket_public_access_block(
"bucket1", public_access_block
)
result = (
s3_config_query.backends["global"]
.buckets["bucket1"]
.public_access_block.to_config_dict()
)
convert_bool = lambda x: x == "True"
for key, value in public_access_block.items():
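        # Each key is lower-camelcased in the config dict,
        # e.g. "BlockPublicAcls" -> "blockPublicAcls".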
assert result[
"{lowercase}{rest}".format(lowercase=key[0].lower(), rest=key[1:])
] == convert_bool(value)
# Verify that this resides in the full bucket's to_config_dict:
full_result = s3_config_query.backends["global"].buckets["bucket1"].to_config_dict()
assert (
json.loads(
full_result["supplementaryConfiguration"]["PublicAccessBlockConfiguration"]
)
== result
    )


@mock_s3
def test_list_config_discovered_resources():
from moto.s3.config import s3_config_query
# Without any buckets:
    assert s3_config_query.list_config_service_resources(
        None, None, 100, None
    ) == ([], None)
# With 10 buckets in us-west-2:
for x in range(0, 10):
s3_config_query.backends["global"].create_bucket(
"bucket{}".format(x), "us-west-2"
)
# With 2 buckets in eu-west-1:
for x in range(10, 12):
s3_config_query.backends["global"].create_bucket(
"eu-bucket{}".format(x), "eu-west-1"
)
result, next_token = s3_config_query.list_config_service_resources(
None, None, 100, None
)
assert not next_token
assert len(result) == 12
for x in range(0, 10):
assert result[x] == {
"type": "AWS::S3::Bucket",
"id": "bucket{}".format(x),
"name": "bucket{}".format(x),
"region": "us-west-2",
}
for x in range(10, 12):
assert result[x] == {
"type": "AWS::S3::Bucket",
"id": "eu-bucket{}".format(x),
"name": "eu-bucket{}".format(x),
"region": "eu-west-1",
}
# With a name:
result, next_token = s3_config_query.list_config_service_resources(
None, "bucket0", 100, None
)
assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token
# With a region:
result, next_token = s3_config_query.list_config_service_resources(
None, None, 100, None, resource_region="eu-west-1"
)
assert len(result) == 2 and not next_token and result[1]["name"] == "eu-bucket11"
# With resource ids:
result, next_token = s3_config_query.list_config_service_resources(
["bucket0", "bucket1"], None, 100, None
)
assert (
len(result) == 2
and result[0]["name"] == "bucket0"
and result[1]["name"] == "bucket1"
and not next_token
)
# With duplicated resource ids:
result, next_token = s3_config_query.list_config_service_resources(
["bucket0", "bucket0"], None, 100, None
)
assert len(result) == 1 and result[0]["name"] == "bucket0" and not next_token
# Pagination:
result, next_token = s3_config_query.list_config_service_resources(
None, None, 1, None
)
assert (
len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
)
# Last Page:
result, next_token = s3_config_query.list_config_service_resources(
None, None, 1, "eu-bucket11", resource_region="eu-west-1"
)
assert len(result) == 1 and result[0]["name"] == "eu-bucket11" and not next_token
# With a list of buckets:
result, next_token = s3_config_query.list_config_service_resources(
["bucket0", "bucket1"], None, 1, None
)
assert (
len(result) == 1 and result[0]["name"] == "bucket0" and next_token == "bucket1"
)
# With an invalid page:
with assert_raises(InvalidNextTokenException) as inte:
s3_config_query.list_config_service_resources(None, None, 1, "notabucket")
assert "The nextToken provided is invalid" in inte.exception.message
@mock_s3
def test_s3_lifecycle_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
# And a lifecycle policy
lifecycle = [
{
"ID": "rule1",
"Status": "Enabled",
"Filter": {"Prefix": ""},
"Expiration": {"Days": 1},
},
{
"ID": "rule2",
"Status": "Enabled",
"Filter": {
"And": {
"Prefix": "some/path",
"Tag": [{"Key": "TheKey", "Value": "TheValue"}],
}
},
"Expiration": {"Days": 1},
},
{"ID": "rule3", "Status": "Enabled", "Filter": {}, "Expiration": {"Days": 1}},
{
"ID": "rule4",
"Status": "Enabled",
"Filter": {"Prefix": ""},
"AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 1},
},
]
s3_config_query.backends["global"].set_bucket_lifecycle("bucket1", lifecycle)
# Get the rules for this:
lifecycles = [
rule.to_config_dict()
for rule in s3_config_query.backends["global"].buckets["bucket1"].rules
]
# Verify the first:
assert lifecycles[0] == {
"id": "rule1",
"prefix": None,
"status": "Enabled",
"expirationInDays": 1,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": None,
"filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
}
# Verify the second:
assert lifecycles[1] == {
"id": "rule2",
"prefix": None,
"status": "Enabled",
"expirationInDays": 1,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": None,
"filter": {
"predicate": {
"type": "LifecycleAndOperator",
"operands": [
{"type": "LifecyclePrefixPredicate", "prefix": "some/path"},
{
"type": "LifecycleTagPredicate",
"tag": {"key": "TheKey", "value": "TheValue"},
},
],
}
},
}
# And the third:
assert lifecycles[2] == {
"id": "rule3",
"prefix": None,
"status": "Enabled",
"expirationInDays": 1,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": None,
"filter": {"predicate": None},
}
# And the last:
assert lifecycles[3] == {
"id": "rule4",
"prefix": None,
"status": "Enabled",
"expirationInDays": None,
"expiredObjectDeleteMarker": None,
"noncurrentVersionExpirationInDays": -1,
"expirationDate": None,
"transitions": None,
"noncurrentVersionTransitions": None,
"abortIncompleteMultipartUpload": {"daysAfterInitiation": 1},
"filter": {"predicate": {"type": "LifecyclePrefixPredicate", "prefix": ""}},
    }


@mock_s3
def test_s3_notification_config_dict():
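    """Bucket notifications should project into Config's camelCase schema."""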
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
# And some notifications:
notifications = {
"TopicConfiguration": [
{
"Id": "Topic",
"Topic": "arn:aws:sns:us-west-2:012345678910:mytopic",
"Event": [
"s3:ReducedRedundancyLostObject",
"s3:ObjectRestore:Completed",
],
}
],
"QueueConfiguration": [
{
"Id": "Queue",
"Queue": "arn:aws:sqs:us-west-2:012345678910:myqueue",
"Event": ["s3:ObjectRemoved:Delete"],
"Filter": {
"S3Key": {
"FilterRule": [{"Name": "prefix", "Value": "stuff/here/"}]
}
},
}
],
"CloudFunctionConfiguration": [
{
"Id": "Lambda",
"CloudFunction": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
"Event": [
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:Put",
],
"Filter": {
"S3Key": {"FilterRule": [{"Name": "suffix", "Value": ".png"}]}
},
}
],
}
s3_config_query.backends["global"].put_bucket_notification_configuration(
"bucket1", notifications
)
# Get the notifications for this:
notifications = (
s3_config_query.backends["global"]
.buckets["bucket1"]
.notification_configuration.to_config_dict()
)
# Verify it all:
assert notifications == {
"configurations": {
"Topic": {
"events": [
"s3:ReducedRedundancyLostObject",
"s3:ObjectRestore:Completed",
],
"filter": None,
"objectPrefixes": [],
"topicARN": "arn:aws:sns:us-west-2:012345678910:mytopic",
"type": "TopicConfiguration",
},
"Queue": {
"events": ["s3:ObjectRemoved:Delete"],
"filter": {
"s3KeyFilter": {
"filterRules": [{"name": "prefix", "value": "stuff/here/"}]
}
},
"objectPrefixes": [],
"queueARN": "arn:aws:sqs:us-west-2:012345678910:myqueue",
"type": "QueueConfiguration",
},
"Lambda": {
"events": [
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:Put",
],
"filter": {
"s3KeyFilter": {
"filterRules": [{"name": "suffix", "value": ".png"}]
}
},
"objectPrefixes": [],
"queueARN": "arn:aws:lambda:us-west-2:012345678910:function:mylambda",
"type": "LambdaConfiguration",
},
}
    }


@mock_s3
def test_s3_acl_to_config_dict():
from moto.s3.config import s3_config_query
from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, OWNER
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2")
# Get the config dict with nothing other than the owner details:
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {"grantSet": None, "owner": {"displayName": None, "id": OWNER}}
# Add some Log Bucket ACLs:
log_acls = FakeAcl(
[
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"WRITE",
),
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"READ_ACP",
),
FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL"),
]
)
s3_config_query.backends["global"].set_bucket_acl("logbucket", log_acls)
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {
"grantSet": None,
"grantList": [
{"grantee": "LogDelivery", "permission": "Write"},
{"grantee": "LogDelivery", "permission": "ReadAcp"},
],
"owner": {"displayName": None, "id": OWNER},
}
# Give the owner less than full_control permissions:
log_acls = FakeAcl(
[
FakeGrant([FakeGrantee(id=OWNER)], "READ_ACP"),
FakeGrant([FakeGrantee(id=OWNER)], "WRITE_ACP"),
]
)
s3_config_query.backends["global"].set_bucket_acl("logbucket", log_acls)
acls = s3_config_query.backends["global"].buckets["logbucket"].acl.to_config_dict()
assert acls == {
"grantSet": None,
"grantList": [
{"grantee": {"id": OWNER, "displayName": None}, "permission": "ReadAcp"},
{"grantee": {"id": OWNER, "displayName": None}, "permission": "WriteAcp"},
],
"owner": {"displayName": None, "id": OWNER},
    }


@mock_s3
def test_s3_config_dict():
from moto.s3.config import s3_config_query
from moto.s3.models import (
FakeAcl,
FakeGrant,
FakeGrantee,
FakeTag,
FakeTagging,
FakeTagSet,
OWNER,
)
# Without any buckets:
assert not s3_config_query.get_config_resource("some_bucket")
tags = FakeTagging(
FakeTagSet(
[FakeTag("someTag", "someValue"), FakeTag("someOtherTag", "someOtherValue")]
)
)
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
s3_config_query.backends["global"].put_bucket_tagging("bucket1", tags)
# With a log bucket:
s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2")
log_acls = FakeAcl(
[
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"WRITE",
),
FakeGrant(
[FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")],
"READ_ACP",
),
FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL"),
]
)
s3_config_query.backends["global"].set_bucket_acl("logbucket", log_acls)
s3_config_query.backends["global"].put_bucket_logging(
"bucket1", {"TargetBucket": "logbucket", "TargetPrefix": ""}
)
policy = json.dumps(
{
"Statement": [
{
"Effect": "Deny",
"Action": "s3:DeleteObject",
"Principal": "*",
"Resource": "arn:aws:s3:::bucket1/*",
}
]
}
)
    # The policy is stored as bytes -- encode it on Python 3; on Python 2, pass the raw string:
if sys.version_info[0] > 2:
pass_policy = bytes(policy, "utf-8")
else:
pass_policy = policy
s3_config_query.backends["global"].set_bucket_policy("bucket1", pass_policy)
# Get the us-west-2 bucket and verify that it works properly:
bucket1_result = s3_config_query.get_config_resource("bucket1")
# Just verify a few things:
assert bucket1_result["arn"] == "arn:aws:s3:::bucket1"
assert bucket1_result["awsRegion"] == "us-west-2"
assert bucket1_result["resourceName"] == bucket1_result["resourceId"] == "bucket1"
assert bucket1_result["tags"] == {
"someTag": "someValue",
"someOtherTag": "someOtherValue",
}
assert json.loads(
bucket1_result["supplementaryConfiguration"]["BucketTaggingConfiguration"]
) == {"tagSets": [{"tags": bucket1_result["tags"]}]}
assert isinstance(bucket1_result["configuration"], str)
exist_list = [
"AccessControlList",
"BucketAccelerateConfiguration",
"BucketLoggingConfiguration",
"BucketPolicy",
"IsRequesterPaysEnabled",
"BucketNotificationConfiguration",
]
for exist in exist_list:
assert isinstance(bucket1_result["supplementaryConfiguration"][exist], str)
# Verify the logging config:
assert json.loads(
bucket1_result["supplementaryConfiguration"]["BucketLoggingConfiguration"]
) == {"destinationBucketName": "logbucket", "logFilePrefix": ""}
# Verify that the AccessControlList is a double-wrapped JSON string:
assert json.loads(
json.loads(bucket1_result["supplementaryConfiguration"]["AccessControlList"])
) == {
"grantSet": None,
"owner": {
"displayName": None,
"id": "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a",
},
}
# Verify the policy:
assert json.loads(bucket1_result["supplementaryConfiguration"]["BucketPolicy"]) == {
"policyText": policy
}
# Filter by correct region:
assert bucket1_result == s3_config_query.get_config_resource(
"bucket1", resource_region="us-west-2"
)
# By incorrect region:
assert not s3_config_query.get_config_resource(
"bucket1", resource_region="eu-west-1"
)
# With correct resource ID and name:
assert bucket1_result == s3_config_query.get_config_resource(
"bucket1", resource_name="bucket1"
)
# With an incorrect resource name:
assert not s3_config_query.get_config_resource(
"bucket1", resource_name="eu-bucket-1"
)
# Verify that no bucket policy returns the proper value:
logging_bucket = s3_config_query.get_config_resource("logbucket")
assert json.loads(logging_bucket["supplementaryConfiguration"]["BucketPolicy"]) == {
"policyText": None
}
assert not logging_bucket["tags"]
assert not logging_bucket["supplementaryConfiguration"].get(
"BucketTaggingConfiguration"
)