moto: s3: support partNumber for head_object

To support it, we need to keep the multipart info in the key itself when
completing a multipart upload.

Fixes #2154

Signed-off-by: Ruslan Kuprieiev <ruslan@iterative.ai>
Ruslan Kuprieiev 2019-07-15 20:08:15 +03:00
parent 8b3b1f88ab
commit 2c2dff22bc
3 changed files with 78 additions and 7 deletions
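
For orientation, a minimal sketch of the client call this commit enables (bucket and key names are illustrative; it assumes moto's mock_s3 is active and that the object was created through a completed multipart upload):

    import boto3

    # Illustrative only: assumes mock_s3 is active and that mybucket/the-key
    # was created through a completed multipart upload.
    s3 = boto3.client('s3', region_name='us-east-1')
    head = s3.head_object(Bucket='mybucket', Key='the-key', PartNumber=1)
    print(head['ContentLength'])  # size of part 1, not of the whole object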

moto/s3/models.py

@@ -52,8 +52,17 @@ class FakeDeleteMarker(BaseModel):
 
 
 class FakeKey(BaseModel):
-    def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0,
-                 max_buffer_size=DEFAULT_KEY_BUFFER_SIZE):
+    def __init__(
+        self,
+        name,
+        value,
+        storage="STANDARD",
+        etag=None,
+        is_versioned=False,
+        version_id=0,
+        max_buffer_size=DEFAULT_KEY_BUFFER_SIZE,
+        multipart=None
+    ):
         self.name = name
         self.last_modified = datetime.datetime.utcnow()
         self.acl = get_canned_acl('private')
@@ -65,6 +74,7 @@ class FakeKey(BaseModel):
         self._version_id = version_id
         self._is_versioned = is_versioned
         self._tagging = FakeTagging()
+        self.multipart = multipart
 
         self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
         self._max_buffer_size = max_buffer_size
@@ -782,7 +792,15 @@ class S3Backend(BaseBackend):
         bucket = self.get_bucket(bucket_name)
         return bucket.website_configuration
 
-    def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
+    def set_key(
+        self,
+        bucket_name,
+        key_name,
+        value,
+        storage=None,
+        etag=None,
+        multipart=None,
+    ):
         key_name = clean_key_name(key_name)
         if storage is not None and storage not in STORAGE_CLASS:
             raise InvalidStorageClass(storage=storage)
@@ -795,7 +813,9 @@
             storage=storage,
             etag=etag,
             is_versioned=bucket.is_versioned,
-            version_id=str(uuid.uuid4()) if bucket.is_versioned else None)
+            version_id=str(uuid.uuid4()) if bucket.is_versioned else None,
+            multipart=multipart,
+        )
 
         keys = [
             key for key in bucket.keys.getlist(key_name, [])
@@ -812,7 +832,7 @@
         key.append_to_value(value)
         return key
 
-    def get_key(self, bucket_name, key_name, version_id=None):
+    def get_key(self, bucket_name, key_name, version_id=None, part_number=None):
         key_name = clean_key_name(key_name)
         bucket = self.get_bucket(bucket_name)
         key = None
@@ -827,6 +847,9 @@
                     key = key_version
                     break
 
+        if part_number and key.multipart:
+            key = key.multipart.parts[part_number]
+
         if isinstance(key, FakeKey):
             return key
         else:
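
The indexing above assumes key.multipart.parts maps the 1-based part number to a key object holding just that part's bytes. FakeMultipart itself is not shown in this diff, so the following is a simplified, hypothetical stand-in to illustrate the lookup, not moto's actual class:

    # Hypothetical, stripped-down stand-ins; moto's real FakeMultipart and
    # FakeKey carry much more state than this.
    class Part:
        def __init__(self, value):
            self.value = value

    class Multipart:
        def __init__(self):
            self.parts = {}  # {1-based part number: Part}

    mp = Multipart()
    mp.parts[1] = Part(b'1' * 5 * 1024 * 1024)
    mp.parts[2] = Part(b'2' * 1024)

    # get_key(..., part_number=2) then reduces to a plain dict lookup:
    part = mp.parts[2]
    print(len(part.value))  # 1024 -- the part's size, not the object's

Note also that when a key was not created by a multipart upload, the key.multipart guard falls through and the HEAD describes the whole object.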
@@ -890,7 +913,12 @@
         if value is None:
             return
         del bucket.multiparts[multipart_id]
 
-        key = self.set_key(bucket_name, multipart.key_name, value, etag=etag)
+        key = self.set_key(
+            bucket_name,
+            multipart.key_name,
+            value, etag=etag,
+            multipart=multipart
+        )
         key.set_metadata(multipart.metadata)
         return key

moto/s3/responses.py

@@ -809,13 +809,20 @@ class ResponseObject(_TemplateEnvironmentMixin):
     def _key_response_head(self, bucket_name, query, key_name, headers):
         response_headers = {}
         version_id = query.get('versionId', [None])[0]
+        part_number = query.get('partNumber', [None])[0]
+        if part_number:
+            part_number = int(part_number)
 
         if_modified_since = headers.get('If-Modified-Since', None)
         if if_modified_since:
             if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
 
-        key = self.backend.get_key(
-            bucket_name, key_name, version_id=version_id)
+        key = self.backend.get_key(
+            bucket_name,
+            key_name,
+            version_id=version_id,
+            part_number=part_number
+        )
         if key:
             response_headers.update(key.metadata)
             response_headers.update(key.response_dict)
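
The query.get('partNumber', [None])[0] pattern exists because the query mapping holds a list of values per parameter, in the style of urllib's parse_qs; a standalone illustration (not moto code):

    from urllib.parse import parse_qs

    # parse_qs maps each parameter name to a list of string values, hence
    # the get(..., [None])[0] dance and the explicit int() conversion above.
    query = parse_qs('partNumber=2&versionId=abc123')
    part_number = query.get('partNumber', [None])[0]  # '2' -- still a string
    if part_number:
        part_number = int(part_number)
    print(part_number)  # 2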

tests/test_s3/test_s3.py

@@ -1671,6 +1671,42 @@ def test_boto3_multipart_etag():
     resp['ETag'].should.equal(EXPECTED_ETAG)
 
 
+@mock_s3
+@reduced_min_part_size
+def test_boto3_multipart_part_size():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    s3.create_bucket(Bucket='mybucket')
+
+    mpu = s3.create_multipart_upload(Bucket='mybucket', Key='the-key')
+    mpu_id = mpu["UploadId"]
+
+    parts = []
+    n_parts = 10
+    for i in range(1, n_parts + 1):
+        part_size = 5 * 1024 * 1024
+        body = b'1' * part_size
+        part = s3.upload_part(
+            Bucket='mybucket',
+            Key='the-key',
+            PartNumber=i,
+            UploadId=mpu_id,
+            Body=body,
+            ContentLength=len(body),
+        )
+        parts.append({"PartNumber": i, "ETag": part["ETag"]})
+
+    s3.complete_multipart_upload(
+        Bucket='mybucket',
+        Key='the-key',
+        UploadId=mpu_id,
+        MultipartUpload={"Parts": parts},
+    )
+
+    for i in range(1, n_parts + 1):
+        obj = s3.head_object(Bucket='mybucket', Key='the-key', PartNumber=i)
+        assert obj["ContentLength"] == part_size
+
+
 @mock_s3
 def test_boto3_put_object_with_tagging():
     s3 = boto3.client('s3', region_name='us-east-1')
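
One thing the new test does not assert: against real S3, a part-level HeadObject also reports the total number of parts as PartsCount (the x-amz-mp-parts-count header). This diff does not show the mock setting that field, so a check meant to run against both should hedge; continuing from the test above (s3 and n_parts as defined there):

    # PartsCount comes back from real S3 on part-level HEADs; whether this
    # mock populates it is not shown in the diff, so treat it as optional.
    head = s3.head_object(Bucket='mybucket', Key='the-key', PartNumber=1)
    parts_count = head.get('PartsCount')
    if parts_count is not None:
        assert parts_count == n_parts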