Techdebt: Move S3 CompleteMultipartUpload to the backend (#7437)

parent 4a7ed82e8a
commit 349353edaa
moto/s3/models.py
@@ -2423,13 +2423,39 @@ class S3Backend(BaseBackend, CloudWatchMetricProvider):
     def complete_multipart_upload(
         self, bucket_name: str, multipart_id: str, body: Iterator[Tuple[int, str]]
-    ) -> Tuple[FakeMultipart, bytes, str, Optional[str]]:
+    ) -> Optional[FakeKey]:
         bucket = self.get_bucket(bucket_name)
         multipart = bucket.multiparts[multipart_id]
         value, etag, checksum = multipart.complete(body)
         if value is not None:
             del bucket.multiparts[multipart_id]
-        return multipart, value, etag, checksum
+
+        if value is None:
+            return None
+
+        key = self.put_object(
+            bucket_name,
+            multipart.key_name,
+            value,
+            storage=multipart.storage,
+            etag=etag,
+            multipart=multipart,
+            encryption=multipart.sse_encryption,
+            kms_key_id=multipart.kms_key_id,
+        )
+        key.set_metadata(multipart.metadata)
+
+        if checksum:
+            key.checksum_algorithm = multipart.metadata.get("x-amz-checksum-algorithm")
+            key.checksum_value = checksum
+
+        self.put_object_tagging(key, multipart.tags)
+        self.put_object_acl(
+            bucket_name=bucket_name,
+            key_name=key.name,
+            acl=multipart.acl,
+        )
+        return key

     def get_all_multiparts(self, bucket_name: str) -> Dict[str, FakeMultipart]:
         bucket = self.get_bucket(bucket_name)
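
The backend now assembles the finished object itself: it writes the key, copies metadata, checksum, tags, and ACL from the multipart state, and returns the FakeKey (or None when the parts cannot be combined). For orientation, here is a minimal sketch of the client-side flow that ends up in this method, assuming moto's mock_aws decorator and plain boto3; everything outside the diff is illustrative:

import boto3
from moto import mock_aws

@mock_aws
def exercise_complete_multipart_upload() -> None:
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="mybucket")

    # Start an upload and send a single part; the only part is also the
    # last part, so it may be smaller than the usual 5 MB part minimum.
    mpu = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")
    part = s3.upload_part(
        Bucket="mybucket",
        Key="the-key",
        PartNumber=1,
        UploadId=mpu["UploadId"],
        Body=b"part body",
    )

    # S3Response parses this request and now delegates assembly of the
    # final object to S3Backend.complete_multipart_upload (diff above).
    s3.complete_multipart_upload(
        Bucket="mybucket",
        Key="the-key",
        UploadId=mpu["UploadId"],
        MultipartUpload={"Parts": [{"ETag": part["ETag"], "PartNumber": 1}]},
    )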
moto/s3/responses.py
@@ -2245,37 +2245,13 @@ class S3Response(BaseResponse):
         if query.get("uploadId"):
             multipart_id = query["uploadId"][0]

-            multipart, value, etag, checksum = self.backend.complete_multipart_upload(
+            key = self.backend.complete_multipart_upload(
                 bucket_name, multipart_id, self._complete_multipart_body(body)
             )
-            if value is None:
+            if key is None:
                 return 400, {}, ""

             headers: Dict[str, Any] = {}
-            key = self.backend.put_object(
-                bucket_name,
-                multipart.key_name,
-                value,
-                storage=multipart.storage,
-                etag=etag,
-                multipart=multipart,
-                encryption=multipart.sse_encryption,
-                kms_key_id=multipart.kms_key_id,
-            )
-            key.set_metadata(multipart.metadata)
-
-            if checksum:
-                key.checksum_algorithm = multipart.metadata.get(
-                    "x-amz-checksum-algorithm"
-                )
-                key.checksum_value = checksum
-
-            self.backend.put_object_tagging(key, multipart.tags)
-            self.backend.put_object_acl(
-                bucket_name=bucket_name,
-                key_name=key.name,
-                acl=multipart.acl,
-            )

             template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
             if key.version_id:
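
With the object-assembly logic moved out, the response handler is a thin shim: parse the request, call the backend, render the template. A side benefit is that code holding a backend reference can complete an upload without going through the HTTP layer. A sketch under the assumption that the s3_backends[account_id]["global"] lookup applies and that parts matches the Iterator[Tuple[int, str]] of (part_number, etag) pairs produced by _complete_multipart_body; the function name and arguments are placeholders, not from the diff:

from typing import Iterator, Tuple

from moto.s3.models import s3_backends

def finish_upload(
    account_id: str, upload_id: str, parts: Iterator[Tuple[int, str]]
) -> None:
    # Direct backend access, bypassing S3Response entirely.
    backend = s3_backends[account_id]["global"]
    key = backend.complete_multipart_upload("mybucket", upload_id, parts)
    if key is None:
        ...  # the response layer maps this case to an empty 400, as above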
@@ -2299,12 +2275,12 @@ class S3Response(BaseResponse):
             es = minidom.parseString(body).getElementsByTagName("Days")
             days = es[0].childNodes[0].wholeText
             key = self.backend.get_object(bucket_name, key_name)  # type: ignore
-            if key.storage_class not in ARCHIVE_STORAGE_CLASSES:
-                raise InvalidObjectState(storage_class=key.storage_class)
+            if key.storage_class not in ARCHIVE_STORAGE_CLASSES:  # type: ignore
+                raise InvalidObjectState(storage_class=key.storage_class)  # type: ignore
             r = 202
-            if key.expiry_date is not None:
+            if key.expiry_date is not None:  # type: ignore
                 r = 200
-            key.restore(int(days))
+            key.restore(int(days))  # type: ignore
             return r, {}, ""
         elif "select" in query:
             request = xmltodict.parse(body)["SelectObjectContentRequest"]
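
The extra # type: ignore comments are presumably needed because get_object returns Optional[FakeKey], so every attribute access on key is flagged once mypy checks this path. Explicit narrowing would achieve the same without the ignores; a sketch only, not what the commit does, assuming moto's MissingKey exception fits this spot:

key = self.backend.get_object(bucket_name, key_name)
if key is None:
    raise MissingKey(key=key_name)  # assumed: moto.s3.exceptions.MissingKey
if key.storage_class not in ARCHIVE_STORAGE_CLASSES:
    raise InvalidObjectState(storage_class=key.storage_class)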