S3: Add Notifications for Restore-actions (#7455)
commit 3c71f81155
parent d5042facb6
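Summary: S3 restore actions are now wired into moto's notification system. `OBJECT_RESTORE_POST_EVENT` is sent when a restore is requested, and `OBJECT_RESTORE_COMPLETED_EVENT` once the simulated restoration finishes. The restore logic also moves out of the response layer into a new `S3Backend.restore_object` method, which reports whether the object had already been restored so the response can return HTTP 200 instead of 202.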
@@ -6509,7 +6509,7 @@
 
 ## s3
 <details>
-<summary>67% implemented</summary>
+<summary>68% implemented</summary>
 
 - [X] abort_multipart_upload
 - [X] complete_multipart_upload
@@ -6605,7 +6605,7 @@
 - [X] put_object_retention
 - [X] put_object_tagging
 - [X] put_public_access_block
-- [ ] restore_object
+- [X] restore_object
 - [X] select_object_content
 - [X] upload_file
 - [X] upload_fileobj
@@ -138,7 +138,7 @@ s3
 - [X] put_object_retention
 - [X] put_object_tagging
 - [X] put_public_access_block
-- [ ] restore_object
+- [X] restore_object
 - [X] select_object_content
 
 Highly experimental. Please raise an issue if you find any inconsistencies/bugs.
@@ -42,6 +42,7 @@ from moto.s3.exceptions import (
     InvalidBucketName,
     InvalidNotificationDestination,
     InvalidNotificationEvent,
+    InvalidObjectState,
     InvalidPart,
     InvalidPublicAccessBlockConfiguration,
     InvalidRequest,
@@ -197,6 +198,25 @@ class FakeKey(BaseModel, ManagedState):
         self._value_buffer.write(new_value)
         self.contentsize = len(new_value)
 
+    @property
+    def status(self) -> Optional[str]:
+        previous = self._status
+        new_status = super().status
+        if previous != "RESTORED" and new_status == "RESTORED":
+            s3_backend = s3_backends[self.account_id]["global"]
+            bucket = s3_backend.get_bucket(self.bucket_name)  # type: ignore
+            notifications.send_event(
+                self.account_id,
+                notifications.S3NotificationEvent.OBJECT_RESTORE_COMPLETED_EVENT,
+                bucket,
+                key=self,
+            )
+        return new_status
+
+    @status.setter
+    def status(self, value: str) -> None:
+        self._status = value
+
     def set_metadata(self, metadata: Any, replace: bool = False) -> None:
         if replace:
             self._metadata = {}  # type: ignore
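A note on the new property: `FakeKey` inherits lazy state transitions from `ManagedState`, so the restore "completes" only when `status` is next read. The `previous != "RESTORED"` guard makes the completed-notification fire exactly once, on the edge into `RESTORED`, rather than on every read. A minimal self-contained sketch of this fire-on-transition pattern (toy class and names, not moto's actual `ManagedState`):

```python
from typing import List, Optional


class LazyRestoreState:
    """Toy stand-in: the managed status flips to RESTORED on first read."""

    def __init__(self) -> None:
        self._status: Optional[str] = "IN_PROGRESS"
        self.sent_events: List[str] = []

    @property
    def status(self) -> Optional[str]:
        previous = self._status
        new_status = "RESTORED"  # pretend the backing state machine advanced
        if previous != "RESTORED" and new_status == "RESTORED":
            # Fire exactly once, on the edge into RESTORED
            self.sent_events.append("ObjectRestore:Completed")
        self._status = new_status
        return new_status


key = LazyRestoreState()
key.status  # first read crosses the edge -> one event
key.status  # subsequent reads -> no new event
assert key.sent_events == ["ObjectRestore:Completed"]
```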
@@ -215,6 +235,14 @@ class FakeKey(BaseModel, ManagedState):
 
     def restore(self, days: int) -> None:
         self._expiry = utcnow() + datetime.timedelta(days)
+        s3_backend = s3_backends[self.account_id]["global"]
+        bucket = s3_backend.get_bucket(self.bucket_name)  # type: ignore
+        notifications.send_event(
+            self.account_id,
+            notifications.S3NotificationEvent.OBJECT_RESTORE_POST_EVENT,
+            bucket,
+            key=self,
+        )
 
     @property
     def etag(self) -> str:
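Design note: the two restore notifications are intentionally split. `restore()` runs when the restore request arrives and sends `OBJECT_RESTORE_POST_EVENT` immediately; `OBJECT_RESTORE_COMPLETED_EVENT` is emitted only from the `status` property above, once the managed state reaches `RESTORED` (with moto's default instant transition, that is the next time the key is read).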
@@ -2854,6 +2882,16 @@ class S3Backend(BaseBackend, CloudWatchMetricProvider):
             for x in query_result
         ]
 
+    def restore_object(self, bucket_name: str, key_name: str, days: str) -> bool:
+        key = self.get_object(bucket_name, key_name)
+        if not key:
+            raise MissingKey
+        if key.storage_class not in ARCHIVE_STORAGE_CLASSES:
+            raise InvalidObjectState(storage_class=key.storage_class)
+        had_expiry_date = key.expiry_date is not None
+        key.restore(int(days))
+        return had_expiry_date
+
     def upload_file(self) -> None:
         # Listed for the implementation coverage
         # Implementation part of responses.py
@@ -2274,14 +2274,11 @@ class S3Response(BaseResponse):
         elif "restore" in query:
             es = minidom.parseString(body).getElementsByTagName("Days")
             days = es[0].childNodes[0].wholeText
-            key = self.backend.get_object(bucket_name, key_name)  # type: ignore
-            if key.storage_class not in ARCHIVE_STORAGE_CLASSES:  # type: ignore
-                raise InvalidObjectState(storage_class=key.storage_class)  # type: ignore
-            r = 202
-            if key.expiry_date is not None:  # type: ignore
-                r = 200
-            key.restore(int(days))  # type: ignore
-            return r, {}, ""
+            previously_restored = self.backend.restore_object(
+                bucket_name, key_name, days
+            )
+            status_code = 200 if previously_restored else 202
+            return status_code, {}, ""
         elif "select" in query:
             request = xmltodict.parse(body)["SelectObjectContentRequest"]
             select_query = request["Expression"]
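The status-code contract implemented above matches what the updated tests assert: 202 when a restore is first initiated, 200 when the object already has a restore expiry date. A usage sketch (illustrative bucket and key names) under moto's decorator:

```python
import boto3
from moto import mock_aws


@mock_aws
def demo_restore_status_codes() -> None:
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="demo-bucket")
    s3.put_object(
        Bucket="demo-bucket", Key="cold", Body=b"data", StorageClass="GLACIER"
    )

    first = s3.restore_object(
        Bucket="demo-bucket", Key="cold", RestoreRequest={"Days": 1}
    )
    assert first["ResponseMetadata"]["HTTPStatusCode"] == 202  # restore initiated

    second = s3.restore_object(
        Bucket="demo-bucket", Key="cold", RestoreRequest={"Days": 2}
    )
    assert second["ResponseMetadata"]["HTTPStatusCode"] == 200  # already restored


demo_restore_status_codes()
```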
@@ -544,7 +544,8 @@ def test_restore_key():
 
     key = bucket.put_object(Key="the-key", Body=b"somedata", StorageClass="GLACIER")
     assert key.restore is None
-    key.restore_object(RestoreRequest={"Days": 1})
+    resp = key.restore_object(RestoreRequest={"Days": 1})
+    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 202
     if settings.TEST_SERVER_MODE:
         assert 'ongoing-request="false"' in key.restore
     elif settings.TEST_DECORATOR_MODE:
@@ -552,7 +553,8 @@ def test_restore_key():
             'ongoing-request="false", expiry-date="Mon, 02 Jan 2012 12:00:00 GMT"'
         )
 
-    key.restore_object(RestoreRequest={"Days": 2})
+    resp = key.restore_object(RestoreRequest={"Days": 2})
+    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
 
     if settings.TEST_SERVER_MODE:
         assert 'ongoing-request="false"' in key.restore
@@ -594,6 +596,19 @@ def test_restore_key_transition():
     state_manager.unset_transition(model_name="s3::keyrestore")
 
 
+@mock_aws
+def test_restore_unknown_key():
+    client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
+    client.create_bucket(Bucket="mybucket")
+
+    with pytest.raises(ClientError) as exc:
+        client.restore_object(
+            Bucket="mybucket", Key="unknown", RestoreRequest={"Days": 1}
+        )
+    err = exc.value.response["Error"]
+    assert err["Code"] == "NoSuchKey"
+
+
 @mock_aws
 def test_cannot_restore_standard_class_object():
     s3_resource = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)
@@ -220,3 +220,33 @@ def test_delete_object_notification():
     assert event_message["region"] == REGION_NAME
     assert event_message["detail"]["bucket"]["name"] == bucket_name
     assert event_message["detail"]["reason"] == "ObjectRemoved"
+
+
+@mock_aws
+def test_restore_key_notifications():
+    resource_names = _seteup_bucket_notification_eventbridge()
+    bucket_name = resource_names["bucket_name"]
+
+    s3_resource = boto3.resource("s3", region_name=REGION_NAME)
+
+    bucket = s3_resource.Bucket(bucket_name)
+    key = bucket.put_object(Key="the-key", Body=b"somedata", StorageClass="GLACIER")
+    key.restore_object(RestoreRequest={"Days": 1})
+
+    events = _get_send_events()
+    event_names = [json.loads(e["message"])["detail"]["reason"] for e in events]
+    assert event_names == ["ObjectCreated", "ObjectRestore"]
+
+    # Finish the Object Restoration - restore Completes immediately by default
+    key.load()
+
+    events = _get_send_events()
+    event_names = [json.loads(e["message"])["detail"]["reason"] for e in events]
+    assert event_names == ["ObjectCreated", "ObjectRestore", "ObjectRestore"]
+
+    # Sanity check - loading the Key does not mean the Restore-event is fired every time
+    key.load()
+
+    events = _get_send_events()
+    event_names = [json.loads(e["message"])["detail"]["reason"] for e in events]
+    assert event_names == ["ObjectCreated", "ObjectRestore", "ObjectRestore"]
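The test above relies on the `_seteup_bucket_notification_eventbridge` helper (defined elsewhere in this test module), which is assumed to route the bucket's notifications to EventBridge. A minimal sketch of such a setup, with a hypothetical bucket name:

```python
import boto3
from moto import mock_aws


@mock_aws
def enable_eventbridge_notifications() -> None:
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="demo-bucket")
    # Route all supported S3 events for this bucket to Amazon EventBridge
    s3.put_bucket_notification_configuration(
        Bucket="demo-bucket",
        NotificationConfiguration={"EventBridgeConfiguration": {}},
    )


enable_eventbridge_notifications()
```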
@@ -203,9 +203,10 @@ def test_s3_copy_object_for_glacier_storage_class_restored():
     )
 
     s3_client.create_bucket(Bucket="Bucket2")
-    s3_client.restore_object(
+    resp = s3_client.restore_object(
         Bucket="Bucket", Key="First_Object", RestoreRequest={"Days": 123}
     )
+    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 202
 
     s3_client.copy_object(
         CopySource={"Bucket": "Bucket", "Key": "First_Object"},
@@ -241,9 +242,10 @@ def test_s3_copy_object_for_deep_archive_storage_class_restored():
     assert err["StorageClass"] == "DEEP_ARCHIVE"
 
     s3_client.create_bucket(Bucket="Bucket2")
-    s3_client.restore_object(
+    resp = s3_client.restore_object(
         Bucket="Bucket", Key="First_Object", RestoreRequest={"Days": 123}
     )
+    assert resp["ResponseMetadata"]["HTTPStatusCode"] == 202
     s3_client.get_object(Bucket="Bucket", Key="First_Object")
 
     s3_client.copy_object(