diff --git a/moto/s3/models.py b/moto/s3/models.py
index 33b5c503b..abc80a3ab 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -1421,6 +1421,18 @@ class S3Backend(BaseBackend, CloudWatchMetricProvider):
             "s3::keyrestore", transition={"progression": "immediate"}
         )
 
+    def reset(self):
+        # For every key and multipart, Moto opens a TemporaryFile to write the value of those keys.
+        # Ensure that these TemporaryFile objects are closed, so no file handles are left open.
+        for bucket in self.buckets.values():
+            for key in bucket.keys.values():
+                if isinstance(key, FakeKey):
+                    key._value_buffer.close()
+                    if key.multipart is not None:
+                        for part in key.multipart.parts.values():
+                            part._value_buffer.close()
+        super().reset()
+
     @property
     def _url_module(self):
         # The urls-property can be different depending on env variables
diff --git a/tests/test_s3/test_s3_multipart.py b/tests/test_s3/test_s3_multipart.py
index b152c88ba..2226464ca 100644
--- a/tests/test_s3/test_s3_multipart.py
+++ b/tests/test_s3/test_s3_multipart.py
@@ -1,8 +1,10 @@
 import boto3
 import os
+import gc
 import pytest
 import sure  # noqa # pylint: disable=unused-import
 import requests
+import warnings
 
 from botocore.client import ClientError
 from functools import wraps
@@ -1009,3 +1011,39 @@ def test_head_object_returns_part_count():
     # Header is not returned when we do not pass PartNumber
     resp = client.head_object(Bucket=bucket, Key=key)
     resp.shouldnt.have.key("PartsCount")
+
+
+def test_reset_after_multipart_upload_closes_file_handles():
+    """
+    Large uploads are written to disk for performance reasons.
+    This test verifies that the file handles are properly closed when resetting Moto.
+    """
+    s3 = s3model.S3Backend("us-west-1", "1234")
+    s3.create_bucket("my-bucket", "us-west-1")
+    s3.put_object("my-bucket", "my-key", "x" * 10_000_000)
+
+    with warnings.catch_warnings(record=True) as warning_list:
+        warnings.simplefilter("always", ResourceWarning)  # ResourceWarning is ignored by default
+        s3.reset()
+        gc.collect()
+    warning_types = [type(warn.message) for warn in warning_list]
+    warning_types.shouldnt.contain(ResourceWarning)
+
+
+def test_deleting_large_upload_closes_file_handles():
+    """
+    Large uploads are written to disk for performance reasons.
+    This test verifies that the file handles are properly closed on deletion.
+    """
+    s3 = s3model.S3Backend("us-east-1", "1234")
+    s3.create_bucket("my-bucket", "us-east-1")
+    s3.put_object("my-bucket", "my-key", "x" * 10_000_000)
+
+    with warnings.catch_warnings(record=True) as warning_list:
+        warnings.simplefilter("always", ResourceWarning)  # ResourceWarning is ignored by default
+        s3.delete_object(bucket_name="my-bucket", key_name="my-key")
+        gc.collect()
+    warning_types = [type(warn.message) for warn in warning_list]
+    warning_types.shouldnt.contain(ResourceWarning)
+
+    s3.reset()
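
For context on the models.py hunk: moto spools each key's value into a temporary file, so a reset that merely drops the in-memory dicts would leak the underlying OS file handles. Below is a minimal, self-contained sketch of the same close-before-drop pattern; the FakePart/FakeStore names and the max_size threshold are hypothetical stand-ins for illustration, not moto's real classes.

import tempfile


class FakePart:
    """Stand-in for a stored value whose bytes live in a temp file."""

    def __init__(self, value: bytes):
        # Spills to an on-disk file (and an OS handle) once the value exceeds max_size.
        self._value_buffer = tempfile.SpooledTemporaryFile(max_size=1024)
        self._value_buffer.write(value)


class FakeStore:
    """Stand-in backend: reset() must close every buffer before dropping state."""

    def __init__(self):
        self.parts = {}

    def put(self, name: str, value: bytes):
        self.parts[name] = FakePart(value)

    def reset(self):
        # Mirror the patch: walk every stored object and close its buffer,
        # so no file handle outlives the reset.
        for part in self.parts.values():
            part._value_buffer.close()
        self.parts = {}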
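The tests rely on CPython emitting a ResourceWarning when an unclosed file object is finalized. Since ResourceWarning is ignored by default, the pattern is: enable it inside warnings.catch_warnings(record=True), run the code under test, force a gc.collect() so finalizers run, then assert nothing was recorded. A minimal sketch of that pattern, with a hypothetical LeakyHolder class and helper:

import gc
import os
import warnings


class LeakyHolder:
    """Holds an open file handle; close() must be called to release it."""

    def __init__(self, path):
        self._fh = open(path, "rb")

    def close(self):
        self._fh.close()


def leaks_file_handle(make_and_drop) -> bool:
    """Run make_and_drop, collect garbage, and report whether a ResourceWarning fired."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always", ResourceWarning)  # surfaced; ignored by default
        make_and_drop()  # the return value is dropped immediately
        gc.collect()  # finalize unreachable objects so unclosed handles warn now
    return ResourceWarning in [type(w.message) for w in caught]


assert leaks_file_handle(lambda: LeakyHolder(os.devnull))  # handle leaked -> warning
assert not leaks_file_handle(lambda: LeakyHolder(os.devnull).close())  # closed -> silent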