S3: close FileHandles on reset (#5545)

This commit is contained in:
Bert Blommers 2022-10-09 12:22:46 +00:00 committed by GitHub
parent 1a8f93dce3
commit cf2ce3324a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 50 additions and 0 deletions

View File

@@ -1421,6 +1421,18 @@ class S3Backend(BaseBackend, CloudWatchMetricProvider):
"s3::keyrestore", transition={"progression": "immediate"}
)
def reset(self):
    """Close every open TemporaryFile before resetting the backend.

    Moto buffers the value of each key (and each multipart part) in a
    TemporaryFile; closing those buffers here ensures a reset leaves
    no file handles open.
    """
    for bkt in self.buckets.values():
        for obj in bkt.keys.values():
            # Only FakeKey objects carry a value buffer.
            if not isinstance(obj, FakeKey):
                continue
            obj._value_buffer.close()
            if obj.multipart is not None:
                for upload_part in obj.multipart.parts.values():
                    upload_part._value_buffer.close()
    super().reset()
@property
def _url_module(self):
# The urls-property can be different depending on env variables

View File

@@ -1,8 +1,10 @@
import boto3
import os
import gc
import pytest
import sure # noqa # pylint: disable=unused-import
import requests
import warnings
from botocore.client import ClientError
from functools import wraps
@@ -1009,3 +1011,39 @@ def test_head_object_returns_part_count():
# Header is not returned when we do not pass PartNumber
resp = client.head_object(Bucket=bucket, Key=key)
resp.shouldnt.have.key("PartsCount")
def test_reset_after_multipart_upload_closes_file_handles():
    """
    Large uploads are spilled to disk for performance reasons.
    Verify that resetting Moto closes the backing filehandles.
    """
    backend = s3model.S3Backend("us-west-1", "1234")
    backend.create_bucket("my-bucket", "us-west-1")
    # 10 MB payload, large enough to be written to a TemporaryFile
    backend.put_object("my-bucket", "my-key", "x" * 10_000_000)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always", ResourceWarning)  # add filter
        backend.reset()
        gc.collect()
        # A leaked filehandle would surface as a ResourceWarning during GC
        observed = [type(w.message) for w in caught]
        observed.shouldnt.contain(ResourceWarning)
def test_deleting_large_upload_closes_file_handles():
    """
    Large uploads are spilled to disk for performance reasons.
    Verify that deleting such an object closes the backing filehandles.
    """
    backend = s3model.S3Backend("us-east-1", "1234")
    backend.create_bucket("my-bucket", "us-west-1")
    # 10 MB payload, large enough to be written to a TemporaryFile
    backend.put_object("my-bucket", "my-key", "x" * 10_000_000)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always", ResourceWarning)  # add filter
        backend.delete_object(bucket_name="my-bucket", key_name="my-key")
        gc.collect()
        # A leaked filehandle would surface as a ResourceWarning during GC
        observed = [type(w.message) for w in caught]
        observed.shouldnt.contain(ResourceWarning)

    backend.reset()