Add support for detaching volumes upon instance termination (#2999)
parent 31ce74a842
commit 2320e82647
@@ -231,6 +231,16 @@ class InvalidVolumeAttachmentError(EC2ClientError):
         )


+class InvalidVolumeDetachmentError(EC2ClientError):
+    def __init__(self, volume_id, instance_id, device):
+        super(InvalidVolumeDetachmentError, self).__init__(
+            "InvalidAttachment.NotFound",
+            "The volume {0} is not attached to instance {1} as device {2}".format(
+                volume_id, instance_id, device
+            ),
+        )
+
+
 class VolumeInUseError(EC2ClientError):
     def __init__(self, volume_id, instance_id):
         super(VolumeInUseError, self).__init__(
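As context for the new exception (illustrative only, not part of the diff): a minimal sketch, assuming moto and boto3 are installed, of the kind of call it guards against. It mirrors the regression test added further down.

# Hypothetical sketch: detaching with a device name that does not match the
# recorded attachment should surface as InvalidAttachment.NotFound.
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2


@mock_ec2
def show_invalid_detachment():
    ec2 = boto3.client("ec2", region_name="us-west-1")
    instance_id = ec2.run_instances(ImageId="ami-d3adb33f", MinCount=1, MaxCount=1)[
        "Instances"
    ][0]["InstanceId"]
    # The default root volume is attached as /dev/sda1, so /dev/sdf is wrong.
    volume_id = ec2.describe_volumes()["Volumes"][0]["VolumeId"]
    try:
        ec2.detach_volume(
            VolumeId=volume_id, InstanceId=instance_id, Device="/dev/sdf"
        )
    except ClientError as err:
        print(err.response["Error"]["Code"])  # InvalidAttachment.NotFound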
@@ -72,6 +72,7 @@ from .exceptions import (
     InvalidVolumeIdError,
     VolumeInUseError,
     InvalidVolumeAttachmentError,
+    InvalidVolumeDetachmentError,
     InvalidVpcCidrBlockAssociationIdError,
     InvalidVPCPeeringConnectionIdError,
     InvalidVPCPeeringConnectionStateTransitionError,
@@ -560,23 +561,34 @@ class Instance(TaggedEC2Resource, BotoInstance):
             # worst case we'll get IP address exaustion... rarely
             pass

-    def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False):
+    def add_block_device(
+        self,
+        size,
+        device_path,
+        snapshot_id=None,
+        encrypted=False,
+        delete_on_termination=False,
+    ):
         volume = self.ec2_backend.create_volume(
             size, self.region_name, snapshot_id, encrypted
         )
-        self.ec2_backend.attach_volume(volume.id, self.id, device_path)
+        self.ec2_backend.attach_volume(
+            volume.id, self.id, device_path, delete_on_termination
+        )

     def setup_defaults(self):
         # Default have an instance with root volume should you not wish to
         # override with attach volume cmd.
         volume = self.ec2_backend.create_volume(8, "us-east-1a")
-        self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1")
+        self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1", True)

     def teardown_defaults(self):
-        if "/dev/sda1" in self.block_device_mapping:
-            volume_id = self.block_device_mapping["/dev/sda1"].volume_id
-            self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1")
-            self.ec2_backend.delete_volume(volume_id)
+        for device_path in list(self.block_device_mapping.keys()):
+            volume = self.block_device_mapping[device_path]
+            volume_id = volume.volume_id
+            self.ec2_backend.detach_volume(volume_id, self.id, device_path)
+            if volume.delete_on_termination:
+                self.ec2_backend.delete_volume(volume_id)

     @property
     def get_block_device_mapping(self):
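To make the behaviour change concrete (illustrative only, assuming moto and boto3; not part of the diff): on termination every mapped device is now detached, and only volumes whose mapping carries DeleteOnTermination=True are deleted.

# Hypothetical sketch: the root volume is discarded on termination, while the
# extra data volume (DeleteOnTermination omitted, i.e. False) is merely detached.
import boto3
from moto import mock_ec2


@mock_ec2
def show_termination_behaviour():
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    instance = ec2.create_instances(
        ImageId="ami-d3adb33f",
        MinCount=1,
        MaxCount=1,
        BlockDeviceMappings=[
            {
                "DeviceName": "/dev/sda1",
                "Ebs": {"VolumeSize": 8, "DeleteOnTermination": True},
            },
            {"DeviceName": "/dev/sdb", "Ebs": {"VolumeSize": 50}},
        ],
    )[0]
    instance.terminate()
    instance.wait_until_terminated()
    # Only the /dev/sdb volume survives, now detached and "available".
    print([(v.id, v.state) for v in ec2.volumes.all()])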
@@ -897,8 +909,15 @@ class InstanceBackend(object):
                     volume_size = block_device["Ebs"].get("VolumeSize")
                     snapshot_id = block_device["Ebs"].get("SnapshotId")
                     encrypted = block_device["Ebs"].get("Encrypted", False)
+                    delete_on_termination = block_device["Ebs"].get(
+                        "DeleteOnTermination", False
+                    )
                     new_instance.add_block_device(
-                        volume_size, device_name, snapshot_id, encrypted
+                        volume_size,
+                        device_name,
+                        snapshot_id,
+                        encrypted,
+                        delete_on_termination,
                     )
             else:
                 new_instance.setup_defaults()
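A quick sanity check of the default (illustrative only, assuming moto and boto3): when DeleteOnTermination is omitted from an explicit Ebs mapping, the backend records it as False, so the volume is kept on termination.

# Hypothetical sketch: the flag defaults to False for explicitly requested
# block devices, and is echoed back by DescribeInstances.
import boto3
from moto import mock_ec2


@mock_ec2
def show_default_flag():
    ec2 = boto3.client("ec2", region_name="us-west-1")
    ec2.run_instances(
        ImageId="ami-d3adb33f",
        MinCount=1,
        MaxCount=1,
        BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}],
    )
    instance = ec2.describe_instances()["Reservations"][0]["Instances"][0]
    print(instance["BlockDeviceMappings"][0]["Ebs"]["DeleteOnTermination"])  # False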
@@ -2475,7 +2494,9 @@ class EBSBackend(object):
             return self.volumes.pop(volume_id)
         raise InvalidVolumeIdError(volume_id)

-    def attach_volume(self, volume_id, instance_id, device_path):
+    def attach_volume(
+        self, volume_id, instance_id, device_path, delete_on_termination=False
+    ):
         volume = self.get_volume(volume_id)
         instance = self.get_instance(instance_id)

@@ -2489,17 +2510,25 @@ class EBSBackend(object):
             status=volume.status,
             size=volume.size,
             attach_time=utc_date_and_time(),
+            delete_on_termination=delete_on_termination,
         )
         instance.block_device_mapping[device_path] = bdt
         return volume.attachment

     def detach_volume(self, volume_id, instance_id, device_path):
         volume = self.get_volume(volume_id)
-        self.get_instance(instance_id)
+        instance = self.get_instance(instance_id)

         old_attachment = volume.attachment
         if not old_attachment:
             raise InvalidVolumeAttachmentError(volume_id, instance_id)
+        device_path = device_path or old_attachment.device
+
+        try:
+            del instance.block_device_mapping[device_path]
+        except KeyError:
+            raise InvalidVolumeDetachmentError(volume_id, instance_id, device_path)

         old_attachment.status = "detached"

         volume.attachment = None
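Illustrative only (assuming moto and boto3; not part of the diff): with the device-path fallback above, a caller can omit Device on detach and the backend resolves it from the recorded attachment.

# Hypothetical sketch: DetachVolume without a Device now succeeds because the
# backend falls back to old_attachment.device.
import boto3
from moto import mock_ec2


@mock_ec2
def show_detach_without_device():
    ec2 = boto3.client("ec2", region_name="us-west-1")
    instance_id = ec2.run_instances(ImageId="ami-d3adb33f", MinCount=1, MaxCount=1)[
        "Instances"
    ][0]["InstanceId"]
    volume_id = ec2.create_volume(Size=10, AvailabilityZone="us-west-1a")["VolumeId"]
    ec2.attach_volume(VolumeId=volume_id, InstanceId=instance_id, Device="/dev/sdf")
    detachment = ec2.detach_volume(VolumeId=volume_id, InstanceId=instance_id)
    print(detachment["State"])  # "detaching"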
@@ -99,6 +99,106 @@ def test_instance_launch_and_terminate():
     instance.state.should.equal("terminated")


+@mock_ec2
+def test_instance_terminate_discard_volumes():
+
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[
+            {
+                "DeviceName": "/dev/sda1",
+                "Ebs": {"VolumeSize": 50, "DeleteOnTermination": True},
+            }
+        ],
+    )
+    instance = result[0]
+
+    instance_volume_ids = []
+    for volume in instance.volumes.all():
+        instance_volume_ids.append(volume.volume_id)
+
+    instance.terminate()
+    instance.wait_until_terminated()
+
+    assert not list(ec2_resource.volumes.all())
+
+
+@mock_ec2
+def test_instance_terminate_keep_volumes():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}],
+    )
+    instance = result[0]
+
+    instance_volume_ids = []
+    for volume in instance.volumes.all():
+        instance_volume_ids.append(volume.volume_id)
+
+    instance.terminate()
+    instance.wait_until_terminated()
+
+    assert len(instance_volume_ids) == 1
+    volume = ec2_resource.Volume(instance_volume_ids[0])
+    volume.state.should.equal("available")
+
+
+@mock_ec2
+def test_instance_terminate_detach_volumes():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[
+            {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}},
+            {"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}},
+        ],
+    )
+    instance = result[0]
+    for volume in instance.volumes.all():
+        response = instance.detach_volume(VolumeId=volume.volume_id)
+        response["State"].should.equal("detaching")
+
+    instance.terminate()
+    instance.wait_until_terminated()
+
+    assert len(list(ec2_resource.volumes.all())) == 2
+
+
+@mock_ec2
+def test_instance_detach_volume_wrong_path():
+    ec2_resource = boto3.resource("ec2", "us-west-1")
+    result = ec2_resource.create_instances(
+        ImageId="ami-d3adb33f",
+        MinCount=1,
+        MaxCount=1,
+        BlockDeviceMappings=[{"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": 50}}],
+    )
+    instance = result[0]
+    for volume in instance.volumes.all():
+        with assert_raises(ClientError) as ex:
+            instance.detach_volume(VolumeId=volume.volume_id, Device="/dev/sdf")
+
+        ex.exception.response["Error"]["Code"].should.equal(
+            "InvalidAttachment.NotFound"
+        )
+        ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+        ex.exception.response["Error"]["Message"].should.equal(
+            "The volume {0} is not attached to instance {1} as device {2}".format(
+                volume.volume_id, instance.instance_id, "/dev/sdf"
+            )
+        )
+
+
 @mock_ec2_deprecated
 def test_terminate_empty_instances():
     conn = boto.connect_ec2("the_key", "the_secret")
@@ -1416,14 +1516,14 @@ def test_modify_delete_on_termination():
     result = ec2_client.create_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)
     instance = result[0]
     instance.load()
-    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False)
+    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True)
     instance.modify_attribute(
         BlockDeviceMappings=[
-            {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": True}}
+            {"DeviceName": "/dev/sda1", "Ebs": {"DeleteOnTermination": False}}
         ]
     )
     instance.load()
-    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(True)
+    instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"].should.be(False)


 @mock_ec2
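For reference (illustrative only, assuming moto and boto3): the assertions above flipped because the default root volume created by setup_defaults is now attached with DeleteOnTermination=True.

# Hypothetical sketch: an instance launched without BlockDeviceMappings gets a
# root volume whose mapping now reports DeleteOnTermination=True.
import boto3
from moto import mock_ec2


@mock_ec2
def show_root_volume_default():
    ec2 = boto3.resource("ec2", region_name="us-west-1")
    instance = ec2.create_instances(ImageId="ami-d3adb33f", MinCount=1, MaxCount=1)[0]
    instance.load()
    print(instance.block_device_mappings[0]["Ebs"]["DeleteOnTermination"])  # True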