Always call update ELBs for ASGs (#2980)

Denver Janke 2020-05-11 16:44:26 +10:00 committed by GitHub
parent a2f5c41372
commit 9618e29ba9
2 changed files with 238 additions and 7 deletions
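
What the change does: whenever an auto scaling group's membership changes (setting desired capacity, attaching or detaching instances, entering or leaving standby, terminating an instance), the backend now always refreshes the attached classic ELBs and target groups, rather than only on some code paths. The new tests attach an ELB named "my-lb" to each group and assert that its registered instances track the group after every operation. Below is a minimal, self-contained sketch of that behaviour against the moto mocks; the resource names, the inline VPC/subnet setup and the plain asserts are illustrative stand-ins (the real tests use a shared networking helper and the sure assertion style), so read it as a sketch rather than a copy of the suite.

import boto3
from moto import mock_autoscaling, mock_ec2, mock_elb


@mock_elb
@mock_autoscaling
@mock_ec2
def check_elb_tracks_asg():
    """Detach an instance from the ASG and confirm the attached ELB is updated."""
    region = "us-east-1"
    ec2 = boto3.client("ec2", region_name=region)
    asg = boto3.client("autoscaling", region_name=region)
    elb = boto3.client("elb", region_name=region)

    # Minimal networking so the ASG has somewhere to place instances.
    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
    subnet_id = ec2.create_subnet(
        VpcId=vpc_id, CidrBlock="10.0.0.0/24", AvailabilityZone="us-east-1a"
    )["Subnet"]["SubnetId"]

    asg.create_launch_configuration(
        LaunchConfigurationName="test_lc", ImageId="ami-12345678", InstanceType="t2.medium"
    )
    asg.create_auto_scaling_group(
        AutoScalingGroupName="test_asg",
        LaunchConfigurationName="test_lc",
        MinSize=0,
        MaxSize=4,
        DesiredCapacity=2,
        VPCZoneIdentifier=subnet_id,
    )
    elb.create_load_balancer(
        LoadBalancerName="my-lb",
        Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
        AvailabilityZones=["us-east-1a"],
    )
    asg.attach_load_balancers(AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"])

    group = asg.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])[
        "AutoScalingGroups"
    ][0]
    instance_id = group["Instances"][0]["InstanceId"]

    # Detaching with ShouldDecrementDesiredCapacity=True shrinks the group to one
    # instance; with this change the ELB registrations shrink with it.
    asg.detach_instances(
        InstanceIds=[instance_id],
        AutoScalingGroupName="test_asg",
        ShouldDecrementDesiredCapacity=True,
    )
    registered = elb.describe_load_balancers(LoadBalancerNames=["my-lb"])[
        "LoadBalancerDescriptions"
    ][0]["Instances"]
    assert len(registered) == 1
    assert instance_id not in [i["InstanceId"] for i in registered]


check_elb_tracks_asg()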


@@ -419,11 +419,8 @@ class FakeAutoScalingGroup(BaseModel):
curr_instance_count = len(self.active_instances())
if self.desired_capacity == curr_instance_count:
self.autoscaling_backend.update_attached_elbs(self.name)
self.autoscaling_backend.update_attached_target_groups(self.name)
return
if self.desired_capacity > curr_instance_count:
pass # Nothing to do here
elif self.desired_capacity > curr_instance_count:
# Need more instances
count_needed = int(self.desired_capacity) - int(curr_instance_count)
@@ -447,6 +444,7 @@ class FakeAutoScalingGroup(BaseModel):
self.instance_states = list(
set(self.instance_states) - set(instances_to_remove)
)
if self.name in self.autoscaling_backend.autoscaling_groups:
self.autoscaling_backend.update_attached_elbs(self.name)
self.autoscaling_backend.update_attached_target_groups(self.name)
@@ -695,6 +693,7 @@ class AutoScalingBackend(BaseBackend):
)
group.instance_states.extend(new_instances)
self.update_attached_elbs(group.name)
self.update_attached_target_groups(group.name)
def set_instance_health(
self, instance_id, health_status, should_respect_grace_period
@@ -938,8 +937,7 @@ class AutoScalingBackend(BaseBackend):
standby_instances.append(instance_state)
if should_decrement:
group.desired_capacity = group.desired_capacity - len(instance_ids)
else:
group.set_desired_capacity(group.desired_capacity)
group.set_desired_capacity(group.desired_capacity)
return standby_instances, original_size, group.desired_capacity
def exit_standby_instances(self, group_name, instance_ids):
@@ -951,6 +949,7 @@ class AutoScalingBackend(BaseBackend):
instance_state.lifecycle_state = "InService"
standby_instances.append(instance_state)
group.desired_capacity = group.desired_capacity + len(instance_ids)
group.set_desired_capacity(group.desired_capacity)
return standby_instances, original_size, group.desired_capacity
def terminate_instance(self, instance_id, should_decrement):
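
Taken together, the backend hunks above apply one rule: every operation that mutates a group's size or membership ends by re-running update_attached_elbs (and update_attached_target_groups), usually by funnelling through set_desired_capacity, so the load balancer registrations cannot drift from the group even when the desired capacity itself does not change. A toy, self-contained sketch of that pattern follows; the classes are stand-ins for illustration, not moto's real FakeAutoScalingGroup/AutoScalingBackend.

class ToyBackend:
    """Tracks which instance ids are registered with the group's load balancer."""

    def __init__(self):
        self.elb_registrations = {}

    def update_attached_elbs(self, group):
        # Re-register exactly the group's current in-service instances.
        self.elb_registrations[group.name] = set(group.active_instances)


class ToyGroup:
    def __init__(self, name, backend):
        self.name = name
        self.backend = backend
        self.active_instances = []

    def set_desired_capacity(self, capacity):
        # Grow or shrink to the requested size ...
        while len(self.active_instances) < capacity:
            self.active_instances.append("i-%d" % len(self.active_instances))
        del self.active_instances[capacity:]
        # ... and always propagate the result to the attached load balancer,
        # even when the size did not change (the point of this commit).
        self.backend.update_attached_elbs(self)


backend = ToyBackend()
group = ToyGroup("test_asg", backend)
group.set_desired_capacity(2)
assert backend.elb_registrations["test_asg"] == {"i-0", "i-1"}
group.set_desired_capacity(1)
assert backend.elb_registrations["test_asg"] == {"i-0"}
# A no-op resize still refreshes the registrations.
backend.elb_registrations["test_asg"].clear()
group.set_desired_capacity(1)
assert backend.elb_registrations["test_asg"] == {"i-0"}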


@@ -1071,6 +1071,7 @@ def test_autoscaling_describe_policies_boto3():
response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down")
@mock_elb
@mock_autoscaling
@mock_ec2
def test_detach_one_instance_decrement():
@@ -1096,6 +1097,19 @@ def test_detach_one_instance_decrement():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@@ -1111,6 +1125,9 @@ def test_detach_one_instance_decrement():
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]]
)
# test to ensure tag has been removed
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
@@ -1122,7 +1139,14 @@ def test_detach_one_instance_decrement():
tags = response["Reservations"][0]["Instances"][0]["Tags"]
tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_detach_one_instance():
@@ -1148,6 +1172,19 @@ def test_detach_one_instance():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@@ -1173,7 +1210,14 @@ def test_detach_one_instance():
tags = response["Reservations"][0]["Instances"][0]["Tags"]
tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_one_instance_decrement():
@@ -1199,6 +1243,19 @@ def test_standby_one_instance_decrement():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@@ -1226,7 +1283,14 @@ def test_standby_one_instance_decrement():
tags = instance["Tags"]
tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_one_instance():
@@ -1252,6 +1316,19 @@ def test_standby_one_instance():
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@@ -1279,6 +1356,12 @@ def test_standby_one_instance():
tags = instance["Tags"]
tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@@ -1338,8 +1421,12 @@ def test_standby_elb_update():
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_terminate_instance_decrement():
@@ -1366,6 +1453,18 @@ def test_standby_terminate_instance_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@@ -1409,7 +1508,14 @@ def test_standby_terminate_instance_decrement():
"terminated"
)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_terminate_instance_no_decrement():
@@ -1436,6 +1542,18 @@ def test_standby_terminate_instance_no_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@@ -1479,7 +1597,14 @@ def test_standby_terminate_instance_no_decrement():
"terminated"
)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_decrement():
@@ -1506,6 +1631,18 @@ def test_standby_detach_instance_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@@ -1547,7 +1684,14 @@ def test_standby_detach_instance_decrement():
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_no_decrement():
@@ -1574,6 +1718,18 @@ def test_standby_detach_instance_no_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@@ -1615,7 +1771,14 @@ def test_standby_detach_instance_no_decrement():
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_exit_standby():
@@ -1642,6 +1805,18 @@ def test_standby_exit_standby():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@@ -1683,7 +1858,14 @@ def test_standby_exit_standby():
)
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3)
instance_to_standby_exit_standby.should.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_attach_one_instance():
@@ -1711,6 +1893,18 @@ def test_attach_one_instance():
NewInstancesProtectedFromScaleIn=True,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
ec2 = boto3.resource("ec2", "us-east-1")
instances_to_add = [
x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1)
@@ -1727,6 +1921,9 @@ def test_attach_one_instance():
for instance in instances:
instance["ProtectedFromScaleIn"].should.equal(True)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3)
@mock_autoscaling
@mock_ec2
@@ -1948,6 +2145,7 @@ def test_terminate_instance_via_ec2_in_autoscaling_group():
replaced_instance_id.should_not.equal(original_instance_id)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_decrement():
@@ -1966,6 +2164,18 @@ def test_terminate_instance_in_auto_scaling_group_decrement():
NewInstancesProtectedFromScaleIn=False,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
@@ -1979,7 +2189,11 @@ def test_terminate_instance_in_auto_scaling_group_decrement():
response["AutoScalingGroups"][0]["Instances"].should.equal([])
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_no_decrement():
@@ -1998,6 +2212,18 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement():
NewInstancesProtectedFromScaleIn=False,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
@@ -2014,3 +2240,9 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement():
)
replaced_instance_id.should_not.equal(original_instance_id)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
original_instance_id.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)