Techdebt: Update TF tests (#6661)

parent 78c518ddc8
commit bc29ae2fc3

.github/workflows/test_terraform.yml (vendored)
@@ -16,7 +16,7 @@ jobs:
       fail-fast: false
       matrix:
         service: ${{ fromJson(needs.prepare_list.outputs.matrix) }}
-        go-version: [1.18.x]
+        go-version: [1.21.x]
         os: [ubuntu-latest]
     runs-on: ${{ matrix.os }}
     steps:
@@ -23,6 +23,7 @@ class CertificateAuthority(BaseModel):
         certificate_authority_configuration: Dict[str, Any],
         certificate_authority_type: str,
         revocation_configuration: Dict[str, Any],
+        security_standard: Optional[str],
     ):
         self.id = mock_random.uuid4()
         self.arn = (
@@ -40,6 +41,7 @@ class CertificateAuthority(BaseModel):
         self.updated_at: Optional[float] = None
         self.status = "PENDING_CERTIFICATE"
         self.usage_mode = "SHORT_LIVED_CERTIFICATE"
+        self.security_standard = security_standard or "FIPS_140_2_LEVEL_3_OR_HIGHER"

         common_name = self.certificate_authority_configuration.get("Subject", {}).get(
             "CommonName", "Moto.org"
@@ -170,6 +172,7 @@ class CertificateAuthority(BaseModel):
             "CreatedAt": self.created_at,
             "Status": self.status,
             "UsageMode": self.usage_mode,
+            "KeyStorageSecurityStandard": self.security_standard,
         }
         if self.updated_at:
             dct["LastStateChangeAt"] = self.updated_at
@@ -196,6 +199,7 @@ class ACMPCABackend(BaseBackend):
         certificate_authority_configuration: Dict[str, Any],
         revocation_configuration: Dict[str, Any],
         certificate_authority_type: str,
+        security_standard: Optional[str],
         tags: List[Dict[str, str]],
     ) -> str:
         """
@@ -207,6 +211,7 @@ class ACMPCABackend(BaseBackend):
             certificate_authority_configuration=certificate_authority_configuration,
             certificate_authority_type=certificate_authority_type,
             revocation_configuration=revocation_configuration,
+            security_standard=security_standard,
         )
        self.certificate_authorities[authority.arn] = authority
        if tags:
@@ -24,11 +24,13 @@ class ACMPCAResponse(BaseResponse):
         )
         revocation_configuration = params.get("RevocationConfiguration")
         certificate_authority_type = params.get("CertificateAuthorityType")
+        security_standard = params.get("KeyStorageSecurityStandard")
         tags = params.get("Tags")
         certificate_authority_arn = self.acmpca_backend.create_certificate_authority(
             certificate_authority_configuration=certificate_authority_configuration,
             revocation_configuration=revocation_configuration,
             certificate_authority_type=certificate_authority_type,
+            security_standard=security_standard,
             tags=tags,
         )
         return json.dumps(dict(CertificateAuthorityArn=certificate_authority_arn))
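The ACM-PCA changes above thread the new KeyStorageSecurityStandard parameter from the request parser through the backend and back out of DescribeCertificateAuthority. A minimal usage sketch, not part of this commit (region and CommonName are arbitrary):

import boto3
from moto import mock_acmpca

@mock_acmpca
def check_security_standard():
    client = boto3.client("acm-pca", region_name="us-east-1")
    arn = client.create_certificate_authority(
        CertificateAuthorityConfiguration={
            "KeyAlgorithm": "RSA_4096",
            "SigningAlgorithm": "SHA512WITHRSA",
            "Subject": {"CommonName": "example.test"},
        },
        CertificateAuthorityType="SUBORDINATE",
        KeyStorageSecurityStandard="FIPS_140_2_LEVEL_2_OR_HIGHER",
    )["CertificateAuthorityArn"]
    ca = client.describe_certificate_authority(CertificateAuthorityArn=arn)[
        "CertificateAuthority"
    ]
    # The explicit standard is echoed back; omitting it falls back to
    # FIPS_140_2_LEVEL_3_OR_HIGHER via the model default above.
    assert ca["KeyStorageSecurityStandard"] == "FIPS_140_2_LEVEL_2_OR_HIGHER"

check_security_standard()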
@@ -30,7 +30,7 @@ DEFAULT_COOLDOWN = 300
 ASG_NAME_TAG = "aws:autoscaling:groupName"


-class InstanceState(object):
+class InstanceState:
     def __init__(
         self,
         instance: "Instance",
@@ -382,6 +382,20 @@ def set_string_propagate_at_launch_booleans_on_tags(
     return tags


+class FakeWarmPool(CloudFormationModel):
+    def __init__(
+        self,
+        max_capacity: Optional[int],
+        min_size: Optional[int],
+        pool_state: Optional[str],
+        instance_reuse_policy: Optional[Dict[str, bool]],
+    ):
+        self.max_capacity = max_capacity
+        self.min_size = min_size or 0
+        self.pool_state = pool_state or "Stopped"
+        self.instance_reuse_policy = instance_reuse_policy
+
+
 class FakeAutoScalingGroup(CloudFormationModel):
     def __init__(
         self,
@@ -449,6 +463,7 @@ class FakeAutoScalingGroup(CloudFormationModel):
         self.set_desired_capacity(desired_capacity)

         self.metrics: List[str] = []
+        self.warm_pool: Optional[FakeWarmPool] = None

     @property
     def tags(self) -> List[Dict[str, str]]:
@@ -815,6 +830,23 @@ class FakeAutoScalingGroup(CloudFormationModel):
     def enable_metrics_collection(self, metrics: List[str]) -> None:
         self.metrics = metrics or []

+    def put_warm_pool(
+        self,
+        max_capacity: Optional[int],
+        min_size: Optional[int],
+        pool_state: Optional[str],
+        instance_reuse_policy: Optional[Dict[str, bool]],
+    ) -> None:
+        self.warm_pool = FakeWarmPool(
+            max_capacity=max_capacity,
+            min_size=min_size,
+            pool_state=pool_state,
+            instance_reuse_policy=instance_reuse_policy,
+        )
+
+    def get_warm_pool(self) -> Optional[FakeWarmPool]:
+        return self.warm_pool
+

 class AutoScalingBackend(BaseBackend):
     def __init__(self, region_name: str, account_id: str):
@@ -1546,5 +1578,32 @@ class AutoScalingBackend(BaseBackend):
         group = self.describe_auto_scaling_groups([group_name])[0]
         group.enable_metrics_collection(metrics)

+    def put_warm_pool(
+        self,
+        group_name: str,
+        max_capacity: Optional[int],
+        min_size: Optional[int],
+        pool_state: Optional[str],
+        instance_reuse_policy: Optional[Dict[str, bool]],
+    ) -> None:
+        group = self.describe_auto_scaling_groups([group_name])[0]
+        group.put_warm_pool(
+            max_capacity=max_capacity,
+            min_size=min_size,
+            pool_state=pool_state,
+            instance_reuse_policy=instance_reuse_policy,
+        )
+
+    def describe_warm_pool(self, group_name: str) -> Optional[FakeWarmPool]:
+        """
+        Pagination is not yet implemented. Does not create/return any Instances currently.
+        """
+        group = self.describe_auto_scaling_groups([group_name])[0]
+        return group.get_warm_pool()
+
+    def delete_warm_pool(self, group_name: str) -> None:
+        group = self.describe_auto_scaling_groups([group_name])[0]
+        group.warm_pool = None
+

 autoscaling_backends = BackendDict(AutoScalingBackend, "autoscaling")
@@ -484,6 +484,35 @@ class AutoScalingResponse(BaseResponse):
         template = self.response_template(ENABLE_METRICS_COLLECTION_TEMPLATE)
         return template.render()

+    def put_warm_pool(self) -> str:
+        params = self._get_params()
+        group_name = params.get("AutoScalingGroupName")
+        max_capacity = params.get("MaxGroupPreparedCapacity")
+        min_size = params.get("MinSize")
+        pool_state = params.get("PoolState")
+        instance_reuse_policy = params.get("InstanceReusePolicy")
+        self.autoscaling_backend.put_warm_pool(
+            group_name=group_name,  # type: ignore[arg-type]
+            max_capacity=max_capacity,
+            min_size=min_size,
+            pool_state=pool_state,
+            instance_reuse_policy=instance_reuse_policy,
+        )
+        template = self.response_template(PUT_WARM_POOL_TEMPLATE)
+        return template.render()
+
+    def describe_warm_pool(self) -> str:
+        group_name = self._get_param("AutoScalingGroupName")
+        warm_pool = self.autoscaling_backend.describe_warm_pool(group_name=group_name)
+        template = self.response_template(DESCRIBE_WARM_POOL_TEMPLATE)
+        return template.render(pool=warm_pool)
+
+    def delete_warm_pool(self) -> str:
+        group_name = self._get_param("AutoScalingGroupName")
+        self.autoscaling_backend.delete_warm_pool(group_name=group_name)
+        template = self.response_template(DELETE_WARM_POOL_TEMPLATE)
+        return template.render()
+

 CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
 <ResponseMetadata>
@@ -774,6 +803,28 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
             </Overrides>
             {% endif %}
           </LaunchTemplate>
+          {% if group.mixed_instance_policy.get("InstancesDistribution") %}
+          <InstancesDistribution>
+            {% if group.mixed_instance_policy.get("InstancesDistribution").get("OnDemandAllocationStrategy") %}
+            <OnDemandAllocationStrategy>{{ group.mixed_instance_policy.get("InstancesDistribution").get("OnDemandAllocationStrategy") }}</OnDemandAllocationStrategy>
+            {% endif %}
+            {% if group.mixed_instance_policy.get("InstancesDistribution").get("OnDemandBaseCapacity") %}
+            <OnDemandBaseCapacity>{{ group.mixed_instance_policy.get("InstancesDistribution").get("OnDemandBaseCapacity") }}</OnDemandBaseCapacity>
+            {% endif %}
+            {% if group.mixed_instance_policy.get("InstancesDistribution").get("OnDemandPercentageAboveBaseCapacity") %}
+            <OnDemandPercentageAboveBaseCapacity>{{ group.mixed_instance_policy.get("InstancesDistribution").get("OnDemandPercentageAboveBaseCapacity") }}</OnDemandPercentageAboveBaseCapacity>
+            {% endif %}
+            {% if group.mixed_instance_policy.get("InstancesDistribution").get("SpotAllocationStrategy") %}
+            <SpotAllocationStrategy>{{ group.mixed_instance_policy.get("InstancesDistribution").get("SpotAllocationStrategy") }}</SpotAllocationStrategy>
+            {% endif %}
+            {% if group.mixed_instance_policy.get("InstancesDistribution").get("SpotInstancePools") %}
+            <SpotInstancePools>{{ group.mixed_instance_policy.get("InstancesDistribution").get("SpotInstancePools") }}</SpotInstancePools>
+            {% endif %}
+            {% if group.mixed_instance_policy.get("InstancesDistribution").get("SpotMaxPrice") %}
+            <SpotMaxPrice>{{ group.mixed_instance_policy.get("InstancesDistribution").get("SpotMaxPrice") }}</SpotMaxPrice>
+            {% endif %}
+          </InstancesDistribution>
+          {% endif %}
         </MixedInstancesPolicy>
         {% elif group.launch_template %}
         <LaunchTemplate>
@@ -864,6 +915,18 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
           </EnabledMetrics>
           {% endif %}
           <ServiceLinkedRoleARN>{{ group.service_linked_role }}</ServiceLinkedRoleARN>
+          {% if group.warm_pool %}
+          <WarmPoolConfiguration>
+            <MaxGroupPreparedCapacity>{{ group.warm_pool.max_capacity }}</MaxGroupPreparedCapacity>
+            <MinSize>{{ group.warm_pool.min_size or 0 }}</MinSize>
+            {% if group.warm_pool.pool_state %}
+            <PoolState>{{ group.warm_pool.pool_state }}</PoolState>
+            {% endif %}
+            <InstanceReusePolicy>
+              <ReuseOnScaleIn>{{ 'true' if group.warm_pool.instance_reuse_policy["ReuseOnScaleIn"] else 'false' }}</ReuseOnScaleIn>
+            </InstanceReusePolicy>
+          </WarmPoolConfiguration>
+          {% endif %}
         </member>
         {% endfor %}
       </AutoScalingGroups>
@@ -1386,3 +1449,46 @@ ENABLE_METRICS_COLLECTION_TEMPLATE = """<EnableMetricsCollectionResponse xmlns="
 <RequestId></RequestId>
 </ResponseMetadata>
 </EnableMetricsCollectionResponse>"""
+
+
+PUT_WARM_POOL_TEMPLATE = """<PutWarmPoolResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<ResponseMetadata>
+<RequestId></RequestId>
+</ResponseMetadata>
+<PutWarmPoolResult></PutWarmPoolResult>
+</PutWarmPoolResponse>"""
+
+
+DESCRIBE_WARM_POOL_TEMPLATE = """<DescribeWarmPoolResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<ResponseMetadata>
+<RequestId></RequestId>
+</ResponseMetadata>
+<DescribeWarmPoolResult>
+  {% if pool %}
+  <WarmPoolConfiguration>
+    {% if pool.max_capacity %}
+    <MaxGroupPreparedCapacity>{{ pool.max_capacity }}</MaxGroupPreparedCapacity>
+    {% endif %}
+    <MinSize>{{ pool.min_size }}</MinSize>
+    {% if pool.pool_state %}
+    <PoolState>{{ pool.pool_state }}</PoolState>
+    {% endif %}
+    {% if pool.instance_reuse_policy %}
+    <InstanceReusePolicy>
+      <ReuseOnScaleIn>{{ 'true' if pool.instance_reuse_policy["ReuseOnScaleIn"] else 'false' }}</ReuseOnScaleIn>
+    </InstanceReusePolicy>
+    {% endif %}
+  </WarmPoolConfiguration>
+  {% endif %}
+  <Instances>
+  </Instances>
+</DescribeWarmPoolResult>
+</DescribeWarmPoolResponse>"""
+
+
+DELETE_WARM_POOL_TEMPLATE = """<DeleteWarmPoolResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
+<ResponseMetadata>
+<RequestId></RequestId>
+</ResponseMetadata>
+<DeleteWarmPoolResult></DeleteWarmPoolResult>
+</DeleteWarmPoolResponse>"""
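Taken together, the autoscaling changes add mocked PutWarmPool, DescribeWarmPool and DeleteWarmPool operations. A minimal round-trip sketch, not part of this commit (it assumes a group named "tester" was already created inside the mock):

import boto3

client = boto3.client("autoscaling", region_name="us-east-1")  # inside a moto mock

client.put_warm_pool(
    AutoScalingGroupName="tester",  # hypothetical, pre-existing group
    MaxGroupPreparedCapacity=42,
    PoolState="Stopped",
    InstanceReusePolicy={"ReuseOnScaleIn": True},
)
pool = client.describe_warm_pool(AutoScalingGroupName="tester")
assert pool["WarmPoolConfiguration"]["MaxGroupPreparedCapacity"] == 42
client.delete_warm_pool(AutoScalingGroupName="tester")  # resets group.warm_pool to None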
@@ -1047,6 +1047,7 @@ class FunctionUrlConfig:
             "Cors": self.config.get("Cors"),
             "CreationTime": self.created,
             "LastModifiedTime": self.last_modified,
+            "InvokeMode": self.config.get("InvokeMode") or "Buffered",
         }

     def update(self, new_config: Dict[str, Any]) -> None:
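A small sketch of the effect, not part of this commit: GetFunctionUrlConfig now reports InvokeMode, defaulting to "Buffered" when the caller never set one.

import boto3

lambda_client = boto3.client("lambda", region_name="us-east-1")  # inside a moto mock
# "my-func" is hypothetical; assumes a function with a URL config already exists.
config = lambda_client.get_function_url_config(FunctionName="my-func")
assert config["InvokeMode"] == "Buffered"  # default when InvokeMode was never supplied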
@@ -250,10 +250,33 @@ class JobDefinition(CloudFormationModel):
         self.propagate_tags = propagate_tags

         if self.container_properties is not None:
-            if "resourceRequirements" not in self.container_properties:
-                self.container_properties["resourceRequirements"] = []
-            if "secrets" not in self.container_properties:
-                self.container_properties["secrets"] = []
+            # Set some default values
+            default_values: Dict[str, List[Any]] = {
+                "command": [],
+                "resourceRequirements": [],
+                "secrets": [],
+                "environment": [],
+                "mountPoints": [],
+                "ulimits": [],
+                "volumes": [],
+            }
+            for key, val in default_values.items():
+                if key not in self.container_properties:
+                    self.container_properties[key] = val
+
+            # Set default FARGATE configuration
+            if "FARGATE" in (self.platform_capabilities or []):
+                if "fargatePlatformConfiguration" not in self.container_properties:
+                    self.container_properties["fargatePlatformConfiguration"] = {
+                        "platformVersion": "LATEST"
+                    }
+
+            # Remove any empty environment variables
+            self.container_properties["environment"] = [
+                env_var
+                for env_var in self.container_properties["environment"]
+                if env_var.get("value") != ""
+            ]

         self._validate()
         self.revision += 1
@@ -1045,6 +1045,7 @@ DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """<DescribeStackSetResponse xmlns="http:
       {% if stackset.description %}
       <Description>{{ stackset.description }}</Description>
       {% endif %}
+      <ManagedExecution><Active>false</Active></ManagedExecution>
     </StackSet>
   </DescribeStackSetResult>
   <ResponseMetadata>
@@ -106,6 +106,8 @@ class CustomOriginConfig:
         self.read_timeout = config.get("OriginReadTimeout") or 30
         protocols = config.get("OriginSslProtocols", {}).get("Items") or {}
         self.ssl_protocols = protocols.get("SslProtocol") or []
+        if isinstance(self.ssl_protocols, str):
+            self.ssl_protocols = [self.ssl_protocols]


 class Origin:
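The two added lines guard against a quirk of XML-to-dict parsing: a single <SslProtocol> element comes back as a bare string, while repeated elements come back as a list. A sketch of the behaviour, assuming the request body is parsed with xmltodict as elsewhere in moto:

import xmltodict

single = xmltodict.parse(
    "<Items><SslProtocol>TLSv1.2</SslProtocol></Items>"
)["Items"]["SslProtocol"]
assert single == "TLSv1.2"  # a lone element parses to str, hence the isinstance check

multi = xmltodict.parse(
    "<Items><SslProtocol>TLSv1.1</SslProtocol><SslProtocol>TLSv1.2</SslProtocol></Items>"
)["Items"]["SslProtocol"]
assert multi == ["TLSv1.1", "TLSv1.2"]  # repeated elements parse to a list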
@@ -370,6 +370,12 @@ DEFAULT_USER_POOL_CONFIG: Dict[str, Any] = {
         "EmailSubject": "Your verification code",
         "DefaultEmailOption": "CONFIRM_WITH_CODE",
     },
+    "AccountRecoverySetting": {
+        "RecoveryMechanisms": [
+            {"Priority": 1, "Name": "verified_email"},
+            {"Priority": 2, "Name": "verified_phone_number"},
+        ]
+    },
 }


@@ -465,19 +471,6 @@ class CognitoIdpUserPool(BaseModel):
             None,
         )

-    def _account_recovery_setting(self) -> Any:
-        # AccountRecoverySetting is not present in DescribeUserPool response if the pool was created without
-        # specifying it, ForgotPassword works on default settings nonetheless
-        return self.extended_config.get(
-            "AccountRecoverySetting",
-            {
-                "RecoveryMechanisms": [
-                    {"Priority": 1, "Name": "verified_phone_number"},
-                    {"Priority": 2, "Name": "verified_email"},
-                ]
-            },
-        )
-
     def _base_json(self) -> Dict[str, Any]:
         return {
             "Id": self.id,
@@ -684,7 +677,15 @@ class CognitoIdpUserPoolClient(BaseModel):
         self.id = create_id()
         self.secret = str(random.uuid4())
         self.generate_secret = generate_secret or False
-        self.extended_config = extended_config or {}
+        # Some default values - may be overridden by the user
+        self.extended_config: Dict[str, Any] = {
+            "AllowedOAuthFlowsUserPoolClient": False,
+            "AuthSessionValidity": 3,
+            "EnablePropagateAdditionalUserContextData": False,
+            "EnableTokenRevocation": True,
+            "RefreshTokenValidity": 30,
+        }
+        self.extended_config.update(extended_config or {})

     def _base_json(self) -> Dict[str, Any]:
         return {
@@ -1590,7 +1591,7 @@ class CognitoIdpBackend(BaseBackend):
         """
         for user_pool in self.user_pools.values():
             if client_id in user_pool.clients:
-                recovery_settings = user_pool._account_recovery_setting()
+                recovery_settings = user_pool.extended_config["AccountRecoverySetting"]
                 user = user_pool._get_user(username)
                 break
         else:
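The user pool client now starts from documented AWS defaults and lets user-supplied settings override them. A brief sketch, not part of this commit (cognito is a mocked boto3 cognito-idp client and pool_id an existing pool, both hypothetical):

client_config = cognito.create_user_pool_client(
    UserPoolId=pool_id, ClientName="web"
)["UserPoolClient"]
assert client_config["EnableTokenRevocation"] is True  # default applied
assert client_config["RefreshTokenValidity"] == 30     # default applied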
@@ -133,6 +133,7 @@ class NatGatewayBackend:
         if eip:
             address_set["allocationId"] = allocation_id
             address_set["publicIp"] = eip.public_ip or None
+            address_set["associationId"] = eip.association_id or None
         address_set["networkInterfaceId"] = nat_gateway._eni.id
         address_set["privateIp"] = nat_gateway._eni.private_ip_address
         nat_gateway.address_set.append(address_set)
@@ -53,6 +53,9 @@ DESCRIBE_NAT_GATEWAYS_RESPONSE = """<DescribeNatGatewaysResponse xmlns="http://e
           {% if address_set.networkInterfaceId %}
           <networkInterfaceId>{{ address_set.networkInterfaceId }}</networkInterfaceId>
           {% endif %}
+          {% if address_set.associationId %}
+          <associationId>{{ address_set.associationId }}</associationId>
+          {% endif %}
         </item>
         {% endfor %}
       </natGatewayAddressSet>
@@ -94,6 +97,9 @@ CREATE_NAT_GATEWAY = """<CreateNatGatewayResponse xmlns="http://ec2.amazonaws.co
           {% if address_set.networkInterfaceId %}
           <networkInterfaceId>{{ address_set.networkInterfaceId }}</networkInterfaceId>
           {% endif %}
+          {% if address_set.associationId %}
+          <associationId>{{ address_set.associationId }}</associationId>
+          {% endif %}
         </item>
         {% endfor %}
       </natGatewayAddressSet>
@@ -437,7 +437,7 @@ class CapacityProvider(BaseObject):
     def _prepare_asg_provider(self, asg_details: Dict[str, Any]) -> Dict[str, Any]:
         if "managedScaling" not in asg_details:
             asg_details["managedScaling"] = {}
-        if not asg_details["managedScaling"].get("instanceWarmupPeriod"):
+        if asg_details["managedScaling"].get("instanceWarmupPeriod") is None:
             asg_details["managedScaling"]["instanceWarmupPeriod"] = 300
         if not asg_details["managedScaling"].get("minimumScalingStepSize"):
             asg_details["managedScaling"]["minimumScalingStepSize"] = 1
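The one-line change fixes a falsy-zero bug: `not x.get(key)` treats an explicit instanceWarmupPeriod of 0 as missing and silently overwrites it with the 300-second default, while an `is None` test keeps it. A standalone sketch:

managed_scaling = {"instanceWarmupPeriod": 0}  # caller explicitly wants no warmup

# Old check: 0 is falsy, so the explicit value is clobbered.
if not managed_scaling.get("instanceWarmupPeriod"):
    managed_scaling["instanceWarmupPeriod"] = 300
assert managed_scaling["instanceWarmupPeriod"] == 300

# New check: only a genuinely absent value gets the default.
managed_scaling = {"instanceWarmupPeriod": 0}
if managed_scaling.get("instanceWarmupPeriod") is None:
    managed_scaling["instanceWarmupPeriod"] = 300
assert managed_scaling["instanceWarmupPeriod"] == 0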
@@ -121,6 +121,7 @@ class FakeTargetGroup(CloudFormationModel):
             "deregistration_delay.timeout_seconds": 300,
             "stickiness.enabled": "false",
             "load_balancing.algorithm.type": "round_robin",
             "load_balancing.cross_zone.enabled": "use_load_balancer_configuration",
             "slow_start.duration_seconds": 0,
+            "waf.fail_open.enabled": "false",
         }
@@ -60,6 +60,8 @@ class MQResponse(BaseResponse):
             return self.create_tags()
         if request.method == "DELETE":
             return self.delete_tags()
+        if request.method == "GET":
+            return self.list_tags()

     def user(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:  # type: ignore[return]
         self.setup_class(request, full_url, headers)
@@ -259,6 +261,11 @@ class MQResponse(BaseResponse):
         self.mq_backend.delete_tags(resource_arn, tag_keys)
         return 200, {}, "{}"

+    def list_tags(self) -> TYPE_RESPONSE:
+        resource_arn = unquote(self.path.split("/")[-1])
+        tags = self.mq_backend.list_tags(resource_arn)
+        return 200, {}, json.dumps({"tags": tags})
+
     def reboot(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:  # type: ignore[return]
         self.setup_class(request, full_url, headers)
         if request.method == "POST":
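With the GET branch wired up, boto3's ListTags call on a broker now reaches the new list_tags handler. A one-line sketch, not part of this commit (mq is a mocked boto3 client and broker_arn an existing broker's ARN, both hypothetical):

tags = mq.list_tags(ResourceArn=broker_arn)["Tags"]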
@@ -1,5 +1,6 @@
 from typing import Any, Dict, Iterable
 from moto.core import BaseBackend, BackendDict, BaseModel
+from moto.moto_api._internal import mock_random as random
 from .exceptions import ResourceNotFoundException


@@ -112,6 +113,7 @@ class QuicksightUser(BaseModel):
         self.username = username
         self.user_role = user_role
         self.active = False
+        self.principal_id = random.get_random_hex(10)

     def to_json(self) -> Dict[str, Any]:
         return {
@@ -121,6 +123,7 @@ class QuicksightUser(BaseModel):
             "Role": self.user_role,
             "UserName": self.username,
             "Active": self.active,
+            "PrincipalId": self.principal_id,
         }

@@ -258,6 +258,9 @@ class Cluster:
             "5.6.1",
             "2.07.1",
             "5.7.2",
+            "5.7.mysql_aurora.2.07.1",
+            "5.7.mysql_aurora.2.07.2",
+            "5.7.mysql_aurora.2.08.3",
         ]:
             self._enable_http_endpoint = val
         elif self.engine == "aurora-postgresql" and self.engine_version in [
@@ -344,7 +347,7 @@ class Cluster:
             <IAMDatabaseAuthenticationEnabled>false</IAMDatabaseAuthenticationEnabled>
             <EngineMode>{{ cluster.engine_mode }}</EngineMode>
             <DeletionProtection>{{ 'true' if cluster.deletion_protection else 'false' }}</DeletionProtection>
-            <HttpEndpointEnabled>{{ cluster.enable_http_endpoint }}</HttpEndpointEnabled>
+            <HttpEndpointEnabled>{{ 'true' if cluster.enable_http_endpoint else 'false' }}</HttpEndpointEnabled>
             <CopyTagsToSnapshot>{{ cluster.copy_tags_to_snapshot }}</CopyTagsToSnapshot>
             <CrossAccountClone>false</CrossAccountClone>
             <DomainMemberships></DomainMemberships>
@@ -116,7 +116,7 @@ class RDSResponse(BaseResponse):
             "enable_iam_database_authentication": self._get_bool_param(
                 "EnableIAMDatabaseAuthentication"
             ),
-            "enable_http_endpoint": self._get_param("EnableHttpEndpoint"),
+            "enable_http_endpoint": self._get_bool_param("EnableHttpEndpoint"),
             "license_model": self._get_param("LicenseModel"),
             "iops": self._get_int_param("Iops"),
            "kms_key_id": self._get_param("KmsKeyId"),
@@ -204,7 +204,7 @@ class RDSResponse(BaseResponse):
             "parameter_group": self._get_param("DBClusterParameterGroupName"),
             "region": self.region,
             "db_cluster_instance_class": self._get_param("DBClusterInstanceClass"),
-            "enable_http_endpoint": self._get_param("EnableHttpEndpoint"),
+            "enable_http_endpoint": self._get_bool_param("EnableHttpEndpoint"),
             "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"),
             "tags": self.unpack_list_params("Tags", "Tag"),
             "scaling_configuration": self._get_dict_param("ScalingConfiguration."),
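Switching to _get_bool_param matters because the RDS query API transmits EnableHttpEndpoint as the literal strings "true"/"false": any non-empty string is truthy in Python, so the raw value could never render as disabled. A rough standalone sketch of the distinction:

raw = "false"                   # what _get_param would hand back
assert bool(raw) is True        # non-empty string: always truthy

parsed = raw.lower() == "true"  # roughly what _get_bool_param does
assert parsed is False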
@@ -343,5 +343,31 @@ class ServiceDiscoveryBackend(BaseBackend):
         )
         return operation_id

+    def update_private_dns_namespace(
+        self, _id: str, description: str, properties: Dict[str, Any]
+    ) -> str:
+        namespace = self.get_namespace(namespace_id=_id)
+        if description is not None:
+            namespace.description = description
+        if properties is not None:
+            namespace.dns_properties = properties
+        operation_id = self._create_operation(
+            "UPDATE_NAMESPACE", targets={"NAMESPACE": namespace.id}
+        )
+        return operation_id
+
+    def update_public_dns_namespace(
+        self, _id: str, description: str, properties: Dict[str, Any]
+    ) -> str:
+        namespace = self.get_namespace(namespace_id=_id)
+        if description is not None:
+            namespace.description = description
+        if properties is not None:
+            namespace.dns_properties = properties
+        operation_id = self._create_operation(
+            "UPDATE_NAMESPACE", targets={"NAMESPACE": namespace.id}
+        )
+        return operation_id
+

 servicediscovery_backends = BackendDict(ServiceDiscoveryBackend, "servicediscovery")
@@ -171,3 +171,27 @@ class ServiceDiscoveryResponse(BaseResponse):
             service_id=service_id, details=details
         )
         return json.dumps(dict(OperationId=operation_id))
+
+    def update_private_dns_namespace(self) -> str:
+        params = json.loads(self.body)
+        _id = params.get("Id")
+        description = params["Namespace"].get("Description")
+        properties = params["Namespace"].get("Properties", {}).get("DnsProperties")
+        operation_id = self.servicediscovery_backend.update_private_dns_namespace(
+            _id=_id,
+            description=description,
+            properties=properties,
+        )
+        return json.dumps(dict(OperationId=operation_id))
+
+    def update_public_dns_namespace(self) -> str:
+        params = json.loads(self.body)
+        _id = params.get("Id")
+        description = params["Namespace"].get("Description")
+        properties = params["Namespace"].get("Properties", {}).get("DnsProperties")
+        operation_id = self.servicediscovery_backend.update_public_dns_namespace(
+            _id=_id,
+            description=description,
+            properties=properties,
+        )
+        return json.dumps(dict(OperationId=operation_id))
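A usage sketch for the new namespace-update handlers, not part of this commit (sd is a mocked boto3 servicediscovery client and namespace_id an existing private namespace, both hypothetical):

operation_id = sd.update_private_dns_namespace(
    Id=namespace_id,
    Namespace={"Description": "updated description"},
)["OperationId"]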
@@ -13,6 +13,7 @@ class TimestreamTable(BaseModel):
         db_name: str,
         retention_properties: Dict[str, int],
         magnetic_store_write_properties: Dict[str, Any],
+        schema: Dict[str, Any],
     ):
         self.region_name = region_name
         self.name = table_name
@@ -22,6 +23,15 @@ class TimestreamTable(BaseModel):
             "MagneticStoreRetentionPeriodInDays": 123,
         }
         self.magnetic_store_write_properties = magnetic_store_write_properties or {}
+        self.schema = schema or {
+            "CompositePartitionKey": [
+                {
+                    "Type": "MEASURE",
+                    "Name": "",
+                    "EnforcementInRecord": "",
+                }
+            ]
+        }
         self.records: List[Dict[str, Any]] = []
         self.arn = f"arn:aws:timestream:{self.region_name}:{account_id}:database/{self.db_name}/table/{self.name}"

@@ -29,10 +39,13 @@ class TimestreamTable(BaseModel):
         self,
         retention_properties: Dict[str, int],
         magnetic_store_write_properties: Dict[str, Any],
+        schema: Dict[str, Any],
     ) -> None:
         self.retention_properties = retention_properties
         if magnetic_store_write_properties is not None:
             self.magnetic_store_write_properties = magnetic_store_write_properties
+        if schema is not None:
+            self.schema = schema

     def write_records(self, records: List[Dict[str, Any]]) -> None:
         self.records.extend(records)
@@ -45,6 +58,7 @@ class TimestreamTable(BaseModel):
             "TableStatus": "ACTIVE",
             "RetentionProperties": self.retention_properties,
             "MagneticStoreWriteProperties": self.magnetic_store_write_properties,
+            "Schema": self.schema,
         }

@@ -71,6 +85,7 @@ class TimestreamDatabase(BaseModel):
         table_name: str,
         retention_properties: Dict[str, int],
         magnetic_store_write_properties: Dict[str, Any],
+        schema: Dict[str, Any],
     ) -> TimestreamTable:
         table = TimestreamTable(
             account_id=self.account_id,
@@ -79,6 +94,7 @@ class TimestreamDatabase(BaseModel):
             db_name=self.name,
             retention_properties=retention_properties,
             magnetic_store_write_properties=magnetic_store_write_properties,
+            schema=schema,
         )
         self.tables[table_name] = table
         return table
@@ -88,11 +104,13 @@ class TimestreamDatabase(BaseModel):
         table_name: str,
         retention_properties: Dict[str, int],
         magnetic_store_write_properties: Dict[str, Any],
+        schema: Dict[str, Any],
     ) -> TimestreamTable:
         table = self.tables[table_name]
         table.update(
             retention_properties=retention_properties,
             magnetic_store_write_properties=magnetic_store_write_properties,
+            schema=schema,
         )
         return table

@@ -170,10 +188,14 @@ class TimestreamWriteBackend(BaseBackend):
         retention_properties: Dict[str, int],
         tags: List[Dict[str, str]],
         magnetic_store_write_properties: Dict[str, Any],
+        schema: Dict[str, Any],
     ) -> TimestreamTable:
         database = self.describe_database(database_name)
         table = database.create_table(
-            table_name, retention_properties, magnetic_store_write_properties
+            table_name,
+            retention_properties,
+            magnetic_store_write_properties,
+            schema,
         )
         self.tagging_service.tag_resource(table.arn, tags)
         return table
@@ -196,10 +218,14 @@ class TimestreamWriteBackend(BaseBackend):
         table_name: str,
         retention_properties: Dict[str, int],
         magnetic_store_write_properties: Dict[str, Any],
+        schema: Dict[str, Any],
     ) -> TimestreamTable:
         database = self.describe_database(database_name)
         return database.update_table(
-            table_name, retention_properties, magnetic_store_write_properties
+            table_name,
+            retention_properties,
+            magnetic_store_write_properties,
+            schema,
         )

     def write_records(
@@ -54,12 +54,14 @@ class TimestreamWriteResponse(BaseResponse):
         magnetic_store_write_properties = self._get_param(
             "MagneticStoreWriteProperties"
         )
+        schema = self._get_param("Schema")
         table = self.timestreamwrite_backend.create_table(
             database_name,
             table_name,
             retention_properties,
             tags,
             magnetic_store_write_properties,
+            schema=schema,
         )
         return json.dumps(dict(Table=table.description()))

@@ -87,11 +89,13 @@ class TimestreamWriteResponse(BaseResponse):
         magnetic_store_write_properties = self._get_param(
             "MagneticStoreWriteProperties"
         )
+        schema = self._get_param("Schema")
         table = self.timestreamwrite_backend.update_table(
             database_name,
             table_name,
             retention_properties,
             magnetic_store_write_properties,
+            schema=schema,
         )
         return json.dumps(dict(Table=table.description()))

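The Timestream changes thread an optional Schema through create_table/update_table and echo it back in the table description. A sketch, not part of this commit (assumes a mocked database "db" already exists):

import boto3

ts = boto3.client("timestream-write", region_name="us-east-1")  # inside a moto mock
table = ts.create_table(
    DatabaseName="db",
    TableName="metrics",
    Schema={
        "CompositePartitionKey": [
            {"Type": "DIMENSION", "Name": "host", "EnforcementInRecord": "REQUIRED"}
        ]
    },
)["Table"]
assert table["Schema"]["CompositePartitionKey"][0]["Name"] == "host"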
@@ -8,7 +8,7 @@ echo "Patching the terraform-provider-aws directory..."
 echo "Patches may fail if the patch was already applied, or if the patch is outdated"
 PATCH="etc/0001-Patch-Hardcode-endpoints-to-local-server.patch"
 (git apply $pwd/etc/0001-Patch-Hardcode-endpoints-to-local-server.patch > /dev/null 2>&1 && echo "Patched endpoints") || echo "!! Not able to patch endpoints"
-(git apply $pwd/etc/0002-EC2-reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched EC2") || echo "!! Not able to EC2"
+#(git apply $pwd/etc/0002-EC2-reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched EC2") || echo "!! Not able to EC2"
 (git apply $pwd/etc/0003-Patch-IAM-wait-times.patch > /dev/null 2>&1 && echo "Patched IAM") || echo "!! Not able to patch IAM"
 (git apply $pwd/etc/0005-Route53-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched Route53") || echo "!! Not able to patch Route53"
 (git apply $pwd/etc/0006-CF-Reduce-wait-times.patch > /dev/null 2>&1 && echo "Patched CF") || echo "!! Not able to patch CF"
@@ -1,18 +1,18 @@
-From 7b63ad24b4e5a9c874c0430431bf90cd12d9162b Mon Sep 17 00:00:00 2001
+From 83f8df495c5fc187d925a7dd61f93d1fdc4f405b Mon Sep 17 00:00:00 2001
 From: Bert Blommers <info@bertblommers.nl>
-Date: Thu, 23 Feb 2023 20:04:15 -0100
-Subject: [PATCH] Patch: Hardcode endpoints
+Date: Sun, 13 Aug 2023 21:16:30 +0000
+Subject: [PATCH] Patch endpoints to localhost:4566

 ---
- internal/conns/config.go      | 16 ++++++++++++++++
+ internal/conns/config.go      | 14 ++++++++++++++
  internal/provider/provider.go |  4 ++--
- 2 files changed, 18 insertions(+), 2 deletions(-)
+ 2 files changed, 16 insertions(+), 2 deletions(-)

 diff --git a/internal/conns/config.go b/internal/conns/config.go
-index 89ce54fc36..72d17bda71 100644
+index 72c9cabde0..1f2e0d00e9 100644
 --- a/internal/conns/config.go
 +++ b/internal/conns/config.go
-@@ -77,8 +77,24 @@ type Config struct {
+@@ -55,10 +55,24 @@ type Config struct {
 	UseFIPSEndpoint bool
 }
@@ -25,12 +25,12 @@ index 89ce54fc36..72d17bda71 100644
 +	}
 +	return localEndpoints
 +}
 +
 +
  // ConfigureProvider configures the provided provider Meta (instance data).
  func (c *Config) ConfigureProvider(ctx context.Context, client *AWSClient) (*AWSClient, diag.Diagnostics) {
-+
-+	// XXX: added by bblommers
 	var diags diag.Diagnostics

++	// XXX: added by bblommers
++	// insert custom endpoints
++	c.Endpoints = GetLocalEndpoints()
++
@@ -38,10 +38,10 @@ index 89ce54fc36..72d17bda71 100644
 	AccessKey: c.AccessKey,
 	APNInfo:   StdUserAgentProducts(c.TerraformVersion),
 diff --git a/internal/provider/provider.go b/internal/provider/provider.go
-index 1c2fcaada9..636902d879 100644
+index 88c6ea9538..cfe78c5549 100644
 --- a/internal/provider/provider.go
 +++ b/internal/provider/provider.go
-@@ -2295,13 +2295,13 @@ func configure(ctx context.Context, provider *schema.Provider, d *schema.Resourc
+@@ -452,13 +452,13 @@ func configure(ctx context.Context, provider *schema.Provider, d *schema.Resourc
 	CustomCABundle:                 d.Get("custom_ca_bundle").(string),
 	EC2MetadataServiceEndpoint:     d.Get("ec2_metadata_service_endpoint").(string),
 	EC2MetadataServiceEndpointMode: d.Get("ec2_metadata_service_endpoint_mode").(string),
@@ -52,11 +52,10 @@ index 1c2fcaada9..636902d879 100644
 	MaxRetries:           25, // Set default here, not in schema (muxing with v6 provider).
 	Profile:              d.Get("profile").(string),
 	Region:               d.Get("region").(string),
--	S3UsePathStyle:       d.Get("s3_use_path_style").(bool) || d.Get("s3_force_path_style").(bool),
+-	S3UsePathStyle:       d.Get("s3_use_path_style").(bool),
 +	S3UsePathStyle:       true,
 	SecretKey:            d.Get("secret_key").(string),
 	SkipCredsValidation:  d.Get("skip_credentials_validation").(bool),
-	SkipGetEC2Platforms:  d.Get("skip_get_ec2_platforms").(bool),
 	SkipRegionValidation: d.Get("skip_region_validation").(bool),
 --
 2.25.1

@@ -1,17 +1,17 @@
-From e356afa8e19d90c7e343120897f4385d616ae9d2 Mon Sep 17 00:00:00 2001
+From 84af701010212ceccccba66422191dfcf1e7445b Mon Sep 17 00:00:00 2001
 From: Bert Blommers <info@bertblommers.nl>
-Date: Sun, 19 Jun 2022 19:39:31 +0000
-Subject: [PATCH] IAM: Reduce wait times
+Date: Sun, 13 Aug 2023 21:35:11 +0000
+Subject: [PATCH] Patch IAM timings

 ---
- internal/service/iam/wait.go | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
+ internal/service/iam/wait.go | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)

 diff --git a/internal/service/iam/wait.go b/internal/service/iam/wait.go
-index 705d88d664..527f4fa9b8 100644
+index 3d1c542aa2..e2e5069b63 100644
 --- a/internal/service/iam/wait.go
 +++ b/internal/service/iam/wait.go
-@@ -17,7 +17,7 @@ const (
+@@ -20,7 +20,7 @@ const (
  // as this will negatively impact user experience when configurations
  // have incorrect references or permissions.
  // Reference: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency
@@ -20,15 +20,6 @@ index 705d88d664..527f4fa9b8 100644

 	RoleStatusARNIsUniqueID = "uniqueid"
 	RoleStatusARNIsARN      = "arn"
-@@ -72,7 +72,7 @@ func waitDeleteServiceLinkedRole(conn *iam.IAM, deletionTaskID string) error {
-		Pending: []string{iam.DeletionTaskStatusTypeInProgress, iam.DeletionTaskStatusTypeNotStarted},
-		Target:  []string{iam.DeletionTaskStatusTypeSucceeded},
-		Refresh: statusDeleteServiceLinkedRole(conn, deletionTaskID),
--		Timeout: 5 * time.Minute,
-+		Timeout: 15 * time.Second,
-		Delay:   10 * time.Second,
-	}
-
 --
 2.25.1

|
||||
From 455475a4dbd9143605320fbcab748a569efecbcd Mon Sep 17 00:00:00 2001
|
||||
From 094b3837cd19df89d266038a91592126723cf833 Mon Sep 17 00:00:00 2001
|
||||
From: Bert Blommers <info@bertblommers.nl>
|
||||
Date: Tue, 28 Jun 2022 11:17:35 +0000
|
||||
Subject: [PATCH] Route53 - wait times
|
||||
Date: Sun, 13 Aug 2023 21:40:43 +0000
|
||||
Subject: [PATCH] Patch: Route53 timings
|
||||
|
||||
---
|
||||
internal/service/route53/record.go | 4 ++--
|
||||
internal/service/route53/wait.go | 6 +++---
|
||||
internal/service/route53/zone.go | 2 +-
|
||||
internal/service/route53/zone_association.go | 2 +-
|
||||
4 files changed, 7 insertions(+), 7 deletions(-)
|
||||
internal/service/route53/record.go | 10 +++++-----
|
||||
internal/service/route53/wait.go | 14 +++++++-------
|
||||
internal/service/route53/zone.go | 20 ++++++++++----------
|
||||
3 files changed, 22 insertions(+), 22 deletions(-)
|
||||
|
||||
diff --git a/internal/service/route53/record.go b/internal/service/route53/record.go
|
||||
index 689d21387b..771d863a31 100644
|
||||
index 4f2eb6d5ef..c4ba38824f 100644
|
||||
--- a/internal/service/route53/record.go
|
||||
+++ b/internal/service/route53/record.go
|
||||
@@ -490,8 +490,8 @@ func WaitForRecordSetToSync(conn *route53.Route53, requestId string) error {
|
||||
@@ -25,8 +25,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
- recordSetSyncMinDelay = 10
|
||||
- recordSetSyncMaxDelay = 30
|
||||
+ recordSetSyncMinDelay = 1
|
||||
+ recordSetSyncMaxDelay = 3
|
||||
)
|
||||
|
||||
// @SDKResource("aws_route53_record")
|
||||
@@ -749,7 +749,7 @@ func FindResourceRecordSetByFourPartKey(ctx context.Context, conn *route53.Route
|
||||
}
|
||||
|
||||
func ChangeResourceRecordSets(ctx context.Context, conn *route53.Route53, input *route53.ChangeResourceRecordSetsInput) (*route53.ChangeInfo, error) {
|
||||
- outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 1*time.Minute, func() (interface{}, error) {
|
||||
+ outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Second, func() (interface{}, error) {
|
||||
return conn.ChangeResourceRecordSetsWithContext(ctx, input)
|
||||
}, route53.ErrCodeNoSuchHostedZone)
|
||||
|
||||
@@ -766,8 +766,8 @@ func WaitForRecordSetToSync(ctx context.Context, conn *route53.Route53, requestI
|
||||
Target: []string{route53.ChangeStatusInsync},
|
||||
Delay: time.Duration(rand.Int63n(recordSetSyncMaxDelay-recordSetSyncMinDelay)+recordSetSyncMinDelay) * time.Second,
|
||||
MinTimeout: 5 * time.Second,
|
||||
- PollInterval: 20 * time.Second,
|
||||
- Timeout: 30 * time.Minute,
|
||||
+ PollInterval: 5 * time.Second,
|
||||
+ PollInterval: 2 * time.Second,
|
||||
+ Timeout: 3 * time.Minute,
|
||||
Refresh: func() (result interface{}, state string, err error) {
|
||||
changeRequest := &route53.GetChangeInput{
|
||||
Id: aws.String(requestId),
|
||||
diff --git a/internal/service/route53/wait.go b/internal/service/route53/wait.go
|
||||
index c47a81ef03..98c21e6b8e 100644
|
||||
index d8f577b269..2fc26bf570 100644
|
||||
--- a/internal/service/route53/wait.go
|
||||
+++ b/internal/service/route53/wait.go
|
||||
@@ -13,13 +13,13 @@ import (
|
||||
@@ -16,17 +16,17 @@ import (
|
||||
)
|
||||
|
||||
|
||||
const (
|
||||
- changeTimeout = 30 * time.Minute
|
||||
+ changeTimeout = 3 * time.Minute
|
||||
changeMinTimeout = 5 * time.Second
|
||||
- changePollInterval = 15 * time.Second
|
||||
+ changePollInterval = 5 * time.Second
|
||||
changeMinDelay = 10
|
||||
changeMaxDelay = 30
|
||||
|
||||
- changeMinDelay = 10
|
||||
- changeMaxDelay = 30
|
||||
+ changePollInterval = 1 * time.Second
|
||||
+ changeMinDelay = 1
|
||||
+ changeMaxDelay = 3
|
||||
|
||||
- hostedZoneDNSSECStatusTimeout = 5 * time.Minute
|
||||
+ hostedZoneDNSSECStatusTimeout = 1 * time.Minute
|
||||
|
||||
keySigningKeyStatusTimeout = 5 * time.Minute
|
||||
|
||||
|
||||
- keySigningKeyStatusTimeout = 5 * time.Minute
|
||||
+ keySigningKeyStatusTimeout = 1 * time.Minute
|
||||
|
||||
- trafficPolicyInstanceOperationTimeout = 4 * time.Minute
|
||||
+ trafficPolicyInstanceOperationTimeout = 1 * time.Minute
|
||||
)
|
||||
|
||||
func waitChangeInfoStatusInsync(ctx context.Context, conn *route53.Route53, changeID string) (*route53.ChangeInfo, error) { //nolint:unparam
|
||||
diff --git a/internal/service/route53/zone.go b/internal/service/route53/zone.go
|
||||
index d91b13f953..eed86ef027 100644
|
||||
index dc080c4916..493d5a41aa 100644
|
||||
--- a/internal/service/route53/zone.go
|
||||
+++ b/internal/service/route53/zone.go
|
||||
@@ -698,7 +698,7 @@ func waitForChangeSynchronization(conn *route53.Route53, changeID string) error
|
||||
@@ -32,10 +32,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
- zoneChangeSyncMinDelay = 10
|
||||
- zoneChangeSyncMaxDelay = 30
|
||||
- zoneChangeSyncMinPollInterval = 15
|
||||
- zoneChangeSyncMaxPollInterval = 30
|
||||
+ zoneChangeSyncMinDelay = 1
|
||||
+ zoneChangeSyncMaxDelay = 3
|
||||
+ zoneChangeSyncMinPollInterval = 2
|
||||
+ zoneChangeSyncMaxPollInterval = 3
|
||||
)
|
||||
|
||||
// @SDKResource("aws_route53_zone", name="Hosted Zone")
|
||||
@@ -424,7 +424,7 @@ func dnsSECStatus(ctx context.Context, conn *route53.Route53, hostedZoneID strin
|
||||
}
|
||||
|
||||
var output *route53.GetDNSSECOutput
|
||||
- err := tfresource.Retry(ctx, 3*time.Minute, func() *retry.RetryError {
|
||||
+ err := tfresource.Retry(ctx, 3*time.Second, func() *retry.RetryError {
|
||||
var err error
|
||||
|
||||
output, err = conn.GetDNSSECWithContext(ctx, input)
|
||||
@@ -439,7 +439,7 @@ func dnsSECStatus(ctx context.Context, conn *route53.Route53, hostedZoneID strin
|
||||
}
|
||||
|
||||
return nil
|
||||
- }, tfresource.WithDelayRand(1*time.Minute), tfresource.WithPollInterval(30*time.Second))
|
||||
+ }, tfresource.WithDelayRand(5*time.Second), tfresource.WithPollInterval(3*time.Second))
|
||||
|
||||
if tfresource.TimedOut(err) {
|
||||
output, err = conn.GetDNSSECWithContext(ctx, input)
|
||||
@@ -480,7 +480,7 @@ func disableDNSSECForHostedZone(ctx context.Context, conn *route53.Route53, host
|
||||
}
|
||||
|
||||
var output *route53.DisableHostedZoneDNSSECOutput
|
||||
- err = tfresource.Retry(ctx, 5*time.Minute, func() *retry.RetryError {
|
||||
+ err = tfresource.Retry(ctx, 5*time.Second, func() *retry.RetryError {
|
||||
var err error
|
||||
|
||||
output, err = conn.DisableHostedZoneDNSSECWithContext(ctx, input)
|
||||
@@ -495,7 +495,7 @@ func disableDNSSECForHostedZone(ctx context.Context, conn *route53.Route53, host
|
||||
}
|
||||
|
||||
return nil
|
||||
- }, tfresource.WithDelayRand(1*time.Minute), tfresource.WithPollInterval(20*time.Second))
|
||||
+ }, tfresource.WithDelayRand(1*time.Second), tfresource.WithPollInterval(20*time.Second))
|
||||
|
||||
if tfresource.TimedOut(err) {
|
||||
output, err = conn.DisableHostedZoneDNSSECWithContext(ctx, input)
|
||||
@@ -687,9 +687,9 @@ func waitForChangeSynchronization(ctx context.Context, conn *route53.Route53, ch
|
||||
Pending: []string{route53.ChangeStatusPending},
|
||||
Target: []string{route53.ChangeStatusInsync},
|
||||
Delay: time.Duration(rand.Int63n(zoneChangeSyncMaxDelay-zoneChangeSyncMinDelay)+zoneChangeSyncMinDelay) * time.Second,
|
||||
MinTimeout: 5 * time.Second,
|
||||
- MinTimeout: 5 * time.Second,
|
||||
+ MinTimeout: 1 * time.Second,
|
||||
PollInterval: time.Duration(rand.Int63n(zoneChangeSyncMaxPollInterval-zoneChangeSyncMinPollInterval)+zoneChangeSyncMinPollInterval) * time.Second,
|
||||
- Timeout: 15 * time.Minute,
|
||||
+ Timeout: 1 * time.Minute,
|
||||
Refresh: func() (result interface{}, state string, err error) {
|
||||
input := &route53.GetChangeInput{
|
||||
Id: aws.String(changeID),
|
||||
diff --git a/internal/service/route53/zone_association.go b/internal/service/route53/zone_association.go
|
||||
index c11188caa9..0103797de7 100644
|
||||
--- a/internal/service/route53/zone_association.go
|
||||
+++ b/internal/service/route53/zone_association.go
|
||||
@@ -84,7 +84,7 @@ func resourceZoneAssociationCreate(d *schema.ResourceData, meta interface{}) err
|
||||
Delay: 30 * time.Second,
|
||||
Pending: []string{route53.ChangeStatusPending},
|
||||
Target: []string{route53.ChangeStatusInsync},
|
||||
- Timeout: 10 * time.Minute,
|
||||
+ Timeout: 1 * time.Minute,
|
||||
MinTimeout: 2 * time.Second,
|
||||
Refresh: resourceZoneAssociationRefreshFunc(conn, CleanChangeID(aws.StringValue(output.ChangeInfo.Id)), d.Id()),
|
||||
}
|
||||
--
|
||||
2.25.1
|
||||
|
||||
|
@@ -1,29 +1,29 @@
-From bc72f0c3ec4a4d099d6d9c9ab8bb5c839957378f Mon Sep 17 00:00:00 2001
+From c2981f42629c1dcb3756c13f243c8c52391f3677 Mon Sep 17 00:00:00 2001
 From: Bert Blommers <info@bertblommers.nl>
-Date: Wed, 29 Jun 2022 16:25:09 +0000
-Subject: [PATCH] Patch CF timings
+Date: Sun, 13 Aug 2023 21:43:26 +0000
+Subject: [PATCH] Patch: CloudFront timings

 ---
  internal/service/cloudfront/distribution.go | 6 +++---
  1 file changed, 3 insertions(+), 3 deletions(-)

 diff --git a/internal/service/cloudfront/distribution.go b/internal/service/cloudfront/distribution.go
-index 3d34f2cf1c..17e7e17014 100644
+index 4870ca0f6d..8190b12231 100644
 --- a/internal/service/cloudfront/distribution.go
 +++ b/internal/service/cloudfront/distribution.go
-@@ -1185,9 +1185,9 @@ func DistributionWaitUntilDeployed(id string, meta interface{}) error {
+@@ -1120,9 +1120,9 @@ func DistributionWaitUntilDeployed(ctx context.Context, id string, meta interfac
 		Pending: []string{"InProgress"},
 		Target:  []string{"Deployed"},
-		Refresh: resourceWebDistributionStateRefreshFunc(id, meta),
+		Refresh: resourceWebDistributionStateRefreshFunc(ctx, id, meta),
 -		Timeout:    90 * time.Minute,
 -		MinTimeout: 15 * time.Second,
 -		Delay:      1 * time.Minute,
-+		Timeout:    1 * time.Minute,
-+		MinTimeout: 5 * time.Second,
-+		Delay:      10 * time.Second,
++		Timeout:    90 * time.Second,
++		MinTimeout: 2 * time.Second,
++		Delay:      2 * time.Second,
 	}

-	_, err := stateConf.WaitForState()
+	_, err := stateConf.WaitForStateContext(ctx)
 --
 2.25.1

|
||||
From 01a50d07400ee7513b31ec10e9832a2d8290b4e2 Mon Sep 17 00:00:00 2001
|
||||
From 44aeb35bb1747acd7fcae1b53186a37eee61c93d Mon Sep 17 00:00:00 2001
|
||||
From: Bert Blommers <info@bertblommers.nl>
|
||||
Date: Sat, 22 Oct 2022 13:25:17 +0000
|
||||
Date: Sun, 13 Aug 2023 20:53:38 +0000
|
||||
Subject: [PATCH] Patch: Comprehend timings
|
||||
|
||||
---
|
||||
internal/service/comprehend/common_model.go | 2 +-
|
||||
internal/service/comprehend/consts.go | 12 ++++++------
|
||||
2 files changed, 7 insertions(+), 7 deletions(-)
|
||||
internal/service/comprehend/consts.go | 14 +++++++-------
|
||||
2 files changed, 8 insertions(+), 8 deletions(-)
|
||||
|
||||
diff --git a/internal/service/comprehend/common_model.go b/internal/service/comprehend/common_model.go
|
||||
index 034314bdcc..03a29aa3c2 100644
|
||||
index b9af8e5f4c..bda7b84d90 100644
|
||||
--- a/internal/service/comprehend/common_model.go
|
||||
+++ b/internal/service/comprehend/common_model.go
|
||||
@@ -60,7 +60,7 @@ func waitNetworkInterfaceCreated(ctx context.Context, conn *ec2.EC2, initialENII
|
||||
@ -22,11 +22,15 @@ index 034314bdcc..03a29aa3c2 100644
|
||||
Timeout: timeout,
|
||||
}
|
||||
diff --git a/internal/service/comprehend/consts.go b/internal/service/comprehend/consts.go
|
||||
index e57884a12d..df5fd0678b 100644
|
||||
index e57884a12d..8fc23d7018 100644
|
||||
--- a/internal/service/comprehend/consts.go
|
||||
+++ b/internal/service/comprehend/consts.go
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
const iamPropagationTimeout = 2 * time.Minute
|
||||
@@ -4,15 +4,15 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
-const iamPropagationTimeout = 2 * time.Minute
|
||||
+const iamPropagationTimeout = 2 * time.Second
|
||||
|
||||
// Avoid service throttling
|
||||
-const entityRegcognizerCreatedDelay = 10 * time.Minute
|
||||
@ -44,6 +48,5 @@ index e57884a12d..df5fd0678b 100644
|
||||
-const documentClassifierPollInterval = 1 * time.Minute
|
||||
+const documentClassifierDeletedDelay = 5 * time.Second
|
||||
+const documentClassifierPollInterval = 1 * time.Second
|
||||
--
|
||||
2.25.1
|
||||
|
||||
--
|
||||
2.25.1
|
@ -1,95 +1,152 @@
|
||||
From 41f23fcd61cd6d9112f730d54b767e0185997103 Mon Sep 17 00:00:00 2001
|
||||
From ef15713b2e04a87966d2ddb8822d703c632c9551 Mon Sep 17 00:00:00 2001
|
||||
From: Bert Blommers <info@bertblommers.nl>
|
||||
Date: Wed, 5 Apr 2023 12:27:39 +0000
|
||||
Subject: [PATCH] Patch: RDS improvements
|
||||
Date: Sun, 13 Aug 2023 21:48:20 +0000
|
||||
Subject: [PATCH] Patch: RDS timings
|
||||
|
||||
---
|
||||
internal/service/rds/cluster.go | 12 ++++++------
|
||||
internal/service/rds/cluster.go | 16 ++++++++--------
|
||||
internal/service/rds/consts.go | 2 +-
|
||||
internal/service/rds/instance.go | 6 +++---
|
||||
3 files changed, 10 insertions(+), 10 deletions(-)
|
||||
internal/service/rds/instance.go | 24 ++++++++++++------------
|
||||
3 files changed, 21 insertions(+), 21 deletions(-)
|
||||
|
||||
diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go
|
||||
index e5aeaa106d..c9fc8ffe09 100644
|
||||
index 5b71483600..e18e024e47 100644
|
||||
--- a/internal/service/rds/cluster.go
|
||||
+++ b/internal/service/rds/cluster.go
|
||||
@@ -1510,8 +1510,8 @@ func waitDBClusterCreated(ctx context.Context, conn *rds.RDS, id string, timeout
|
||||
@@ -34,7 +34,7 @@ import (
|
||||
const (
|
||||
clusterScalingConfiguration_DefaultMinCapacity = 1
|
||||
clusterScalingConfiguration_DefaultMaxCapacity = 16
|
||||
- clusterTimeoutDelete = 2 * time.Minute
|
||||
+ clusterTimeoutDelete = 5 * time.Second
|
||||
)
|
||||
|
||||
// @SDKResource("aws_rds_cluster", name="Cluster")
|
||||
@@ -1365,7 +1365,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int
|
||||
}
|
||||
}
|
||||
|
||||
- _, err := tfresource.RetryWhen(ctx, 5*time.Minute,
|
||||
+ _, err := tfresource.RetryWhen(ctx, 1*time.Minute,
|
||||
func() (interface{}, error) {
|
||||
return conn.ModifyDBClusterWithContext(ctx, input)
|
||||
},
|
||||
@@ -1694,8 +1694,8 @@ func waitDBClusterCreated(ctx context.Context, conn *rds.RDS, id string, timeout
|
||||
Target: []string{ClusterStatusAvailable},
|
||||
Refresh: statusDBCluster(ctx, conn, id),
|
||||
Timeout: timeout,
|
||||
- MinTimeout: 10 * time.Second,
|
||||
- Delay: 30 * time.Second,
|
||||
+ MinTimeout: 3 * time.Second,
|
||||
+ MinTimeout: 1 * time.Second,
|
||||
+ Delay: 3 * time.Second,
|
||||
}
|
||||
|
||||
|
||||
outputRaw, err := stateConf.WaitForStateContext(ctx)
|
||||
@@ -1536,8 +1536,8 @@ func waitDBClusterUpdated(ctx context.Context, conn *rds.RDS, id string, timeout
|
||||
@@ -1721,8 +1721,8 @@ func waitDBClusterUpdated(ctx context.Context, conn *rds.RDS, id string, timeout
|
||||
Target: []string{ClusterStatusAvailable},
|
||||
Refresh: statusDBCluster(ctx, conn, id),
|
||||
Timeout: timeout,
|
||||
- MinTimeout: 10 * time.Second,
|
||||
- Delay: 30 * time.Second,
|
||||
+ MinTimeout: 3 * time.Second,
|
||||
+ MinTimeout: 1 * time.Second,
|
||||
+ Delay: 3 * time.Second,
|
||||
}
|
||||
|
||||
|
||||
outputRaw, err := stateConf.WaitForStateContext(ctx)
|
||||
@@ -1560,8 +1560,8 @@ func waitDBClusterDeleted(ctx context.Context, conn *rds.RDS, id string, timeout
|
||||
@@ -1745,8 +1745,8 @@ func waitDBClusterDeleted(ctx context.Context, conn *rds.RDS, id string, timeout
|
||||
Target: []string{},
|
||||
Refresh: statusDBCluster(ctx, conn, id),
|
||||
Timeout: timeout,
|
||||
- MinTimeout: 10 * time.Second,
|
||||
- Delay: 30 * time.Second,
|
||||
+ MinTimeout: 3 * time.Second,
|
||||
+ MinTimeout: 1 * time.Second,
|
||||
+ Delay: 3 * time.Second,
|
||||
}
|
||||
|
||||
|
||||
outputRaw, err := stateConf.WaitForStateContext(ctx)
|
||||
diff --git a/internal/service/rds/consts.go b/internal/service/rds/consts.go
|
||||
index dc00aaf5dd..5cc6883a49 100644
|
||||
index 6bc2ab6194..937ba77468 100644
|
||||
--- a/internal/service/rds/consts.go
|
||||
+++ b/internal/service/rds/consts.go
|
||||
@@ -215,7 +215,7 @@ func TimeoutAction_Values() []string {
|
||||
@@ -239,7 +239,7 @@ func backupTarget_Values() []string {
|
||||
}
|
||||
|
||||
|
||||
const (
|
||||
- propagationTimeout = 2 * time.Minute
|
||||
+ propagationTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
|
||||
const (
diff --git a/internal/service/rds/instance.go b/internal/service/rds/instance.go
index 6a329b4dd2..a2dcf89ade 100644
index eafddc13b0..86809379d9 100644
--- a/internal/service/rds/instance.go
+++ b/internal/service/rds/instance.go
@@ -2294,7 +2294,7 @@ func findDBInstanceByIDSDKv2(ctx context.Context, conn *rds_sdkv2.Client, id str
func waitDBInstanceAvailableSDKv1(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) { //nolint:unparam
@@ -77,9 +77,9 @@ func ResourceInstance() *schema.Resource {
        },

        Timeouts: &schema.ResourceTimeout{
-           Create: schema.DefaultTimeout(40 * time.Minute),
-           Update: schema.DefaultTimeout(80 * time.Minute),
-           Delete: schema.DefaultTimeout(60 * time.Minute),
+           Create: schema.DefaultTimeout(40 * time.Second),
+           Update: schema.DefaultTimeout(80 * time.Second),
+           Delete: schema.DefaultTimeout(60 * time.Second),
        },

        Schema: map[string]*schema.Schema{
@@ -1917,7 +1917,7 @@ func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta in
            DBInstanceIdentifier: aws.String(sourceARN.Identifier),
            SkipFinalSnapshot: true,
        }
-       _, err = tfresource.RetryWhen(ctx, 5*time.Minute,
+       _, err = tfresource.RetryWhen(ctx, 1*time.Minute,
            func() (any, error) {
                return conn.DeleteDBInstance(ctx, deleteInput)
            },
@@ -2520,8 +2520,8 @@ func statusDBInstanceSDKv2(ctx context.Context, conn *rds_sdkv2.Client, id strin

func waitDBInstanceAvailableSDKv1(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) {
    options := tfresource.Options{
        PollInterval: 10 * time.Second,
-       PollInterval: 10 * time.Second,
-       Delay: 1 * time.Minute,
+       Delay: 1 * time.Second,
+       PollInterval: 5 * time.Second,
+       Delay: 3 * time.Second,
        ContinuousTargetOccurence: 3,
    }
    for _, fn := range optFns {
@@ -2337,7 +2337,7 @@ func waitDBInstanceAvailableSDKv1(ctx context.Context, conn *rds.RDS, id string,
@@ -2563,8 +2563,8 @@ func waitDBInstanceAvailableSDKv1(ctx context.Context, conn *rds.RDS, id string,

func waitDBInstanceAvailableSDKv2(ctx context.Context, conn *rds_sdkv2.Client, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) { //nolint:unparam
    options := tfresource.Options{
        PollInterval: 10 * time.Second,
-       PollInterval: 10 * time.Second,
-       Delay: 1 * time.Minute,
+       Delay: 1 * time.Second,
+       PollInterval: 2 * time.Second,
+       Delay: 5 * time.Second,
        ContinuousTargetOccurence: 3,
    }
    for _, fn := range optFns {
@@ -2380,7 +2380,7 @@ func waitDBInstanceAvailableSDKv2(ctx context.Context, conn *rds_sdkv2.Client, i
@@ -2606,8 +2606,8 @@ func waitDBInstanceAvailableSDKv2(ctx context.Context, conn *rds_sdkv2.Client, i

func waitDBInstanceDeleted(ctx context.Context, conn *rds.RDS, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*rds.DBInstance, error) { //nolint:unparam
    options := tfresource.Options{
        PollInterval: 10 * time.Second,
-       PollInterval: 10 * time.Second,
-       Delay: 1 * time.Minute,
+       Delay: 1 * time.Second,
+       PollInterval: 2 * time.Second,
+       Delay: 5 * time.Second,
        ContinuousTargetOccurence: 3,
    }
    for _, fn := range optFns {
@@ -2694,8 +2694,8 @@ func statusBlueGreenDeployment(ctx context.Context, conn *rds_sdkv2.Client, id s

func waitBlueGreenDeploymentAvailable(ctx context.Context, conn *rds_sdkv2.Client, id string, timeout time.Duration, optFns ...tfresource.OptionsFunc) (*types.BlueGreenDeployment, error) {
    options := tfresource.Options{
-       PollInterval: 10 * time.Second,
-       Delay: 1 * time.Minute,
+       PollInterval: 2 * time.Second,
+       Delay: 5 * time.Second,
    }
    for _, fn := range optFns {
        fn(&options)
--
2.25.1
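
The knobs being tuned above drive a plain poll loop. As a minimal Python sketch of the Delay / PollInterval / ContinuousTargetOccurence semantics (the wait_until helper is hypothetical; only the field names are taken from tfresource.Options):

import time

def wait_until(check, delay, poll_interval, continuous_target_occurence, timeout):
    # Sleep `delay` seconds before the first poll, then poll every
    # `poll_interval` seconds until `check()` has succeeded
    # `continuous_target_occurence` times in a row, or `timeout` expires.
    deadline = time.monotonic() + timeout
    time.sleep(delay)
    hits = 0
    while time.monotonic() < deadline:
        hits = hits + 1 if check() else 0
        if hits >= continuous_target_occurence:
            return True
        time.sleep(poll_interval)
    raise TimeoutError("target state not reached")

Against moto the target state is reached almost instantly, so a one-minute initial Delay is pure dead time on every wait; shrinking Delay and PollInterval to a few seconds speeds these acceptance tests up without touching their logic.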

@ -1 +1 @@
Subproject commit a8163ccac8494c5beaa108369a0c8531f3800e18
Subproject commit 96ac19e4c1feb5edee5f30aba29be233109e8717
@ -122,7 +122,7 @@ comprehend:
  - TestAccComprehendEntityRecognizer_KMSKeys
dax:
  - TestAccDAXCluster_basic
  - TestAccDAXCluster_Encryption
  - TestAccDAXCluster_Encryption_enabled
dynamodb:
  - TestAccDynamoDBTableItem
ec2:
@ -205,7 +205,9 @@ ecr:
  - TestAccECRRepositoryDataSource
  - TestAccECRRepositoryPolicy
ecs:
  - TestAccECSCapacityProvider_
  - TestAccECSCapacityProvider_basic
  - TestAccECSCapacityProvider_disappears
  - TestAccECSCapacityProvider_tags
  - TestAccECSCluster_
  - TestAccECSClusterCapacityProviders_basic
  - TestAccECSClusterCapacityProviders_defaults
@ -61,6 +61,37 @@ def test_describe_certificate_authority():
        "SigningAlgorithm": "SHA512WITHRSA",
        "Subject": {"CommonName": "yscb41lw.test"},
    }
    assert ca["KeyStorageSecurityStandard"] == "FIPS_140_2_LEVEL_3_OR_HIGHER"


@mock_acmpca
def test_describe_certificate_authority_with_security_standard():
    client = boto3.client("acm-pca", region_name="ap-southeast-1")
    ca_arn = client.create_certificate_authority(
        CertificateAuthorityConfiguration={
            "KeyAlgorithm": "RSA_4096",
            "SigningAlgorithm": "SHA512WITHRSA",
            "Subject": {"CommonName": "yscb41lw.test"},
        },
        CertificateAuthorityType="SUBORDINATE",
        KeyStorageSecurityStandard="FIPS_140_2_LEVEL_2_OR_HIGHER",
        IdempotencyToken="terraform-20221125230308947400000001",
    )["CertificateAuthorityArn"]
    ca = client.describe_certificate_authority(CertificateAuthorityArn=ca_arn)[
        "CertificateAuthority"
    ]

    assert ca["Arn"] == ca_arn
    assert ca["OwnerAccount"] == DEFAULT_ACCOUNT_ID
    assert "CreatedAt" in ca
    assert ca["Type"] == "SUBORDINATE"
    assert ca["Status"] == "PENDING_CERTIFICATE"
    assert ca["CertificateAuthorityConfiguration"] == {
        "KeyAlgorithm": "RSA_4096",
        "SigningAlgorithm": "SHA512WITHRSA",
        "Subject": {"CommonName": "yscb41lw.test"},
    }
    assert ca["KeyStorageSecurityStandard"] == "FIPS_140_2_LEVEL_2_OR_HIGHER"


@mock_acmpca
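
For reference, the imports these fragments lean on: boto3, the acm-pca mock decorator, and moto's default account id (exact module paths assumed from moto's test-suite conventions):

import boto3
from moto import mock_acmpca
from moto.core import DEFAULT_ACCOUNT_ID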
@ -936,7 +936,10 @@ def test_create_autoscaling_policy_with_predictive_scaling_config():

@mock_autoscaling
@mock_ec2
def test_create_auto_scaling_group_with_mixed_instances_policy():
@pytest.mark.parametrize("include_instances_distribution", [True, False])
def test_create_auto_scaling_group_with_mixed_instances_policy(
    include_instances_distribution,
):
    mocked_networking = setup_networking(region_name="eu-west-1")
    client = boto3.client("autoscaling", region_name="eu-west-1")
    ec2_client = boto3.client("ec2", region_name="eu-west-1")
@ -946,15 +949,25 @@ def test_create_auto_scaling_group_with_mixed_instances_policy():
        LaunchTemplateName="launchie",
        LaunchTemplateData={"ImageId": EXAMPLE_AMI_ID},
    )["LaunchTemplate"]
    client.create_auto_scaling_group(
        MixedInstancesPolicy={
            "LaunchTemplate": {
                "LaunchTemplateSpecification": {
                    "LaunchTemplateName": "launchie",
                    "Version": "$DEFAULT",
                }
    input_policy = {
        "LaunchTemplate": {
            "LaunchTemplateSpecification": {
                "LaunchTemplateName": "launchie",
                "Version": "$DEFAULT",
            }
            },
        }
    }
    if include_instances_distribution:
        input_policy["InstancesDistribution"] = {
            "OnDemandAllocationStrategy": "string",
            "OnDemandBaseCapacity": 123,
            "OnDemandPercentageAboveBaseCapacity": 123,
            "SpotAllocationStrategy": "string",
            "SpotInstancePools": 123,
            "SpotMaxPrice": "string",
        }
    client.create_auto_scaling_group(
        MixedInstancesPolicy=input_policy,
        AutoScalingGroupName=asg_name,
        MinSize=2,
        MaxSize=2,
@ -964,15 +977,11 @@ def test_create_auto_scaling_group_with_mixed_instances_policy():
    # Assert we can describe MixedInstancesPolicy
    response = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
    group = response["AutoScalingGroups"][0]
    assert group["MixedInstancesPolicy"] == {
        "LaunchTemplate": {
            "LaunchTemplateSpecification": {
                "LaunchTemplateId": lt["LaunchTemplateId"],
                "LaunchTemplateName": "launchie",
                "Version": "$DEFAULT",
            }
        }
    }

    input_policy["LaunchTemplate"]["LaunchTemplateSpecification"][
        "LaunchTemplateId"
    ] = lt["LaunchTemplateId"]
    assert group["MixedInstancesPolicy"] == input_policy

    # Assert the LaunchTemplate is known for the resulting instances
    response = client.describe_auto_scaling_instances()
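
The refactor hinges on one pattern: keep the request dict around, patch in the single server-assigned field (LaunchTemplateId), and compare the whole response against it, so expected and actual can never drift apart. The same shape in isolation (all names below are illustrative, not from the diff):

import pytest

@pytest.mark.parametrize("include_extras", [True, False])
def test_round_trip_shape(include_extras):
    # Build the request once; the optional section is bolted on per parameter,
    # so a single test body covers both request shapes.
    request = {"LaunchTemplate": {"Name": "launchie"}}
    if include_extras:
        request["Extras"] = {"BaseCapacity": 123}

    # Stand-in for the service call: echo the request plus one assigned field.
    response = {**request, "LaunchTemplate": dict(request["LaunchTemplate"], Id="lt-123")}

    # Patch the assigned field into the input and compare wholesale.
    request["LaunchTemplate"]["Id"] = "lt-123"
    assert response == request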
@ -20,7 +20,7 @@ class TestAutoScalingGroup(TestCase):
        )

    def test_create_autoscaling_groups_defaults(self):
        """Test with the minimum inputs and check that all of the proper defaults
        """Test with the minimum inputs and check that all proper defaults
        are assigned for the other attributes"""

        self._create_group(name="tester_group")
86 tests/test_autoscaling/test_autoscaling_warm_pools.py Normal file
@ -0,0 +1,86 @@
import boto3

from moto import mock_autoscaling, mock_ec2
from tests import EXAMPLE_AMI_ID
from unittest import TestCase
from .utils import setup_networking


@mock_autoscaling
@mock_ec2
class TestAutoScalingGroup(TestCase):
    def setUp(self) -> None:
        self.mocked_networking = setup_networking()
        self.as_client = boto3.client("autoscaling", region_name="us-east-1")
        self.ec2_client = boto3.client("ec2", region_name="us-east-1")
        self.lc_name = "tester_config"
        self.asg_name = "asg_test"
        self.ec2_client.create_launch_template(
            LaunchTemplateName="launchie",
            LaunchTemplateData={"ImageId": EXAMPLE_AMI_ID},
        )["LaunchTemplate"]
        input_policy = {
            "LaunchTemplate": {
                "LaunchTemplateSpecification": {
                    "LaunchTemplateName": "launchie",
                    "Version": "$DEFAULT",
                }
            }
        }

        self.as_client.create_auto_scaling_group(
            MixedInstancesPolicy=input_policy,
            AutoScalingGroupName=self.asg_name,
            MinSize=2,
            MaxSize=2,
            VPCZoneIdentifier=self.mocked_networking["subnet1"],
        )

    def test_put_warm_pool(self):
        self.as_client.put_warm_pool(
            AutoScalingGroupName=self.asg_name,
            MaxGroupPreparedCapacity=42,
            MinSize=7,
            PoolState="Stopped",
            InstanceReusePolicy={"ReuseOnScaleIn": True},
        )

        pool = self.as_client.describe_warm_pool(AutoScalingGroupName=self.asg_name)
        assert pool["WarmPoolConfiguration"] == {
            "MaxGroupPreparedCapacity": 42,
            "MinSize": 7,
            "PoolState": "Stopped",
            "InstanceReusePolicy": {"ReuseOnScaleIn": True},
        }

        group = self.as_client.describe_auto_scaling_groups()["AutoScalingGroups"][0]
        assert group["WarmPoolConfiguration"] == {
            "InstanceReusePolicy": {"ReuseOnScaleIn": True},
            "MaxGroupPreparedCapacity": 42,
            "MinSize": 7,
            "PoolState": "Stopped",
        }

    def test_describe_pool_not_created(self):
        pool = self.as_client.describe_warm_pool(AutoScalingGroupName=self.asg_name)
        assert "WarmPoolConfiguration" not in pool
        assert pool["Instances"] == []

    def test_delete_pool(self):
        self.as_client.put_warm_pool(
            AutoScalingGroupName=self.asg_name,
            MinSize=2,
        )
        self.as_client.delete_warm_pool(AutoScalingGroupName=self.asg_name)
        pool = self.as_client.describe_warm_pool(AutoScalingGroupName=self.asg_name)
        assert "WarmPoolConfiguration" not in pool
        assert pool["Instances"] == []

    def test_describe_pool_with_defaults(self):
        self.as_client.put_warm_pool(
            AutoScalingGroupName=self.asg_name,
            MinSize=2,
        )
        pool = self.as_client.describe_warm_pool(AutoScalingGroupName=self.asg_name)
        assert pool["WarmPoolConfiguration"] == {"MinSize": 2, "PoolState": "Stopped"}
        assert pool["Instances"] == []
@ -53,8 +53,43 @@ def test_register_task_definition_with_platform_capability(platform_capability):
        platformCapabilities=[platform_capability],
    )

    resp = batch_client.describe_job_definitions(jobDefinitionName=def_name)
    assert resp["jobDefinitions"][0]["platformCapabilities"] == [platform_capability]
    job_def = batch_client.describe_job_definitions(jobDefinitionName=def_name)[
        "jobDefinitions"
    ][0]
    assert job_def["platformCapabilities"] == [platform_capability]

    container_props = job_def["containerProperties"]
    assert container_props["resourceRequirements"] == []
    assert container_props["secrets"] == []
    if platform_capability == "FARGATE":
        assert container_props["fargatePlatformConfiguration"] == {
            "platformVersion": "LATEST"
        }
    else:
        assert "fargatePlatformConfiguration" not in container_props


@mock_batch
def test_register_task_definition_without_command():
    _, _, _, _, batch_client = _get_clients()

    def_name = str(uuid4())[0:6]
    batch_client.register_job_definition(
        jobDefinitionName=def_name,
        type="container",
        containerProperties={
            "image": "busybox",
            "vcpus": 1,
            "memory": 4,
        },
    )

    job_def = batch_client.describe_job_definitions(jobDefinitionName=def_name)[
        "jobDefinitions"
    ][0]

    container_props = job_def["containerProperties"]
    assert container_props["command"] == []


@mock_batch
@ -38,7 +38,7 @@ def example_dist_config_with_tags(ref):
    return config


def example_dist_custom_config(ref):
def example_dist_custom_config(ref, ssl_protocols):
    return {
        "CallerReference": ref,
        "Origins": {
@ -55,7 +55,7 @@ def example_dist_custom_config(ref):
                "OriginReadTimeout": 15,
                "OriginSslProtocols": {
                    "Quantity": 2,
                    "Items": ["TLSv1", "SSLv3"],
                    "Items": ssl_protocols,
                },
            },
    }

@ -434,9 +434,10 @@ def test_create_distribution_with_invalid_s3_bucket():


@mock_cloudfront
def test_create_distribution_custom_config():
@pytest.mark.parametrize("ssl_protocols", (["TLSv1"], ["TLSv1", "SSLv3"]))
def test_create_distribution_custom_config(ssl_protocols):
    client = boto3.client("cloudfront", region_name="us-west-1")
    config = scaffold.example_dist_custom_config("ref")
    config = scaffold.example_dist_custom_config("ref", ssl_protocols)

    dist = client.create_distribution(DistributionConfig=config)["Distribution"][
        "DistributionConfig"
@ -450,8 +451,8 @@ def test_create_distribution_custom_config():
    assert custom_config["OriginKeepaliveTimeout"] == 10
    assert custom_config["OriginProtocolPolicy"] == "http-only"
    assert custom_config["OriginSslProtocols"] == {
        "Items": ["TLSv1", "SSLv3"],
        "Quantity": 2,
        "Items": ssl_protocols,
        "Quantity": len(ssl_protocols),
    }
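
Deriving Quantity from the list, as the new assertion does, keeps the two from drifting apart. The obvious helper if the fixture ever grows more callers (hypothetical, not part of the diff):

def origin_ssl_protocols(items):
    # Quantity is always computed from the list, never hard-coded beside it.
    return {"Quantity": len(items), "Items": list(items)}

assert origin_ssl_protocols(["TLSv1"]) == {"Quantity": 1, "Items": ["TLSv1"]}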
@ -3040,8 +3040,8 @@ def test_forgot_password():
    )["UserPoolClient"]["ClientId"]
    result = conn.forgot_password(ClientId=client_id, Username=str(uuid.uuid4()))
    assert result["CodeDeliveryDetails"]["Destination"] is not None
    assert result["CodeDeliveryDetails"]["DeliveryMedium"] == "SMS"
    assert result["CodeDeliveryDetails"]["AttributeName"] == "phone_number"
    assert result["CodeDeliveryDetails"]["DeliveryMedium"] == "EMAIL"
    assert result["CodeDeliveryDetails"]["AttributeName"] == "email"


@mock_cognitoidp
@ -188,7 +188,9 @@ def test_update_capacity_provider():

    resp = client.update_capacity_provider(
        name="my_provider",
        autoScalingGroupProvider={"managedScaling": {"status": "ENABLED"}},
        autoScalingGroupProvider={
            "managedScaling": {"status": "ENABLED", "instanceWarmupPeriod": 0}
        },
    )
    assert resp["capacityProvider"]["name"] == "my_provider"

@ -199,7 +201,7 @@ def test_update_capacity_provider():
    assert provider["autoScalingGroupProvider"] == {
        "autoScalingGroupArn": "asg:arn",
        "managedScaling": {
            "instanceWarmupPeriod": 300,
            "instanceWarmupPeriod": 0,
            "maximumScalingStepSize": 10000,
            "minimumScalingStepSize": 1,
            "status": "ENABLED",
@ -381,11 +381,15 @@ def test_target_group_attributes():

    # The attributes should start with the two defaults
    response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn)
    assert len(response["Attributes"]) == 5
    assert len(response["Attributes"]) == 6
    attributes = {attr["Key"]: attr["Value"] for attr in response["Attributes"]}
    assert attributes["deregistration_delay.timeout_seconds"] == "300"
    assert attributes["stickiness.enabled"] == "false"
    assert attributes["waf.fail_open.enabled"] == "false"
    assert (
        attributes["load_balancing.cross_zone.enabled"]
        == "use_load_balancer_configuration"
    )

    # Add cookie stickiness
    response = conn.modify_target_group_attributes(
@ -404,7 +408,7 @@ def test_target_group_attributes():

    # These new values should be in the full attribute list
    response = conn.describe_target_group_attributes(TargetGroupArn=target_group_arn)
    assert len(response["Attributes"]) == 6
    assert len(response["Attributes"]) == 7
    attributes = {attr["Key"]: attr["Value"] for attr in response["Attributes"]}
    assert attributes["stickiness.type"] == "lb_cookie"
    assert attributes["stickiness.enabled"] == "true"
@ -46,6 +46,9 @@ def test_create_tags():

    assert resp["Tags"] == {"key1": "val2", "key2": "val2"}

    tags = client.list_tags(ResourceArn=broker_arn)["Tags"]
    assert tags == {"key1": "val2", "key2": "val2"}


@mock_mq
def test_delete_tags():
@ -113,6 +113,8 @@ def test_list_users():

    assert len(resp["UserList"]) == 4
    assert resp["Status"] == 200
    for user in resp["UserList"]:
        user.pop("PrincipalId")

    assert {
        "Arn": f"arn:aws:quicksight:us-east-2:{ACCOUNT_ID}:user/default/fake0",
@ -185,6 +185,33 @@ def test_create_private_dns_namespace():
    assert props["DnsProperties"]["SOA"] == {"TTL": 123}


@mock_servicediscovery
def test_update_private_dns_namespace():
    client = boto3.client("servicediscovery", region_name="eu-west-1")
    client.create_private_dns_namespace(
        Name="dns_ns",
        Vpc="vpc_id",
        Description="my private dns",
        Properties={"DnsProperties": {"SOA": {"TTL": 123}}},
    )

    ns_id = client.list_namespaces()["Namespaces"][0]["Id"]

    client.update_private_dns_namespace(
        Id=ns_id,
        Namespace={
            "Description": "updated dns",
            "Properties": {"DnsProperties": {"SOA": {"TTL": 654}}},
        },
    )

    namespace = client.get_namespace(Id=ns_id)["Namespace"]
    assert namespace["Description"] == "updated dns"

    props = namespace["Properties"]
    assert props["DnsProperties"]["SOA"] == {"TTL": 654}


@mock_servicediscovery
def test_create_private_dns_namespace_with_duplicate_vpc():
    client = boto3.client("servicediscovery", region_name="eu-west-1")
@ -237,3 +264,30 @@ def test_create_public_dns_namespace():
    assert "DnsProperties" in namespace["Properties"]
    dns_props = namespace["Properties"]["DnsProperties"]
    assert dns_props == {"HostedZoneId": "hzi", "SOA": {"TTL": 124}}


@mock_servicediscovery
def test_update_public_dns_namespace():
    client = boto3.client("servicediscovery", region_name="us-east-2")
    client.create_public_dns_namespace(
        Name="public_dns_ns",
        CreatorRequestId="cri",
        Description="my public dns",
        Properties={"DnsProperties": {"SOA": {"TTL": 124}}},
    )

    ns_id = client.list_namespaces()["Namespaces"][0]["Id"]

    client.update_public_dns_namespace(
        Id=ns_id,
        Namespace={
            "Description": "updated dns",
            "Properties": {"DnsProperties": {"SOA": {"TTL": 987}}},
        },
    )

    namespace = client.get_namespace(Id=ns_id)["Namespace"]
    assert namespace["Description"] == "updated dns"

    dns_props = namespace["Properties"]["DnsProperties"]
    assert dns_props == {"SOA": {"TTL": 987}}