ECS: Various improvements (#5880)

Parent: 339309c9af
Commit: ef1fab008a
@@ -2371,7 +2371,7 @@

## ecs
<details>
<summary>74% implemented</summary>
<summary>80% implemented</summary>

- [X] create_capacity_provider
- [X] create_cluster
@@ -2408,7 +2408,7 @@
- [X] put_account_setting
- [ ] put_account_setting_default
- [X] put_attributes
- [ ] put_cluster_capacity_providers
- [X] put_cluster_capacity_providers
- [X] register_container_instance
- [X] register_task_definition
- [X] run_task
@@ -2419,8 +2419,8 @@
- [ ] submit_task_state_change
- [X] tag_resource
- [X] untag_resource
- [ ] update_capacity_provider
- [ ] update_cluster
- [X] update_capacity_provider
- [X] update_cluster
- [ ] update_cluster_settings
- [ ] update_container_agent
- [X] update_container_instances_state
@@ -29,10 +29,6 @@ ecs

- [X] create_capacity_provider
- [X] create_cluster

  The following parameters are not yet implemented: configuration, capacityProviders, defaultCapacityProviderStrategy


- [X] create_service
- [X] create_task_set
- [X] delete_account_setting
@@ -92,7 +88,7 @@ ecs
- [X] put_account_setting
- [ ] put_account_setting_default
- [X] put_attributes
- [ ] put_cluster_capacity_providers
- [X] put_cluster_capacity_providers
- [X] register_container_instance
- [X] register_task_definition
- [X] run_task
@@ -102,13 +98,13 @@ ecs
- [ ] submit_container_state_change
- [ ] submit_task_state_change
- [X] tag_resource
  Currently implemented only for services

- [X] untag_resource
  Currently implemented only for services
- [X] update_capacity_provider
- [X] update_cluster

  The serviceConnectDefaults-parameter is not yet implemented


- [ ] update_capacity_provider
- [ ] update_cluster
- [ ] update_cluster_settings
- [ ] update_container_agent
- [X] update_container_instances_state
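Note: the snippet below is an illustrative sketch, not part of this diff, of how the endpoints that move from unchecked to checked above can now be exercised against the mock. Names and values are made up for the example.

```python
import boto3
from moto import mock_ecs


@mock_ecs
def demo_cluster_capacity_providers():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="demo")

    # put_cluster_capacity_providers is now handled by the mock
    cluster = client.put_cluster_capacity_providers(
        cluster="demo",
        capacityProviders=["FARGATE", "FARGATE_SPOT"],
        defaultCapacityProviderStrategy=[
            {"capacityProvider": "FARGATE", "weight": 1, "base": 0}
        ],
    )["cluster"]
    assert cluster["capacityProviders"] == ["FARGATE", "FARGATE_SPOT"]

    # update_cluster is handled as well (serviceConnectDefaults still is not)
    updated = client.update_cluster(
        cluster="demo",
        settings=[{"name": "containerInsights", "value": "enabled"}],
    )["cluster"]
    assert updated["settings"] == [{"name": "containerInsights", "value": "enabled"}]
```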
@@ -521,9 +521,13 @@ class FakeAutoScalingGroup(CloudFormationModel):
        if launch_template:
            launch_template_id = launch_template.get("launch_template_id")
            launch_template_name = launch_template.get("launch_template_name")
            # If no version is specified, AWS will use '$Default'
            # However, AWS will never show the version if it is not specified
            # (If the user explicitly specifies '$Default', it will be returned)
            self.launch_template_version = (
                launch_template.get("version") or "$Default"
            )
            self.provided_launch_template_version = launch_template.get("version")
        elif mixed_instance_policy:
            spec = mixed_instance_policy["LaunchTemplate"][
                "LaunchTemplateSpecification"
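A rough sketch of the behaviour the comments above describe (illustrative only, and assuming the mock's default VPC covers us-east-1a): DescribeAutoScalingGroups now omits the LaunchTemplate Version field whenever the caller did not supply one explicitly.

```python
import boto3
from moto import mock_autoscaling, mock_ec2


@mock_ec2
@mock_autoscaling
def demo_launch_template_version():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    ec2.create_launch_template(
        LaunchTemplateName="demo-lt",
        LaunchTemplateData={"ImageId": "ami-12345678", "InstanceType": "t2.micro"},
    )
    asg = boto3.client("autoscaling", region_name="us-east-1")

    # No version given: the group still launches from '$Default',
    # but the describe response leaves the Version element out entirely
    asg.create_auto_scaling_group(
        AutoScalingGroupName="no-version",
        LaunchTemplate={"LaunchTemplateName": "demo-lt"},
        MinSize=0,
        MaxSize=1,
        AvailabilityZones=["us-east-1a"],
    )
    group = asg.describe_auto_scaling_groups(AutoScalingGroupNames=["no-version"])[
        "AutoScalingGroups"
    ][0]
    assert "Version" not in group["LaunchTemplate"]
```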
@@ -778,7 +778,9 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xml
          {% elif group.launch_template %}
          <LaunchTemplate>
            <LaunchTemplateId>{{ group.launch_template.id }}</LaunchTemplateId>
            <Version>{{ group.launch_template_version }}</Version>
            {% if group.provided_launch_template_version %}
            <Version>{{ group.provided_launch_template_version }}</Version>
            {% endif %}
            <LaunchTemplateName>{{ group.launch_template.name }}</LaunchTemplateName>
          </LaunchTemplate>
          {% endif %}
@@ -1,7 +1,7 @@
import re
from copy import copy
from datetime import datetime, timezone
from typing import Any
from typing import Any, Dict, List, Optional

from moto import settings
from moto.core import BaseBackend, BackendDict, BaseModel, CloudFormationModel
@@ -11,7 +11,6 @@ from moto.core.utils import unix_time, pascal_to_camelcase, remap_nested_keys
from ..ec2.utils import random_private_ip
from moto.ec2 import ec2_backends
from moto.moto_api._internal import mock_random
from moto.utilities.tagging_service import TaggingService
from .exceptions import (
    EcsClientException,
    ServiceNotFoundException,
@@ -56,7 +55,17 @@ class AccountSetting(BaseObject):


class Cluster(BaseObject, CloudFormationModel):
    def __init__(self, cluster_name, account_id, region_name, cluster_settings=None):
    def __init__(
        self,
        cluster_name,
        account_id,
        region_name,
        cluster_settings=None,
        configuration=None,
        capacity_providers=None,
        default_capacity_provider_strategy=None,
        tags=None,
    ):
        self.active_services_count = 0
        self.arn = f"arn:aws:ecs:{region_name}:{account_id}:cluster/{cluster_name}"
        self.name = cluster_name
@@ -66,6 +75,10 @@ class Cluster(BaseObject, CloudFormationModel):
        self.status = "ACTIVE"
        self.region_name = region_name
        self.settings = cluster_settings
        self.configuration = configuration
        self.capacity_providers = capacity_providers
        self.default_capacity_provider_strategy = default_capacity_provider_strategy
        self.tags = tags

    @property
    def physical_resource_id(self):
@@ -76,6 +89,10 @@ class Cluster(BaseObject, CloudFormationModel):
        response_object = self.gen_response_object()
        response_object["clusterArn"] = self.arn
        response_object["clusterName"] = self.name
        response_object["capacityProviders"] = self.capacity_providers
        response_object[
            "defaultCapacityProviderStrategy"
        ] = self.default_capacity_provider_strategy
        del response_object["arn"], response_object["name"]
        return response_object
@ -149,6 +166,12 @@ class TaskDefinition(BaseObject, CloudFormationModel):
|
||||
memory=None,
|
||||
task_role_arn=None,
|
||||
execution_role_arn=None,
|
||||
proxy_configuration=None,
|
||||
inference_accelerators=None,
|
||||
runtime_platform=None,
|
||||
ipc_mode=None,
|
||||
pid_mode=None,
|
||||
ephemeral_storage=None,
|
||||
):
|
||||
self.family = family
|
||||
self.revision = revision
|
||||
@ -174,6 +197,12 @@ class TaskDefinition(BaseObject, CloudFormationModel):
|
||||
self.volumes = []
|
||||
else:
|
||||
self.volumes = volumes
|
||||
for volume in volumes:
|
||||
if "efsVolumeConfiguration" in volume:
|
||||
# We should reach into EFS to verify this volume exists
|
||||
efs_config = volume["efsVolumeConfiguration"]
|
||||
if "rootDirectory" not in efs_config:
|
||||
efs_config["rootDirectory"] = "/"
|
||||
|
||||
if not requires_compatibilities or requires_compatibilities == ["EC2"]:
|
||||
self.compatibilities = ["EC2"]
|
||||
@ -197,6 +226,12 @@ class TaskDefinition(BaseObject, CloudFormationModel):
|
||||
)
|
||||
|
||||
self.requires_compatibilities = requires_compatibilities
|
||||
self.proxy_configuration = proxy_configuration
|
||||
self.inference_accelerators = inference_accelerators
|
||||
self.runtime_platform = runtime_platform
|
||||
self.ipc_mode = ipc_mode
|
||||
self.pid_mode = pid_mode
|
||||
self.ephemeral_storage = ephemeral_storage
|
||||
|
||||
self.cpu = cpu
|
||||
self.memory = memory
|
||||
@@ -368,12 +403,56 @@ class Task(BaseObject):
class CapacityProvider(BaseObject):
    def __init__(self, account_id, region_name, name, asg_details, tags):
        self._id = str(mock_random.uuid4())
        self.capacity_provider_arn = f"arn:aws:ecs:{region_name}:{account_id}:capacity_provider/{name}/{self._id}"
        self.capacity_provider_arn = (
            f"arn:aws:ecs:{region_name}:{account_id}:capacity-provider/{name}"
        )
        self.name = name
        self.status = "ACTIVE"
        self.auto_scaling_group_provider = asg_details
        self.auto_scaling_group_provider = self._prepare_asg_provider(asg_details)
        self.tags = tags

        self.update_status = None

    def _prepare_asg_provider(self, asg_details):
        if "managedScaling" not in asg_details:
            asg_details["managedScaling"] = {}
        if not asg_details["managedScaling"].get("instanceWarmupPeriod"):
            asg_details["managedScaling"]["instanceWarmupPeriod"] = 300
        if not asg_details["managedScaling"].get("minimumScalingStepSize"):
            asg_details["managedScaling"]["minimumScalingStepSize"] = 1
        if not asg_details["managedScaling"].get("maximumScalingStepSize"):
            asg_details["managedScaling"]["maximumScalingStepSize"] = 10000
        if not asg_details["managedScaling"].get("targetCapacity"):
            asg_details["managedScaling"]["targetCapacity"] = 100
        if not asg_details["managedScaling"].get("status"):
            asg_details["managedScaling"]["status"] = "DISABLED"
        if "managedTerminationProtection" not in asg_details:
            asg_details["managedTerminationProtection"] = "DISABLED"
        return asg_details

    def update(self, asg_details):
        if "managedTerminationProtection" in asg_details:
            self.auto_scaling_group_provider[
                "managedTerminationProtection"
            ] = asg_details["managedTerminationProtection"]
        if "managedScaling" in asg_details:
            scaling_props = [
                "status",
                "targetCapacity",
                "minimumScalingStepSize",
                "maximumScalingStepSize",
                "instanceWarmupPeriod",
            ]
            for prop in scaling_props:
                if prop in asg_details["managedScaling"]:
                    self.auto_scaling_group_provider["managedScaling"][
                        prop
                    ] = asg_details["managedScaling"][prop]
            self.auto_scaling_group_provider = self._prepare_asg_provider(
                self.auto_scaling_group_provider
            )
        self.update_status = "UPDATE_COMPLETE"


class CapacityProviderFailure(BaseObject):
    def __init__(self, reason, name, account_id, region_name):
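The net effect of `_prepare_asg_provider` is that a capacity provider created with a bare `autoScalingGroupProvider` comes back fully populated with defaults. An illustrative sketch, not part of this diff (the ASG ARN and names are made up):

```python
import boto3
from moto import mock_ecs


@mock_ecs
def demo_capacity_provider_defaults():
    client = boto3.client("ecs", region_name="us-east-1")
    provider = client.create_capacity_provider(
        name="demo-cp",
        # Only the ASG ARN is supplied; managedScaling is left out on purpose
        autoScalingGroupProvider={
            "autoScalingGroupArn": "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:uuid:autoScalingGroupName/demo-asg"
        },
    )["capacityProvider"]

    scaling = provider["autoScalingGroupProvider"]["managedScaling"]
    # Defaults filled in by _prepare_asg_provider
    assert scaling["instanceWarmupPeriod"] == 300
    assert scaling["minimumScalingStepSize"] == 1
    assert scaling["maximumScalingStepSize"] == 10000
    assert scaling["targetCapacity"] == 100
    assert scaling["status"] == "DISABLED"
    assert provider["autoScalingGroupProvider"]["managedTerminationProtection"] == "DISABLED"
```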
@ -402,6 +481,7 @@ class Service(BaseObject, CloudFormationModel):
|
||||
launch_type=None,
|
||||
backend=None,
|
||||
service_registries=None,
|
||||
platform_version=None,
|
||||
):
|
||||
self.cluster_name = cluster.name
|
||||
self.cluster_arn = cluster.arn
|
||||
@ -438,6 +518,7 @@ class Service(BaseObject, CloudFormationModel):
|
||||
self.scheduling_strategy = (
|
||||
scheduling_strategy if scheduling_strategy is not None else "REPLICA"
|
||||
)
|
||||
self.platform_version = platform_version
|
||||
self.tags = tags if tags is not None else []
|
||||
self.pending_count = 0
|
||||
self.region_name = cluster.region_name
|
||||
@ -461,6 +542,7 @@ class Service(BaseObject, CloudFormationModel):
|
||||
response_object["serviceName"] = self.name
|
||||
response_object["serviceArn"] = self.arn
|
||||
response_object["schedulingStrategy"] = self.scheduling_strategy
|
||||
response_object["platformVersion"] = self.platform_version
|
||||
if response_object["deploymentController"]["type"] == "ECS":
|
||||
del response_object["deploymentController"]
|
||||
del response_object["taskSets"]
|
||||
@ -737,12 +819,12 @@ class TaskSet(BaseObject):
|
||||
self.task_definition = task_definition or ""
|
||||
self.region_name = region_name
|
||||
self.external_id = external_id or ""
|
||||
self.network_configuration = network_configuration or {}
|
||||
self.network_configuration = network_configuration or None
|
||||
self.load_balancers = load_balancers or []
|
||||
self.service_registries = service_registries or []
|
||||
self.launch_type = launch_type
|
||||
self.capacity_provider_strategy = capacity_provider_strategy or []
|
||||
self.platform_version = platform_version or ""
|
||||
self.platform_version = platform_version or "LATEST"
|
||||
self.scale = scale or {"value": 100.0, "unit": "PERCENT"}
|
||||
self.client_token = client_token or ""
|
||||
self.tags = tags or []
|
||||
@@ -787,19 +869,15 @@ class EC2ContainerServiceBackend(BaseBackend):
    AWS reference: https://aws.amazon.com/blogs/compute/migrating-your-amazon-ecs-deployment-to-the-new-arn-and-resource-id-format-2/
    """

    def __init__(self, region_name, account_id):
    def __init__(self, region_name: str, account_id: str):
        super().__init__(region_name, account_id)
        self.account_settings = dict()
        self.capacity_providers = dict()
        self.clusters = {}
        self.clusters: Dict[str, Cluster] = {}
        self.task_definitions = {}
        self.tasks = {}
        self.services = {}
        self.container_instances = {}
        self.task_sets = {}
        self.tagger = TaggingService(
            tag_name="tags", key_name="key", value_name="value"
        )
        self.services: Dict[str, Service] = {}
        self.container_instances: Dict[str, ContainerInstance] = {}

    @staticmethod
    def default_vpc_endpoint_service(service_region, zones):
@@ -823,8 +901,6 @@ class EC2ContainerServiceBackend(BaseBackend):
            self.account_id, self.region_name, name, asg_details, tags
        )
        self.capacity_providers[name] = capacity_provider
        if tags:
            self.tagger.tag_resource(capacity_provider.capacity_provider_arn, tags)
        return capacity_provider

    def describe_task_definition(self, task_definition_str):
@@ -842,23 +918,54 @@ class EC2ContainerServiceBackend(BaseBackend):
        ):
            return self.task_definitions[family][revision]
        else:
            raise Exception(f"{task_definition_name} is not a task_definition")
            raise TaskDefinitionNotFoundException()

    def create_cluster(
        self, cluster_name: str, tags: Any = None, cluster_settings: Any = None
        self,
        cluster_name: str,
        tags: Any = None,
        cluster_settings: Any = None,
        configuration: Optional[Dict[str, Any]] = None,
        capacity_providers: Optional[List[str]] = None,
        default_capacity_provider_strategy: Optional[List[Dict[str, Any]]] = None,
    ) -> Cluster:
        """
        The following parameters are not yet implemented: configuration, capacityProviders, defaultCapacityProviderStrategy
        """
        cluster = Cluster(
            cluster_name, self.account_id, self.region_name, cluster_settings
            cluster_name,
            self.account_id,
            self.region_name,
            cluster_settings,
            configuration,
            capacity_providers,
            default_capacity_provider_strategy,
            tags,
        )
        self.clusters[cluster_name] = cluster
        if tags:
            self.tagger.tag_resource(cluster.arn, tags)
        return cluster

    def _get_provider(self, name_or_arn):
    def update_cluster(self, cluster_name, cluster_settings, configuration) -> Cluster:
        """
        The serviceConnectDefaults-parameter is not yet implemented
        """
        cluster = self._get_cluster(cluster_name)
        if cluster_settings:
            cluster.settings = cluster_settings
        if configuration:
            cluster.configuration = configuration
        return cluster

    def put_cluster_capacity_providers(
        self, cluster_name, capacity_providers, default_capacity_provider_strategy
    ):
        cluster = self._get_cluster(cluster_name)
        if capacity_providers is not None:
            cluster.capacity_providers = capacity_providers
        if default_capacity_provider_strategy is not None:
            cluster.default_capacity_provider_strategy = (
                default_capacity_provider_strategy
            )
        return cluster

    def _get_provider(self, name_or_arn) -> CapacityProvider:
        for provider in self.capacity_providers.values():
            if (
                provider.name == name_or_arn
@@ -886,6 +993,11 @@ class EC2ContainerServiceBackend(BaseBackend):
        self.capacity_providers.pop(provider.name)
        return provider

    def update_capacity_provider(self, name_or_arn, asg_provider) -> CapacityProvider:
        provider = self._get_provider(name_or_arn)
        provider.update(asg_provider)
        return provider

    def list_clusters(self):
        """
        maxSize and pagination not implemented
@@ -913,19 +1025,21 @@ class EC2ContainerServiceBackend(BaseBackend):
                )
            )

        if "TAGS" in (include or []):
        if not include or "TAGS" not in (include):
            for cluster in list_clusters:
                cluster_arn = cluster["clusterArn"]
                if self.tagger.has_tags(cluster_arn):
                    cluster_tags = self.tagger.list_tags_for_resource(cluster_arn)
                    cluster.update(cluster_tags)
                cluster["tags"] = None

        return list_clusters, failures

    def delete_cluster(self, cluster_str: str) -> Cluster:
        cluster = self._get_cluster(cluster_str)

        return self.clusters.pop(cluster.name)
        # A cluster is not immediately removed - just marked as inactive
        # It is only deleted later on
        # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.delete_cluster
        cluster.status = "INACTIVE"

        return cluster

    def register_task_definition(
        self,
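In practice a deleted cluster therefore stays visible. A short illustrative sketch of the resulting behaviour, mirrored by the updated test_delete_cluster further down in the test changes:

```python
import boto3
from moto import mock_ecs


@mock_ecs
def demo_delete_cluster_marks_inactive():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="demo")

    deleted = client.delete_cluster(cluster="demo")["cluster"]
    assert deleted["status"] == "INACTIVE"

    # The cluster is still listed and describable after deletion
    assert len(client.list_clusters()["clusterArns"]) == 1
    still_there = client.describe_clusters(clusters=["demo"])["clusters"][0]
    assert still_there["status"] == "INACTIVE"
```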
@ -940,6 +1054,12 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
memory=None,
|
||||
task_role_arn=None,
|
||||
execution_role_arn=None,
|
||||
proxy_configuration=None,
|
||||
inference_accelerators=None,
|
||||
runtime_platform=None,
|
||||
ipc_mode=None,
|
||||
pid_mode=None,
|
||||
ephemeral_storage=None,
|
||||
):
|
||||
if family in self.task_definitions:
|
||||
last_id = self._get_last_task_definition_revision_id(family)
|
||||
@ -962,6 +1082,12 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
memory=memory,
|
||||
task_role_arn=task_role_arn,
|
||||
execution_role_arn=execution_role_arn,
|
||||
proxy_configuration=proxy_configuration,
|
||||
inference_accelerators=inference_accelerators,
|
||||
runtime_platform=runtime_platform,
|
||||
ipc_mode=ipc_mode,
|
||||
pid_mode=pid_mode,
|
||||
ephemeral_storage=ephemeral_storage,
|
||||
)
|
||||
self.task_definitions[family][revision] = task_definition
|
||||
|
||||
@@ -995,7 +1121,9 @@ class EC2ContainerServiceBackend(BaseBackend):
            family in self.task_definitions
            and revision in self.task_definitions[family]
        ):
            task_definition = self.task_definitions[family].pop(revision)
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.deregister_task_definition
            # At this time, INACTIVE task definitions remain discoverable in your account indefinitely.
            task_definition = self.task_definitions[family][revision]
            task_definition.status = "INACTIVE"
            return task_definition
        else:
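The same pattern now applies to task definitions: deregistering flips the status but keeps the revision around. An illustrative sketch, not taken from the test suite:

```python
import boto3
from moto import mock_ecs


@mock_ecs
def demo_deregister_keeps_task_definition():
    client = boto3.client("ecs", region_name="us-east-1")
    client.register_task_definition(
        family="demo-task",
        containerDefinitions=[{"name": "web", "image": "nginx", "memory": 128}],
    )

    deregistered = client.deregister_task_definition(taskDefinition="demo-task:1")
    assert deregistered["taskDefinition"]["status"] == "INACTIVE"

    # The revision remains discoverable afterwards
    described = client.describe_task_definition(taskDefinition="demo-task:1")
    assert described["taskDefinition"]["status"] == "INACTIVE"
```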
@ -1015,9 +1143,30 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
cluster = self._get_cluster(cluster_str)
|
||||
|
||||
task_definition = self.describe_task_definition(task_definition_str)
|
||||
resource_requirements = self._calculate_task_resource_requirements(
|
||||
task_definition
|
||||
)
|
||||
if cluster.name not in self.tasks:
|
||||
self.tasks[cluster.name] = {}
|
||||
tasks = []
|
||||
if launch_type == "FARGATE":
|
||||
for _ in range(count):
|
||||
task = Task(
|
||||
cluster,
|
||||
task_definition,
|
||||
None,
|
||||
resource_requirements,
|
||||
backend=self,
|
||||
overrides=overrides or {},
|
||||
started_by=started_by or "",
|
||||
tags=tags or [],
|
||||
launch_type=launch_type or "",
|
||||
networking_configuration=networking_configuration,
|
||||
)
|
||||
tasks.append(task)
|
||||
self.tasks[cluster.name][task.task_arn] = task
|
||||
return tasks
|
||||
|
||||
container_instances = list(
|
||||
self.container_instances.get(cluster.name, {}).keys()
|
||||
)
|
||||
@ -1028,9 +1177,6 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
for x in container_instances
|
||||
if self.container_instances[cluster.name][x].status == "ACTIVE"
|
||||
]
|
||||
resource_requirements = self._calculate_task_resource_requirements(
|
||||
task_definition
|
||||
)
|
||||
# TODO: return event about unable to place task if not able to place enough tasks to meet count
|
||||
placed_count = 0
|
||||
for container_instance in active_container_instances:
|
||||
@ -1301,10 +1447,11 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
deployment_controller=None,
|
||||
launch_type=None,
|
||||
service_registries=None,
|
||||
platform_version=None,
|
||||
):
|
||||
cluster = self._get_cluster(cluster_str)
|
||||
|
||||
if task_definition_str is not None:
|
||||
if task_definition_str:
|
||||
task_definition = self.describe_task_definition(task_definition_str)
|
||||
else:
|
||||
task_definition = None
|
||||
@ -1326,6 +1473,7 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
launch_type,
|
||||
backend=self,
|
||||
service_registries=service_registries,
|
||||
platform_version=platform_version,
|
||||
)
|
||||
cluster_service_pair = f"{cluster.name}:{service_name}"
|
||||
self.services[cluster_service_pair] = service
|
||||
@@ -1354,18 +1502,19 @@ class EC2ContainerServiceBackend(BaseBackend):

    def describe_services(self, cluster_str, service_names_or_arns):
        cluster = self._get_cluster(cluster_str)
        service_names = [name.split("/")[-1] for name in service_names_or_arns]

        result = []
        failures = []
        for name in service_names:
        for name_or_arn in service_names_or_arns:
            name = name_or_arn.split("/")[-1]
            cluster_service_pair = f"{cluster.name}:{name}"
            if cluster_service_pair in self.services:
                result.append(self.services[cluster_service_pair])
            else:
                missing_arn = (
                    f"arn:aws:ecs:{self.region_name}:{self.account_id}:service/{name}"
                )
                if name_or_arn.startswith("arn:aws:ecs"):
                    missing_arn = name_or_arn
                else:
                    missing_arn = f"arn:aws:ecs:{self.region_name}:{self.account_id}:service/{name}"
                failures.append({"arn": missing_arn, "reason": "MISSING"})

        return result, failures
@@ -1401,7 +1550,11 @@ class EC2ContainerServiceBackend(BaseBackend):
                "The service cannot be stopped while it is scaled above 0."
            )
        else:
            return self.services.pop(cluster_service_pair)
            # A service is not immediately removed - just marked as inactive
            # It is only deleted later on
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.delete_service
            service.status = "INACTIVE"
            return service

    def register_container_instance(self, cluster_str, ec2_instance_id):
        cluster_name = cluster_str.split("/")[-1]
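One consequence of iterating over the raw names-or-ARNs: when a lookup fails, the failure entry now echoes back the exact ARN that was passed in instead of a reconstructed short-form one. An illustrative sketch (account id and names are made up):

```python
import boto3
from moto import mock_ecs


@mock_ecs
def demo_describe_services_failure_arn():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="demo")

    missing = "arn:aws:ecs:us-east-1:123456789012:service/demo/does-not-exist"
    resp = client.describe_services(cluster="demo", services=[missing])

    assert resp["services"] == []
    # The full ARN we asked about comes back verbatim in the failure
    assert resp["failures"] == [{"arn": missing, "reason": "MISSING"}]
```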
@ -1693,53 +1846,60 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
|
||||
@staticmethod
|
||||
def _parse_resource_arn(resource_arn):
|
||||
match = re.match(
|
||||
regexes = [
|
||||
"^arn:aws:ecs:(?P<region>[^:]+):(?P<account_id>[^:]+):(?P<service>[^:]+)/(?P<cluster_id>[^:]+)/(?P<service_id>[^:]+)/ecs-svc/(?P<id>.*)$",
|
||||
"^arn:aws:ecs:(?P<region>[^:]+):(?P<account_id>[^:]+):(?P<service>[^:]+)/(?P<cluster_id>[^:]+)/(?P<id>.*)$",
|
||||
resource_arn,
|
||||
)
|
||||
if not match:
|
||||
# maybe a short-format ARN
|
||||
match = re.match(
|
||||
"^arn:aws:ecs:(?P<region>[^:]+):(?P<account_id>[^:]+):(?P<service>[^:]+)/(?P<id>.*)$",
|
||||
resource_arn,
|
||||
"^arn:aws:ecs:(?P<region>[^:]+):(?P<account_id>[^:]+):(?P<service>[^:]+)/(?P<id>.*)$",
|
||||
]
|
||||
for regex in regexes:
|
||||
match = re.match(regex, resource_arn)
|
||||
if match:
|
||||
return match.groupdict()
|
||||
raise JsonRESTError("InvalidParameterException", "The ARN provided is invalid.")
|
||||
|
||||
def _get_resource(self, resource_arn, parsed_arn):
|
||||
if parsed_arn["service"] == "cluster":
|
||||
return self._get_cluster(parsed_arn["id"])
|
||||
if parsed_arn["service"] == "service":
|
||||
for service in self.services.values():
|
||||
if service.arn == resource_arn:
|
||||
return service
|
||||
raise ServiceNotFoundException
|
||||
elif parsed_arn["service"] == "task-set":
|
||||
c_id = parsed_arn["cluster_id"]
|
||||
s_id = parsed_arn["service_id"]
|
||||
services, _ = self.describe_services(
|
||||
cluster_str=c_id, service_names_or_arns=[s_id]
|
||||
)
|
||||
if not match:
|
||||
raise JsonRESTError(
|
||||
"InvalidParameterException", "The ARN provided is invalid."
|
||||
for service in services:
|
||||
for task_set in service.task_sets:
|
||||
if task_set.task_set_arn == resource_arn:
|
||||
return task_set
|
||||
raise ServiceNotFoundException
|
||||
elif parsed_arn["service"] == "task-definition":
|
||||
task_def = self.describe_task_definition(
|
||||
task_definition_str=parsed_arn["id"]
|
||||
)
|
||||
return match.groupdict()
|
||||
return task_def
|
||||
elif parsed_arn["service"] == "capacity-provider":
|
||||
return self._get_provider(parsed_arn["id"])
|
||||
raise NotImplementedError()
|
||||
|
||||
def list_tags_for_resource(self, resource_arn):
|
||||
"""Currently implemented only for task definitions and services"""
|
||||
parsed_arn = self._parse_resource_arn(resource_arn)
|
||||
if parsed_arn["service"] == "task-definition":
|
||||
for task_definition in self.task_definitions.values():
|
||||
for revision in task_definition.values():
|
||||
if revision.arn == resource_arn:
|
||||
return revision.tags
|
||||
raise TaskDefinitionNotFoundException()
|
||||
elif parsed_arn["service"] == "service":
|
||||
for service in self.services.values():
|
||||
if service.arn == resource_arn:
|
||||
return service.tags
|
||||
raise ServiceNotFoundException
|
||||
raise NotImplementedError()
|
||||
resource = self._get_resource(resource_arn, parsed_arn)
|
||||
return resource.tags
|
||||
|
||||
def _get_last_task_definition_revision_id(self, family):
|
||||
definitions = self.task_definitions.get(family, {})
|
||||
if definitions:
|
||||
return max(definitions.keys())
|
||||
|
||||
def tag_resource(self, resource_arn, tags):
|
||||
"""Currently implemented only for services"""
|
||||
def tag_resource(self, resource_arn, tags) -> None:
|
||||
parsed_arn = self._parse_resource_arn(resource_arn)
|
||||
if parsed_arn["service"] == "service":
|
||||
for service in self.services.values():
|
||||
if service.arn == resource_arn:
|
||||
service.tags = self._merge_tags(service.tags, tags)
|
||||
return {}
|
||||
raise ServiceNotFoundException
|
||||
raise NotImplementedError()
|
||||
resource = self._get_resource(resource_arn, parsed_arn)
|
||||
resource.tags = self._merge_tags(resource.tags, tags)
|
||||
|
||||
def _merge_tags(self, existing_tags, new_tags):
|
||||
merged_tags = new_tags
|
||||
@@ -1753,18 +1913,10 @@ class EC2ContainerServiceBackend(BaseBackend):
    def _get_keys(tags):
        return [tag["key"] for tag in tags]

    def untag_resource(self, resource_arn, tag_keys):
        """Currently implemented only for services"""
    def untag_resource(self, resource_arn, tag_keys) -> None:
        parsed_arn = self._parse_resource_arn(resource_arn)
        if parsed_arn["service"] == "service":
            for service in self.services.values():
                if service.arn == resource_arn:
                    service.tags = [
                        tag for tag in service.tags if tag["key"] not in tag_keys
                    ]
                    return {}
            raise ServiceNotFoundException
        raise NotImplementedError()
        resource = self._get_resource(resource_arn, parsed_arn)
        resource.tags = [tag for tag in resource.tags if tag["key"] not in tag_keys]

    def create_task_set(
        self,
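With `_get_resource` in place, tagging is no longer limited to services; the same tag/untag/list path now resolves clusters, task definitions, task sets and capacity providers by ARN. A rough sketch under made-up names, not part of this diff:

```python
import boto3
from moto import mock_ecs


@mock_ecs
def demo_tagging_beyond_services():
    client = boto3.client("ecs", region_name="us-east-1")
    cluster_arn = client.create_cluster(
        clusterName="demo", tags=[{"key": "team", "value": "ecs"}]
    )["cluster"]["clusterArn"]

    # list_tags_for_resource resolves the cluster ARN through _get_resource
    tags = client.list_tags_for_resource(resourceArn=cluster_arn)["tags"]
    assert tags == [{"key": "team", "value": "ecs"}]

    # tag_resource merges, untag_resource filters, on the same resolved resource
    client.tag_resource(resourceArn=cluster_arn, tags=[{"key": "stage", "value": "test"}])
    client.untag_resource(resourceArn=cluster_arn, tagKeys=["stage"])
    assert client.list_tags_for_resource(resourceArn=cluster_arn)["tags"] == [
        {"key": "team", "value": "ecs"}
    ]
```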
@ -1836,8 +1988,12 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
task_set_results = []
|
||||
if task_sets:
|
||||
for task_set in service_obj.task_sets:
|
||||
# Match full ARN
|
||||
if task_set.task_set_arn in task_sets:
|
||||
task_set_results.append(task_set)
|
||||
# Match partial ARN if only the taskset ID is provided
|
||||
elif "/".join(task_set.task_set_arn.split("/")[-2:]) in task_sets:
|
||||
task_set_results.append(task_set)
|
||||
else:
|
||||
task_set_results = service_obj.task_sets
|
||||
|
||||
@ -1853,7 +2009,9 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
service_key = f"{cluster_name}:{service_name}"
|
||||
task_set_element = None
|
||||
for i, ts in enumerate(self.services[service_key].task_sets):
|
||||
if task_set == ts.task_set_arn:
|
||||
if task_set == ts.task_set_arn or task_set == "/".join(
|
||||
ts.task_set_arn.split("/")[-2:]
|
||||
):
|
||||
task_set_element = i
|
||||
|
||||
if task_set_element is not None:
|
||||
|
@@ -1,7 +1,7 @@
import json

from moto.core.responses import BaseResponse
from .models import ecs_backends
from .models import ecs_backends, EC2ContainerServiceBackend


class EC2ContainerServiceResponse(BaseResponse):
@@ -9,13 +9,7 @@ class EC2ContainerServiceResponse(BaseResponse):
        super().__init__(service_name="ecs")

    @property
    def ecs_backend(self):
        """
        ECS Backend

        :return: ECS Backend object
        :rtype: moto.ecs.models.EC2ContainerServiceBackend
        """
    def ecs_backend(self) -> EC2ContainerServiceBackend:
        return ecs_backends[self.current_account][self.region]

    @property
@ -39,9 +33,21 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
cluster_name = self._get_param("clusterName")
|
||||
tags = self._get_param("tags")
|
||||
settings = self._get_param("settings")
|
||||
configuration = self._get_param("configuration")
|
||||
capacity_providers = self._get_param("capacityProviders")
|
||||
default_capacity_provider_strategy = self._get_param(
|
||||
"defaultCapacityProviderStrategy"
|
||||
)
|
||||
if cluster_name is None:
|
||||
cluster_name = "default"
|
||||
cluster = self.ecs_backend.create_cluster(cluster_name, tags, settings)
|
||||
cluster = self.ecs_backend.create_cluster(
|
||||
cluster_name,
|
||||
tags,
|
||||
settings,
|
||||
configuration,
|
||||
capacity_providers,
|
||||
default_capacity_provider_strategy,
|
||||
)
|
||||
return json.dumps({"cluster": cluster.response_object})
|
||||
|
||||
def list_clusters(self):
|
||||
@ -53,11 +59,35 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
}
|
||||
)
|
||||
|
||||
def update_cluster(self):
|
||||
cluster_name = self._get_param("cluster")
|
||||
settings = self._get_param("settings")
|
||||
configuration = self._get_param("configuration")
|
||||
cluster = self.ecs_backend.update_cluster(cluster_name, settings, configuration)
|
||||
return json.dumps({"cluster": cluster.response_object})
|
||||
|
||||
def put_cluster_capacity_providers(self):
|
||||
cluster_name = self._get_param("cluster")
|
||||
capacity_providers = self._get_param("capacityProviders")
|
||||
default_capacity_provider_strategy = self._get_param(
|
||||
"defaultCapacityProviderStrategy"
|
||||
)
|
||||
cluster = self.ecs_backend.put_cluster_capacity_providers(
|
||||
cluster_name, capacity_providers, default_capacity_provider_strategy
|
||||
)
|
||||
return json.dumps({"cluster": cluster.response_object})
|
||||
|
||||
def delete_capacity_provider(self):
|
||||
name = self._get_param("capacityProvider")
|
||||
provider = self.ecs_backend.delete_capacity_provider(name)
|
||||
return json.dumps({"capacityProvider": provider.response_object})
|
||||
|
||||
def update_capacity_provider(self):
|
||||
name = self._get_param("name")
|
||||
asg_provider = self._get_param("autoScalingGroupProvider")
|
||||
provider = self.ecs_backend.update_capacity_provider(name, asg_provider)
|
||||
return json.dumps({"capacityProvider": provider.response_object})
|
||||
|
||||
def describe_capacity_providers(self):
|
||||
names = self._get_param("capacityProviders")
|
||||
providers, failures = self.ecs_backend.describe_capacity_providers(names)
|
||||
@ -96,6 +126,12 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
memory = self._get_param("memory")
|
||||
task_role_arn = self._get_param("taskRoleArn")
|
||||
execution_role_arn = self._get_param("executionRoleArn")
|
||||
proxy_configuration = self._get_param("proxyConfiguration")
|
||||
inference_accelerators = self._get_param("inferenceAccelerators")
|
||||
runtime_platform = self._get_param("runtimePlatform")
|
||||
ipc_mode = self._get_param("ipcMode")
|
||||
pid_mode = self._get_param("pidMode")
|
||||
ephemeral_storage = self._get_param("ephemeralStorage")
|
||||
|
||||
task_definition = self.ecs_backend.register_task_definition(
|
||||
family,
|
||||
@ -109,6 +145,12 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
memory=memory,
|
||||
task_role_arn=task_role_arn,
|
||||
execution_role_arn=execution_role_arn,
|
||||
proxy_configuration=proxy_configuration,
|
||||
inference_accelerators=inference_accelerators,
|
||||
runtime_platform=runtime_platform,
|
||||
ipc_mode=ipc_mode,
|
||||
pid_mode=pid_mode,
|
||||
ephemeral_storage=ephemeral_storage,
|
||||
)
|
||||
return json.dumps({"taskDefinition": task_definition.response_object})
|
||||
|
||||
@ -223,6 +265,7 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
tags = self._get_param("tags")
|
||||
deployment_controller = self._get_param("deploymentController")
|
||||
launch_type = self._get_param("launchType")
|
||||
platform_version = self._get_param("platformVersion")
|
||||
service = self.ecs_backend.create_service(
|
||||
cluster_str,
|
||||
service_name,
|
||||
@ -234,6 +277,7 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
deployment_controller,
|
||||
launch_type,
|
||||
service_registries=service_registries,
|
||||
platform_version=platform_version,
|
||||
)
|
||||
return json.dumps({"service": service.response_object})
|
||||
|
||||
@ -403,14 +447,14 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
def tag_resource(self):
|
||||
resource_arn = self._get_param("resourceArn")
|
||||
tags = self._get_param("tags")
|
||||
results = self.ecs_backend.tag_resource(resource_arn, tags)
|
||||
return json.dumps(results)
|
||||
self.ecs_backend.tag_resource(resource_arn, tags)
|
||||
return json.dumps({})
|
||||
|
||||
def untag_resource(self):
|
||||
resource_arn = self._get_param("resourceArn")
|
||||
tag_keys = self._get_param("tagKeys")
|
||||
results = self.ecs_backend.untag_resource(resource_arn, tag_keys)
|
||||
return json.dumps(results)
|
||||
self.ecs_backend.untag_resource(resource_arn, tag_keys)
|
||||
return json.dumps({})
|
||||
|
||||
def create_task_set(self):
|
||||
service_str = self._get_param("service")
|
||||
|
@ -176,6 +176,47 @@ ecr:
|
||||
- TestAccECRRepository
|
||||
- TestAccECRRepositoryDataSource
|
||||
- TestAccECRRepositoryPolicy
|
||||
ecs:
|
||||
- TestAccECSCapacityProvider_
|
||||
- TestAccECSCluster_
|
||||
- TestAccECSClusterCapacityProviders_basic
|
||||
- TestAccECSClusterCapacityProviders_defaults
|
||||
- TestAccECSClusterCapacityProviders_disappears
|
||||
- TestAccECSClusterCapacityProviders_Update
|
||||
- TestAccECSService_clusterName
|
||||
- TestAccECSService_deploymentCircuitBreaker
|
||||
- TestAccECSService_alb
|
||||
- TestAccECSService_multipleTargetGroups
|
||||
- TestAccECSService_DeploymentValues
|
||||
- TestAccECSService_iamRole
|
||||
- TestAccECSService_ServiceRegistries_container
|
||||
- TestAccECSService_renamedCluster
|
||||
- TestAccECSService_familyAndRevision
|
||||
- TestAccECSService_replicaSchedulingStrategy
|
||||
- TestAccECSService_DaemonSchedulingStrategy
|
||||
- TestAccECSService_PlacementStrategy_missing
|
||||
- TestAccECSService_disappears
|
||||
- TestAccECSTaskSet_
|
||||
- TestAccECSTaskDefinition_Docker
|
||||
- TestAccECSTaskDefinition_EFSVolume
|
||||
- TestAccECSTaskDefinition_Fargate
|
||||
- TestAccECSTaskDefinition_ipcMode
|
||||
- TestAccECSTaskDefinition_constraint
|
||||
- TestAccECSTaskDefinition_tags
|
||||
- TestAccECSTaskDefinition_pidMode
|
||||
- TestAccECSTaskDefinition_executionRole
|
||||
- TestAccECSTaskDefinition_service
|
||||
- TestAccECSTaskDefinition_disappears
|
||||
- TestAccECSTaskDefinition_taskRoleARN
|
||||
- TestAccECSTaskDefinition_inferenceAccelerator
|
||||
- TestAccECSTaskDefinition_proxy
|
||||
- TestAccECSTaskDefinition_changeVolumesForcesNewResource
|
||||
- TestAccECSTaskDefinition_invalidContainerDefinition
|
||||
- TestAccECSTaskDefinition_arrays
|
||||
- TestAccECSTaskDefinition_scratchVolume
|
||||
- TestAccECSTaskDefinition_runtimePlatform
|
||||
- TestAccECSTaskDefinition_basic
|
||||
- TestAccECSTaskDefinition_networkMode
|
||||
efs:
|
||||
- TestAccEFSAccessPoint_
|
||||
- TestAccEFSAccessPointDataSource
|
||||
|
@ -221,8 +221,8 @@ def test_create_auto_scaling_from_template_version__no_version():
|
||||
"AutoScalingGroups"
|
||||
][0]
|
||||
response.should.have.key("LaunchTemplate")
|
||||
# We never specified the version - this is what it defaults to
|
||||
response["LaunchTemplate"].should.have.key("Version").equals("$Default")
|
||||
# We never specified the version - and AWS will not return anything if we don't
|
||||
response["LaunchTemplate"].shouldnt.have.key("Version")
|
||||
|
||||
|
||||
@mock_autoscaling
|
||||
|
@ -240,8 +240,10 @@ def test_delete_unmanaged_compute_environment():
|
||||
all_names = [e["computeEnvironmentName"] for e in all_envs]
|
||||
all_names.shouldnt.contain(compute_name)
|
||||
|
||||
all_clusters = ecs_client.list_clusters()["clusterArns"]
|
||||
all_clusters.shouldnt.contain(our_env["ecsClusterArn"])
|
||||
cluster = ecs_client.describe_clusters(clusters=[our_env["ecsClusterArn"]])[
|
||||
"clusters"
|
||||
][0]
|
||||
cluster.should.have.key("status").equals("INACTIVE")
|
||||
|
||||
|
||||
@mock_ec2
|
||||
@ -293,8 +295,10 @@ def test_delete_managed_compute_environment():
|
||||
for reservation in resp["Reservations"]:
|
||||
reservation["Instances"][0]["State"]["Name"].should.equal("terminated")
|
||||
|
||||
all_clusters = ecs_client.list_clusters()["clusterArns"]
|
||||
all_clusters.shouldnt.contain(our_env["ecsClusterArn"])
|
||||
cluster = ecs_client.describe_clusters(clusters=[our_env["ecsClusterArn"]])[
|
||||
"clusters"
|
||||
][0]
|
||||
cluster.should.have.key("status").equals("INACTIVE")
|
||||
|
||||
|
||||
@mock_ec2
|
||||
|
@ -53,6 +53,48 @@ def test_create_cluster_with_setting():
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_create_cluster_with_capacity_providers():
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
cluster = client.create_cluster(
|
||||
clusterName="test_ecs_cluster",
|
||||
capacityProviders=["FARGATE", "FARGATE_SPOT"],
|
||||
defaultCapacityProviderStrategy=[
|
||||
{"base": 1, "capacityProvider": "FARGATE_SPOT", "weight": 1},
|
||||
{"base": 0, "capacityProvider": "FARGATE", "weight": 1},
|
||||
],
|
||||
)["cluster"]
|
||||
cluster["capacityProviders"].should.equal(["FARGATE", "FARGATE_SPOT"])
|
||||
cluster["defaultCapacityProviderStrategy"].should.equal(
|
||||
[
|
||||
{"base": 1, "capacityProvider": "FARGATE_SPOT", "weight": 1},
|
||||
{"base": 0, "capacityProvider": "FARGATE", "weight": 1},
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_put_capacity_providers():
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
client.create_cluster(clusterName="test_ecs_cluster")
|
||||
cluster = client.put_cluster_capacity_providers(
|
||||
cluster="test_ecs_cluster",
|
||||
capacityProviders=["FARGATE", "FARGATE_SPOT"],
|
||||
defaultCapacityProviderStrategy=[
|
||||
{"base": 1, "capacityProvider": "FARGATE_SPOT", "weight": 1},
|
||||
{"base": 0, "capacityProvider": "FARGATE", "weight": 1},
|
||||
],
|
||||
)["cluster"]
|
||||
|
||||
cluster["capacityProviders"].should.equal(["FARGATE", "FARGATE_SPOT"])
|
||||
cluster["defaultCapacityProviderStrategy"].should.equal(
|
||||
[
|
||||
{"base": 1, "capacityProvider": "FARGATE_SPOT", "weight": 1},
|
||||
{"base": 0, "capacityProvider": "FARGATE", "weight": 1},
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_list_clusters():
|
||||
client = boto3.client("ecs", region_name="us-east-2")
|
||||
@ -67,6 +109,16 @@ def test_list_clusters():
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_create_cluster_with_tags():
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
tag_list = [{"key": "tagName", "value": "TagValue"}]
|
||||
cluster = client.create_cluster(clusterName="c_with_tags", tags=tag_list)["cluster"]
|
||||
|
||||
tags = client.list_tags_for_resource(resourceArn=cluster["clusterArn"])["tags"]
|
||||
tags.should.equal([{"key": "tagName", "value": "TagValue"}])
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_describe_clusters():
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
@@ -119,14 +171,14 @@ def test_delete_cluster():
    response["cluster"]["clusterArn"].should.equal(
        f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster"
    )
    response["cluster"]["status"].should.equal("ACTIVE")
    response["cluster"]["status"].should.equal("INACTIVE")
    response["cluster"]["registeredContainerInstancesCount"].should.equal(0)
    response["cluster"]["runningTasksCount"].should.equal(0)
    response["cluster"]["pendingTasksCount"].should.equal(0)
    response["cluster"]["activeServicesCount"].should.equal(0)

    response = client.list_clusters()
    response["clusterArns"].should.have.length_of(0)
    response["clusterArns"].should.have.length_of(1)


@mock_ecs
|
||||
@ -381,7 +433,6 @@ def test_describe_task_definitions():
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
tags=[{"key": "Name", "value": "test_ecs_task"}],
|
||||
)
|
||||
_ = client.register_task_definition(
|
||||
family="test_ecs_task",
|
||||
@ -429,11 +480,6 @@ def test_describe_task_definitions():
|
||||
response["taskDefinition"]["taskRoleArn"].should.equal("my-task-role-arn")
|
||||
response["taskDefinition"]["executionRoleArn"].should.equal("my-execution-role-arn")
|
||||
|
||||
response = client.describe_task_definition(
|
||||
taskDefinition="test_ecs_task:1", include=["TAGS"]
|
||||
)
|
||||
response["tags"].should.equal([{"key": "Name", "value": "test_ecs_task"}])
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_deregister_task_definition_1():
|
||||
@ -524,6 +570,7 @@ def test_create_service():
|
||||
serviceName="test_ecs_service",
|
||||
taskDefinition="test_ecs_task",
|
||||
desiredCount=2,
|
||||
platformVersion="2",
|
||||
)
|
||||
response["service"]["clusterArn"].should.equal(
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster"
|
||||
@ -543,6 +590,7 @@ def test_create_service():
|
||||
)
|
||||
response["service"]["schedulingStrategy"].should.equal("REPLICA")
|
||||
response["service"]["launchType"].should.equal("EC2")
|
||||
response["service"]["platformVersion"].should.equal("2")
|
||||
|
||||
|
||||
@mock_ecs
|
||||
@ -1085,12 +1133,18 @@ def test_delete_service():
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service"
|
||||
)
|
||||
response["service"]["serviceName"].should.equal("test_ecs_service")
|
||||
response["service"]["status"].should.equal("ACTIVE")
|
||||
response["service"]["status"].should.equal("INACTIVE")
|
||||
response["service"]["schedulingStrategy"].should.equal("REPLICA")
|
||||
response["service"]["taskDefinition"].should.equal(
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1"
|
||||
)
|
||||
|
||||
# service should still exist, just in the INACTIVE state
|
||||
service = client.describe_services(
|
||||
cluster="test_ecs_cluster", services=["test_ecs_service"]
|
||||
)["services"][0]
|
||||
service["status"].should.equal("INACTIVE")
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_delete_service__using_arns():
|
||||
@ -1169,7 +1223,7 @@ def test_delete_service_force():
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service"
|
||||
)
|
||||
response["service"]["serviceName"].should.equal("test_ecs_service")
|
||||
response["service"]["status"].should.equal("ACTIVE")
|
||||
response["service"]["status"].should.equal("INACTIVE")
|
||||
response["service"]["schedulingStrategy"].should.equal("REPLICA")
|
||||
response["service"]["taskDefinition"].should.equal(
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1"
|
||||
@ -1762,28 +1816,14 @@ def test_run_task_awsvpc_network_error():
|
||||
)
|
||||
|
||||
|
||||
@mock_ec2
|
||||
@mock_ecs
|
||||
def test_run_task_default_cluster():
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
ec2 = boto3.resource("ec2", region_name="us-east-1")
|
||||
|
||||
test_cluster_name = "default"
|
||||
|
||||
_ = client.create_cluster(clusterName=test_cluster_name)
|
||||
|
||||
test_instance = ec2.create_instances(
|
||||
ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1
|
||||
)[0]
|
||||
|
||||
instance_id_document = json.dumps(
|
||||
ec2_utils.generate_instance_identity_document(test_instance)
|
||||
)
|
||||
|
||||
client.register_container_instance(
|
||||
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
|
||||
)
|
||||
|
||||
_ = client.register_task_definition(
|
||||
family="test_ecs_task",
|
||||
containerDefinitions=[
|
||||
@ -1818,9 +1858,6 @@ def test_run_task_default_cluster():
|
||||
response["tasks"][0]["taskDefinitionArn"].should.equal(
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1"
|
||||
)
|
||||
response["tasks"][0]["containerInstanceArn"].should.contain(
|
||||
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:container-instance/"
|
||||
)
|
||||
response["tasks"][0]["overrides"].should.equal({})
|
||||
response["tasks"][0]["lastStatus"].should.equal("RUNNING")
|
||||
response["tasks"][0]["desiredStatus"].should.equal("RUNNING")
|
||||
@ -2207,7 +2244,14 @@ def test_describe_task_definition_by_family():
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
task_definition = client.register_task_definition(
|
||||
family="test_ecs_task", containerDefinitions=[container_definition]
|
||||
family="test_ecs_task",
|
||||
containerDefinitions=[container_definition],
|
||||
proxyConfiguration={"type": "APPMESH", "containerName": "a"},
|
||||
inferenceAccelerators=[{"deviceName": "dn", "deviceType": "dt"}],
|
||||
runtimePlatform={"cpuArchitecture": "X86_64", "operatingSystemFamily": "LINUX"},
|
||||
ipcMode="host",
|
||||
pidMode="host",
|
||||
ephemeralStorage={"sizeInGiB": 123},
|
||||
)
|
||||
family = task_definition["taskDefinition"]["family"]
|
||||
task = client.describe_task_definition(taskDefinition=family)["taskDefinition"]
|
||||
@ -2222,6 +2266,16 @@ def test_describe_task_definition_by_family():
|
||||
)
|
||||
task["volumes"].should.equal([])
|
||||
task["status"].should.equal("ACTIVE")
|
||||
task["proxyConfiguration"].should.equal({"type": "APPMESH", "containerName": "a"})
|
||||
task["inferenceAccelerators"].should.equal(
|
||||
[{"deviceName": "dn", "deviceType": "dt"}]
|
||||
)
|
||||
task["runtimePlatform"].should.equal(
|
||||
{"cpuArchitecture": "X86_64", "operatingSystemFamily": "LINUX"}
|
||||
)
|
||||
task["ipcMode"].should.equal("host")
|
||||
task["pidMode"].should.equal("host")
|
||||
task["ephemeralStorage"].should.equal({"sizeInGiB": 123})
|
||||
|
||||
|
||||
@mock_ec2
|
||||
@@ -3194,6 +3248,22 @@ def test_ecs_service_untag_resource_multiple_tags():
    response["tags"].should.equal([{"key": "hello", "value": "world"}])


@mock_ecs
def test_update_cluster():
    client = boto3.client("ecs", region_name="us-east-1")
    resp = client.create_cluster(clusterName="test_ecs_cluster")

    resp = client.update_cluster(
        cluster="test_ecs_cluster",
        settings=[{"name": "containerInsights", "value": "v"}],
        configuration={"executeCommandConfiguration": {"kmsKeyId": "arn:kms:stuff"}},
    )["cluster"]
    resp["settings"].should.equal([{"name": "containerInsights", "value": "v"}])
    resp["configuration"].should.equal(
        {"executeCommandConfiguration": {"kmsKeyId": "arn:kms:stuff"}}
    )


@mock_ecs
|
||||
def test_ecs_task_definition_placement_constraints():
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
@ -3227,390 +3297,6 @@ def test_ecs_task_definition_placement_constraints():
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_create_task_set():
|
||||
cluster_name = "test_ecs_cluster"
|
||||
service_name = "test_ecs_service"
|
||||
task_def_name = "test_ecs_task"
|
||||
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
_ = client.create_cluster(clusterName=cluster_name)
|
||||
_ = client.register_task_definition(
|
||||
family="test_ecs_task",
|
||||
containerDefinitions=[
|
||||
{
|
||||
"name": "hello_world",
|
||||
"image": "docker/hello-world:latest",
|
||||
"cpu": 1024,
|
||||
"memory": 400,
|
||||
"essential": True,
|
||||
"environment": [
|
||||
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
|
||||
],
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
)
|
||||
_ = client.create_service(
|
||||
cluster=cluster_name,
|
||||
serviceName=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
desiredCount=2,
|
||||
deploymentController={"type": "EXTERNAL"},
|
||||
)
|
||||
load_balancers = [
|
||||
{
|
||||
"targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
|
||||
"containerName": "hello_world",
|
||||
"containerPort": 8080,
|
||||
},
|
||||
]
|
||||
|
||||
task_set = client.create_task_set(
|
||||
cluster=cluster_name,
|
||||
service=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
loadBalancers=load_balancers,
|
||||
)["taskSet"]
|
||||
|
||||
cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
|
||||
"clusterArn"
|
||||
]
|
||||
service_arn = client.describe_services(
|
||||
cluster=cluster_name, services=[service_name]
|
||||
)["services"][0]["serviceArn"]
|
||||
task_set["clusterArn"].should.equal(cluster_arn)
|
||||
task_set["serviceArn"].should.equal(service_arn)
|
||||
task_set["taskDefinition"].should.match(f"{task_def_name}:1$")
|
||||
task_set["scale"].should.equal({"value": 100.0, "unit": "PERCENT"})
|
||||
task_set["loadBalancers"][0]["targetGroupArn"].should.equal(
|
||||
"arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/"
|
||||
"c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
|
||||
)
|
||||
task_set["loadBalancers"][0]["containerPort"].should.equal(8080)
|
||||
task_set["loadBalancers"][0]["containerName"].should.equal("hello_world")
|
||||
task_set["launchType"].should.equal("EC2")
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_create_task_set_errors():
|
||||
# given
|
||||
cluster_name = "test_ecs_cluster"
|
||||
service_name = "test_ecs_service"
|
||||
task_def_name = "test_ecs_task"
|
||||
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
_ = client.create_cluster(clusterName=cluster_name)
|
||||
_ = client.register_task_definition(
|
||||
family="test_ecs_task",
|
||||
containerDefinitions=[
|
||||
{
|
||||
"name": "hello_world",
|
||||
"image": "docker/hello-world:latest",
|
||||
"cpu": 1024,
|
||||
"memory": 400,
|
||||
"essential": True,
|
||||
"environment": [
|
||||
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
|
||||
],
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
)
|
||||
_ = client.create_service(
|
||||
cluster=cluster_name,
|
||||
serviceName=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
desiredCount=2,
|
||||
deploymentController={"type": "EXTERNAL"},
|
||||
)
|
||||
|
||||
# not existing launch type
|
||||
# when
|
||||
with pytest.raises(ClientError) as e:
|
||||
client.create_task_set(
|
||||
cluster=cluster_name,
|
||||
service=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
launchType="SOMETHING",
|
||||
)
|
||||
|
||||
# then
|
||||
ex = e.value
|
||||
ex.operation_name.should.equal("CreateTaskSet")
|
||||
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
|
||||
ex.response["Error"]["Code"].should.contain("ClientException")
|
||||
ex.response["Error"]["Message"].should.equal(
|
||||
"launch type should be one of [EC2,FARGATE]"
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_describe_task_sets():
|
||||
cluster_name = "test_ecs_cluster"
|
||||
service_name = "test_ecs_service"
|
||||
task_def_name = "test_ecs_task"
|
||||
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
_ = client.create_cluster(clusterName=cluster_name)
|
||||
_ = client.register_task_definition(
|
||||
family=task_def_name,
|
||||
containerDefinitions=[
|
||||
{
|
||||
"name": "hello_world",
|
||||
"image": "docker/hello-world:latest",
|
||||
"cpu": 1024,
|
||||
"memory": 400,
|
||||
"essential": True,
|
||||
"environment": [
|
||||
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
|
||||
],
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
)
|
||||
_ = client.create_service(
|
||||
cluster=cluster_name,
|
||||
serviceName=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
desiredCount=2,
|
||||
deploymentController={"type": "EXTERNAL"},
|
||||
)
|
||||
|
||||
load_balancers = [
|
||||
{
|
||||
"targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
|
||||
"containerName": "hello_world",
|
||||
"containerPort": 8080,
|
||||
}
|
||||
]
|
||||
|
||||
_ = client.create_task_set(
|
||||
cluster=cluster_name,
|
||||
service=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
loadBalancers=load_balancers,
|
||||
)
|
||||
task_sets = client.describe_task_sets(cluster=cluster_name, service=service_name)[
|
||||
"taskSets"
|
||||
]
|
||||
assert "tags" not in task_sets[0]
|
||||
|
||||
task_sets = client.describe_task_sets(
|
||||
cluster=cluster_name, service=service_name, include=["TAGS"]
|
||||
)["taskSets"]
|
||||
|
||||
cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
|
||||
"clusterArn"
|
||||
]
|
||||
|
||||
service_arn = client.describe_services(
|
||||
cluster=cluster_name, services=[service_name]
|
||||
)["services"][0]["serviceArn"]
|
||||
|
||||
task_sets[0].should.have.key("tags")
|
||||
task_sets.should.have.length_of(1)
|
||||
task_sets[0]["taskDefinition"].should.match(f"{task_def_name}:1$")
|
||||
task_sets[0]["clusterArn"].should.equal(cluster_arn)
|
||||
task_sets[0]["serviceArn"].should.equal(service_arn)
|
||||
task_sets[0]["serviceArn"].should.match(f"{service_name}$")
|
||||
task_sets[0]["scale"].should.equal({"value": 100.0, "unit": "PERCENT"})
|
||||
task_sets[0]["taskSetArn"].should.match(f"{task_sets[0]['id']}$")
|
||||
task_sets[0]["loadBalancers"][0]["targetGroupArn"].should.equal(
|
||||
"arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/"
|
||||
"c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
|
||||
)
|
||||
task_sets[0]["loadBalancers"][0]["containerPort"].should.equal(8080)
|
||||
task_sets[0]["loadBalancers"][0]["containerName"].should.equal("hello_world")
|
||||
task_sets[0]["launchType"].should.equal("EC2")
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_delete_task_set():
|
||||
cluster_name = "test_ecs_cluster"
|
||||
service_name = "test_ecs_service"
|
||||
task_def_name = "test_ecs_task"
|
||||
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
_ = client.create_cluster(clusterName=cluster_name)
|
||||
_ = client.register_task_definition(
|
||||
family=task_def_name,
|
||||
containerDefinitions=[
|
||||
{
|
||||
"name": "hello_world",
|
||||
"image": "docker/hello-world:latest",
|
||||
"cpu": 1024,
|
||||
"memory": 400,
|
||||
"essential": True,
|
||||
"environment": [
|
||||
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
|
||||
],
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
)
|
||||
_ = client.create_service(
|
||||
cluster=cluster_name,
|
||||
serviceName=service_name,
|
||||
taskDefinition=task_def_name,
|
||||
desiredCount=2,
|
||||
deploymentController={"type": "EXTERNAL"},
|
||||
)
|
||||
|
||||
task_set = client.create_task_set(
|
||||
cluster=cluster_name, service=service_name, taskDefinition=task_def_name
|
||||
)["taskSet"]
|
||||
|
||||
task_sets = client.describe_task_sets(
|
||||
cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]]
|
||||
)["taskSets"]
|
||||
|
||||
assert len(task_sets) == 1
|
||||
|
||||
response = client.delete_task_set(
|
||||
cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"]
|
||||
)
|
||||
assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"]
|
||||
|
||||
task_sets = client.describe_task_sets(
|
||||
cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]]
|
||||
)["taskSets"]
|
||||
|
||||
assert len(task_sets) == 0
|
||||
|
||||
with pytest.raises(ClientError):
|
||||
_ = client.delete_task_set(
|
||||
cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"]
|
||||
)
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_update_service_primary_task_set():
|
||||
cluster_name = "test_ecs_cluster"
|
||||
service_name = "test_ecs_service"
|
||||
task_def_name = "test_ecs_task"
|
||||
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
_ = client.create_cluster(clusterName=cluster_name)
|
||||
_ = client.register_task_definition(
|
||||
family="test_ecs_task",
|
||||
containerDefinitions=[
|
||||
{
|
||||
"name": "hello_world",
|
||||
"image": "docker/hello-world:latest",
|
||||
"cpu": 1024,
|
||||
"memory": 400,
|
||||
"essential": True,
|
||||
"environment": [
|
||||
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
|
||||
],
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
)
|
||||
_ = client.create_service(
|
||||
cluster=cluster_name,
|
||||
serviceName=service_name,
|
||||
desiredCount=2,
|
||||
deploymentController={"type": "EXTERNAL"},
|
||||
)
|
||||
|
||||
task_set = client.create_task_set(
|
||||
cluster=cluster_name, service=service_name, taskDefinition=task_def_name
|
||||
)["taskSet"]
|
||||
|
||||
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
|
||||
"services"
|
||||
][0]
|
||||
|
||||
_ = client.update_service_primary_task_set(
|
||||
cluster=cluster_name,
|
||||
service=service_name,
|
||||
primaryTaskSet=task_set["taskSetArn"],
|
||||
)
|
||||
|
||||
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
|
||||
"services"
|
||||
][0]
|
||||
assert service["taskSets"][0]["status"] == "PRIMARY"
|
||||
assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"]
|
||||
|
||||
another_task_set = client.create_task_set(
|
||||
cluster=cluster_name, service=service_name, taskDefinition=task_def_name
|
||||
)["taskSet"]
|
||||
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
|
||||
"services"
|
||||
][0]
|
||||
assert service["taskSets"][1]["status"] == "ACTIVE"
|
||||
|
||||
_ = client.update_service_primary_task_set(
|
||||
cluster=cluster_name,
|
||||
service=service_name,
|
||||
primaryTaskSet=another_task_set["taskSetArn"],
|
||||
)
|
||||
service = client.describe_services(cluster=cluster_name, services=[service_name],)[
|
||||
"services"
|
||||
][0]
|
||||
assert service["taskSets"][0]["status"] == "ACTIVE"
|
||||
assert service["taskSets"][1]["status"] == "PRIMARY"
|
||||
assert service["taskDefinition"] == service["taskSets"][1]["taskDefinition"]
|
||||
|
||||
|
||||
@mock_ecs
|
||||
def test_update_task_set():
|
||||
cluster_name = "test_ecs_cluster"
|
||||
service_name = "test_ecs_service"
|
||||
task_def_name = "test_ecs_task"
|
||||
|
||||
client = boto3.client("ecs", region_name="us-east-1")
|
||||
_ = client.create_cluster(clusterName=cluster_name)
|
||||
_ = client.register_task_definition(
|
||||
family=task_def_name,
|
||||
containerDefinitions=[
|
||||
{
|
||||
"name": "hello_world",
|
||||
"image": "docker/hello-world:latest",
|
||||
"cpu": 1024,
|
||||
"memory": 400,
|
||||
"essential": True,
|
||||
"environment": [
|
||||
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
|
||||
],
|
||||
"logConfiguration": {"logDriver": "json-file"},
|
||||
}
|
||||
],
|
||||
)
|
||||
_ = client.create_service(
|
||||
cluster=cluster_name,
|
||||
serviceName=service_name,
|
||||
desiredCount=2,
|
||||
deploymentController={"type": "EXTERNAL"},
|
||||
)
|
||||
|
||||
task_set = client.create_task_set(
|
||||
cluster=cluster_name, service=service_name, taskDefinition=task_def_name
|
||||
)["taskSet"]
|
||||
|
||||
another_task_set = client.create_task_set(
|
||||
cluster=cluster_name, service=service_name, taskDefinition=task_def_name
|
||||
)["taskSet"]
|
||||
assert another_task_set["scale"]["unit"] == "PERCENT"
|
||||
assert another_task_set["scale"]["value"] == 100.0
|
||||
|
||||
client.update_task_set(
|
||||
cluster=cluster_name,
|
||||
service=service_name,
|
||||
taskSet=task_set["taskSetArn"],
|
||||
scale={"value": 25.0, "unit": "PERCENT"},
|
||||
)
|
||||
|
||||
updated_task_set = client.describe_task_sets(
|
||||
cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]]
|
||||
)["taskSets"][0]
|
||||
assert updated_task_set["scale"]["value"] == 25.0
|
||||
assert updated_task_set["scale"]["unit"] == "PERCENT"
|
||||
|
||||
|
||||
@mock_ec2
|
||||
@mock_ecs
|
||||
def test_list_tasks_with_filters():
|
||||
|
@ -29,8 +29,10 @@ def test_create_capacity_provider():
        {
            "autoScalingGroupArn": "asg:arn",
            "managedScaling": {
                "instanceWarmupPeriod": 300,
                "status": "ENABLED",
                "targetCapacity": 5,
                "minimumScalingStepSize": 1,
                "maximumScalingStepSize": 2,
            },
            "managedTerminationProtection": "DISABLED",

@ -53,6 +55,20 @@ def test_create_capacity_provider_with_tags():
    provider.should.have.key("name").equals("my_provider")
    provider.should.have.key("tags").equals([{"key": "k1", "value": "v1"}])

    client.tag_resource(
        resourceArn=provider["capacityProviderArn"], tags=[{"key": "k2", "value": "v2"}]
    )

    resp = client.list_tags_for_resource(resourceArn=provider["capacityProviderArn"])
    resp["tags"].should.have.length_of(2)
    resp["tags"].should.contain({"key": "k1", "value": "v1"})
    resp["tags"].should.contain({"key": "k2", "value": "v2"})

    client.untag_resource(resourceArn=provider["capacityProviderArn"], tagKeys=["k1"])

    resp = client.list_tags_for_resource(resourceArn=provider["capacityProviderArn"])
    resp["tags"].should.equal([{"key": "k2", "value": "v2"}])


@mock_ecs
def test_describe_capacity_provider__using_name():

@ -81,8 +97,10 @@ def test_describe_capacity_provider__using_name():
        {
            "autoScalingGroupArn": "asg:arn",
            "managedScaling": {
                "instanceWarmupPeriod": 300,
                "status": "ENABLED",
                "targetCapacity": 5,
                "minimumScalingStepSize": 1,
                "maximumScalingStepSize": 2,
            },
            "managedTerminationProtection": "DISABLED",

@ -171,3 +189,36 @@ def test_delete_capacity_provider():
            "reason": "MISSING",
        }
    )


@mock_ecs
def test_update_capacity_provider():
    client = boto3.client("ecs", region_name="us-west-1")
    client.create_capacity_provider(
        name="my_provider", autoScalingGroupProvider={"autoScalingGroupArn": "asg:arn"}
    )

    resp = client.update_capacity_provider(
        name="my_provider",
        autoScalingGroupProvider={"managedScaling": {"status": "ENABLED"}},
    )
    resp.should.have.key("capacityProvider")
    resp["capacityProvider"].should.have.key("name").equals("my_provider")

    # The describe-call should return the updated provider, with defaults merged into managedScaling
    provider = client.describe_capacity_providers(capacityProviders=["my_provider"])[
        "capacityProviders"
    ][0]
    provider["autoScalingGroupProvider"].should.equal(
        {
            "autoScalingGroupArn": "asg:arn",
            "managedScaling": {
                "instanceWarmupPeriod": 300,
                "maximumScalingStepSize": 10000,
                "minimumScalingStepSize": 1,
                "status": "ENABLED",
                "targetCapacity": 100,
            },
            "managedTerminationProtection": "DISABLED",
        }
    )
@ -302,13 +302,20 @@ def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement
    stack_resp = cfn_conn.create_stack(
        StackName="test_stack", TemplateBody=template1_json
    )
    ecs_conn = boto3.client("ecs", region_name="us-west-1")

    template2_json = json.dumps(template2)
    cfn_conn.update_stack(StackName=stack_resp["StackId"], TemplateBody=template2_json)
    ecs_conn = boto3.client("ecs", region_name="us-west-1")
    resp = ecs_conn.list_clusters()
    len(resp["clusterArns"]).should.equal(1)
    resp["clusterArns"][0].endswith("testcluster2").should.be.true

    len(resp["clusterArns"]).should.equal(2)

    cluster1 = ecs_conn.describe_clusters(clusters=["testcluster1"])["clusters"][0]
    cluster1["status"].should.equal("INACTIVE")

    cluster1 = ecs_conn.describe_clusters(clusters=["testcluster2"])["clusters"][0]
    cluster1["status"].should.equal("ACTIVE")


@mock_ecs

tests/test_ecs/test_ecs_efs.py (new file, 43 lines)
@ -0,0 +1,43 @@
import boto3
from moto import mock_ecs, mock_efs


@mock_ecs
@mock_efs
def test_register_task_definition__use_efs_root():
    client = boto3.client("ecs", region_name="us-east-1")

    container_definition = {
        "name": "hello_world",
        "image": "docker/hello-world:latest",
        "cpu": 1024,
        "memory": 400,
    }
    task_definition = client.register_task_definition(
        family="test_ecs_task",
        containerDefinitions=[container_definition],
        volumes=[
            {
                "name": "vol1",
                "efsVolumeConfiguration": {
                    "fileSystemId": "sth",
                    "transitEncryption": "ENABLED",
                },
            }
        ],
    )
    family = task_definition["taskDefinition"]["family"]
    task = client.describe_task_definition(taskDefinition=family)["taskDefinition"]

    task["volumes"].should.equal(
        [
            {
                "name": "vol1",
                "efsVolumeConfiguration": {
                    "fileSystemId": "sth",
                    "rootDirectory": "/",
                    "transitEncryption": "ENABLED",
                },
            }
        ]
    )

tests/test_ecs/test_ecs_task_def_tags.py (new file, 44 lines)
@ -0,0 +1,44 @@
import boto3
import sure  # noqa # pylint: disable=unused-import

from moto import mock_ecs


@mock_ecs
def test_describe_task_definition_with_tags():
    client = boto3.client("ecs", region_name="us-east-1")
    task_def = client.register_task_definition(
        family="test_ecs_task",
        containerDefinitions=[
            {
                "name": "hello_world",
                "image": "docker/hello-world:latest",
                "cpu": 1024,
                "memory": 400,
                "essential": True,
            }
        ],
        tags=[{"key": "k1", "value": "v1"}],
    )["taskDefinition"]
    task_def_arn = task_def["taskDefinitionArn"]

    response = client.describe_task_definition(
        taskDefinition="test_ecs_task:1", include=["TAGS"]
    )
    response["tags"].should.equal([{"key": "k1", "value": "v1"}])

    client.tag_resource(resourceArn=task_def_arn, tags=[{"key": "k2", "value": "v2"}])

    response = client.describe_task_definition(
        taskDefinition="test_ecs_task:1", include=["TAGS"]
    )
    response["tags"].should.have.length_of(2)
    response["tags"].should.contain({"key": "k1", "value": "v1"})
    response["tags"].should.contain({"key": "k2", "value": "v2"})

    client.untag_resource(resourceArn=task_def_arn, tagKeys=["k2"])

    resp = client.list_tags_for_resource(resourceArn=task_def_arn)
    resp.should.have.key("tags")
    resp["tags"].should.have.length_of(1)
    resp["tags"].should.contain({"key": "k1", "value": "v1"})

tests/test_ecs/test_ecs_tasksets.py (new file, 398 lines)
@ -0,0 +1,398 @@
from botocore.exceptions import ClientError
import boto3
import sure  # noqa # pylint: disable=unused-import

from moto import mock_ecs
import pytest


cluster_name = "test_ecs_cluster"
service_name = "test_ecs_service"
task_def_name = "test_ecs_task"


@mock_ecs
def test_create_task_set():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        taskDefinition=task_def_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )
    load_balancers = [
        {
            "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
            "containerName": "hello_world",
            "containerPort": 8080,
        },
    ]

    task_set = client.create_task_set(
        cluster=cluster_name,
        service=service_name,
        taskDefinition=task_def_name,
        loadBalancers=load_balancers,
    )["taskSet"]

    cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
        "clusterArn"
    ]
    service_arn = client.describe_services(
        cluster=cluster_name, services=[service_name]
    )["services"][0]["serviceArn"]
    task_set["clusterArn"].should.equal(cluster_arn)
    task_set["serviceArn"].should.equal(service_arn)
    task_set["taskDefinition"].should.match(f"{task_def_name}:1$")
    task_set["scale"].should.equal({"value": 100.0, "unit": "PERCENT"})
    task_set["loadBalancers"][0]["targetGroupArn"].should.equal(
        "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/"
        "c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
    )
    task_set["loadBalancers"][0]["containerPort"].should.equal(8080)
    task_set["loadBalancers"][0]["containerName"].should.equal("hello_world")
    task_set["launchType"].should.equal("EC2")
    task_set["platformVersion"].should.equal("LATEST")


@mock_ecs
def test_create_task_set_errors():
    # given
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        taskDefinition=task_def_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    # not existing launch type
    # when
    with pytest.raises(ClientError) as e:
        client.create_task_set(
            cluster=cluster_name,
            service=service_name,
            taskDefinition=task_def_name,
            launchType="SOMETHING",
        )

    # then
    ex = e.value
    ex.operation_name.should.equal("CreateTaskSet")
    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    ex.response["Error"]["Code"].should.contain("ClientException")
    ex.response["Error"]["Message"].should.equal(
        "launch type should be one of [EC2,FARGATE]"
    )


@mock_ecs
def test_describe_task_sets():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        taskDefinition=task_def_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    load_balancers = [
        {
            "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
            "containerName": "hello_world",
            "containerPort": 8080,
        }
    ]

    _ = client.create_task_set(
        cluster=cluster_name,
        service=service_name,
        taskDefinition=task_def_name,
        loadBalancers=load_balancers,
    )
    task_sets = client.describe_task_sets(cluster=cluster_name, service=service_name)[
        "taskSets"
    ]
    assert "tags" not in task_sets[0]

    task_sets = client.describe_task_sets(
        cluster=cluster_name, service=service_name, include=["TAGS"]
    )["taskSets"]

    cluster_arn = client.describe_clusters(clusters=[cluster_name])["clusters"][0][
        "clusterArn"
    ]

    service_arn = client.describe_services(
        cluster=cluster_name, services=[service_name]
    )["services"][0]["serviceArn"]

    task_sets.should.have.length_of(1)
    task_sets[0].should.have.key("tags")
    task_sets[0]["taskDefinition"].should.match(f"{task_def_name}:1$")
    task_sets[0]["clusterArn"].should.equal(cluster_arn)
    task_sets[0]["serviceArn"].should.equal(service_arn)
    task_sets[0]["serviceArn"].should.match(f"{service_name}$")
    task_sets[0]["scale"].should.equal({"value": 100.0, "unit": "PERCENT"})
    task_sets[0]["taskSetArn"].should.match(f"{task_sets[0]['id']}$")
    task_sets[0]["loadBalancers"][0]["targetGroupArn"].should.equal(
        "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/"
        "c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a"
    )
    task_sets[0]["loadBalancers"][0]["containerPort"].should.equal(8080)
    task_sets[0]["loadBalancers"][0]["containerName"].should.equal("hello_world")
    task_sets[0]["launchType"].should.equal("EC2")


@mock_ecs
def test_delete_task_set():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        taskDefinition=task_def_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    task_set = client.create_task_set(
        cluster=cluster_name, service=service_name, taskDefinition=task_def_name
    )["taskSet"]

    task_sets = client.describe_task_sets(
        cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]]
    )["taskSets"]

    assert len(task_sets) == 1

    response = client.delete_task_set(
        cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"]
    )
    assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"]

    task_sets = client.describe_task_sets(
        cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]]
    )["taskSets"]

    assert len(task_sets) == 0

    with pytest.raises(ClientError):
        _ = client.delete_task_set(
            cluster=cluster_name, service=service_name, taskSet=task_set["taskSetArn"]
        )


@mock_ecs
def test_delete_task_set__using_partial_arn():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        taskDefinition=task_def_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    task_set = client.create_task_set(
        cluster=cluster_name, service=service_name, taskDefinition=task_def_name
    )["taskSet"]

    # Partial ARN match
    # arn:aws:ecs:us-east-1:123456789012:task-set/test_ecs_cluster/test_ecs_service/ecs-svc/386233676373827416
    # --> ecs-svc/386233676373827416
    partial_arn = "/".join(task_set["taskSetArn"].split("/")[-2:])
    task_sets = client.describe_task_sets(
        cluster=cluster_name, service=service_name, taskSets=[partial_arn]
    )["taskSets"]

    assert len(task_sets) == 1

    response = client.delete_task_set(
        cluster=cluster_name, service=service_name, taskSet=partial_arn
    )
    assert response["taskSet"]["taskSetArn"] == task_set["taskSetArn"]


@mock_ecs
def test_update_service_primary_task_set():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    task_set = client.create_task_set(
        cluster=cluster_name, service=service_name, taskDefinition=task_def_name
    )["taskSet"]

    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
        "services"
    ][0]

    _ = client.update_service_primary_task_set(
        cluster=cluster_name,
        service=service_name,
        primaryTaskSet=task_set["taskSetArn"],
    )

    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
        "services"
    ][0]
    assert service["taskSets"][0]["status"] == "PRIMARY"
    assert service["taskDefinition"] == service["taskSets"][0]["taskDefinition"]

    another_task_set = client.create_task_set(
        cluster=cluster_name, service=service_name, taskDefinition=task_def_name
    )["taskSet"]
    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
        "services"
    ][0]
    assert service["taskSets"][1]["status"] == "ACTIVE"

    _ = client.update_service_primary_task_set(
        cluster=cluster_name,
        service=service_name,
        primaryTaskSet=another_task_set["taskSetArn"],
    )
    service = client.describe_services(cluster=cluster_name, services=[service_name],)[
        "services"
    ][0]
    assert service["taskSets"][0]["status"] == "ACTIVE"
    assert service["taskSets"][1]["status"] == "PRIMARY"
    assert service["taskDefinition"] == service["taskSets"][1]["taskDefinition"]


@mock_ecs
def test_update_task_set():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    task_set = client.create_task_set(
        cluster=cluster_name, service=service_name, taskDefinition=task_def_name
    )["taskSet"]

    another_task_set = client.create_task_set(
        cluster=cluster_name, service=service_name, taskDefinition=task_def_name
    )["taskSet"]
    assert another_task_set["scale"]["unit"] == "PERCENT"
    assert another_task_set["scale"]["value"] == 100.0

    client.update_task_set(
        cluster=cluster_name,
        service=service_name,
        taskSet=task_set["taskSetArn"],
        scale={"value": 25.0, "unit": "PERCENT"},
    )

    updated_task_set = client.describe_task_sets(
        cluster=cluster_name, service=service_name, taskSets=[task_set["taskSetArn"]]
    )["taskSets"][0]
    assert updated_task_set["scale"]["value"] == 25.0
    assert updated_task_set["scale"]["unit"] == "PERCENT"


@mock_ecs
def test_create_task_sets_with_tags():
    client = boto3.client("ecs", region_name="us-east-1")
    _ = client.create_cluster(clusterName=cluster_name)
    create_task_def(client)
    _ = client.create_service(
        cluster=cluster_name,
        serviceName=service_name,
        taskDefinition=task_def_name,
        desiredCount=2,
        deploymentController={"type": "EXTERNAL"},
    )

    load_balancers = [
        {
            "targetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a",
            "containerName": "hello_world",
            "containerPort": 8080,
        }
    ]

    _ = client.create_task_set(
        cluster=cluster_name,
        service=service_name,
        taskDefinition=task_def_name,
        loadBalancers=load_balancers,
        tags=[{"key": "k1", "value": "v1"}, {"key": "k2", "value": "v2"}],
    )

    task_set = client.describe_task_sets(
        cluster=cluster_name, service=service_name, include=["TAGS"]
    )["taskSets"][0]
    task_set.should.have.key("tags").equals(
        [{"key": "k1", "value": "v1"}, {"key": "k2", "value": "v2"}]
    )

    client.tag_resource(
        resourceArn=task_set["taskSetArn"], tags=[{"key": "k3", "value": "v3"}]
    )

    task_set = client.describe_task_sets(
        cluster=cluster_name, service=service_name, include=["TAGS"]
    )["taskSets"][0]
    task_set.should.have.key("tags")
    task_set["tags"].should.have.length_of(3)
    task_set["tags"].should.contain({"key": "k1", "value": "v1"})
    task_set["tags"].should.contain({"key": "k2", "value": "v2"})
    task_set["tags"].should.contain({"key": "k3", "value": "v3"})

    client.untag_resource(resourceArn=task_set["taskSetArn"], tagKeys=["k2"])

    resp = client.list_tags_for_resource(resourceArn=task_set["taskSetArn"])
    resp.should.have.key("tags")
    resp["tags"].should.have.length_of(2)
    resp["tags"].should.contain({"key": "k1", "value": "v1"})
    resp["tags"].should.contain({"key": "k3", "value": "v3"})


def create_task_def(client):
    client.register_task_definition(
        family=task_def_name,
        containerDefinitions=[
            {
                "name": "hello_world",
                "image": "docker/hello-world:latest",
                "cpu": 1024,
                "memory": 400,
                "essential": True,
                "environment": [
                    {"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
                ],
                "logConfiguration": {"logDriver": "json-file"},
            }
        ],
    )