Techdebt: Replace sure with regular asserts in Batch (#6413)

This commit is contained in:
Bert Blommers 2023-06-16 10:42:07 +00:00 committed by GitHub
parent a15b14085b
commit 3741058242
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 287 additions and 322 deletions

View File

@@ -9,4 +9,4 @@ from moto import mock_batch
def test_batch_regions(region): def test_batch_regions(region):
client = boto3.client("batch", region_name=region) client = boto3.client("batch", region_name=region)
resp = client.describe_jobs(jobs=[""]) resp = client.describe_jobs(jobs=[""])
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200

View File

@@ -1,5 +1,4 @@
import boto3 import boto3
import sure # noqa # pylint: disable=unused-import
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_cloudformation from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_cloudformation
import json import json
from uuid import uuid4 from uuid import uuid4
@@ -86,17 +85,12 @@ def test_create_env_cf():
] ]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
summary = stack_resources["StackResourceSummaries"][0]
stack_resources["StackResourceSummaries"][0]["ResourceStatus"].should.equal( assert summary["ResourceStatus"] == "CREATE_COMPLETE"
"CREATE_COMPLETE"
)
# Spot checks on the ARN # Spot checks on the ARN
stack_resources["StackResourceSummaries"][0]["PhysicalResourceId"].startswith( assert "arn:aws:batch:" in summary["PhysicalResourceId"]
"arn:aws:batch:" assert stack_name in summary["PhysicalResourceId"]
)
stack_resources["StackResourceSummaries"][0]["PhysicalResourceId"].should.contain(
stack_name
)
@mock_cloudformation() @mock_cloudformation()
@@ -150,7 +144,7 @@ def test_create_job_queue_cf():
] ]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources["StackResourceSummaries"]).should.equal(2) assert len(stack_resources["StackResourceSummaries"]) == 2
job_queue_resource = list( job_queue_resource = list(
filter( filter(
@@ -159,11 +153,11 @@ def test_create_job_queue_cf():
) )
)[0] )[0]
job_queue_resource["ResourceStatus"].should.equal("CREATE_COMPLETE") assert job_queue_resource["ResourceStatus"] == "CREATE_COMPLETE"
# Spot checks on the ARN # Spot checks on the ARN
job_queue_resource["PhysicalResourceId"].startswith("arn:aws:batch:") job_queue_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
job_queue_resource["PhysicalResourceId"].should.contain(stack_name) assert stack_name in job_queue_resource["PhysicalResourceId"]
job_queue_resource["PhysicalResourceId"].should.contain("job-queue/") assert "job-queue/" in job_queue_resource["PhysicalResourceId"]
@mock_cloudformation() @mock_cloudformation()
@@ -242,7 +236,7 @@ def test_create_job_def_cf():
] ]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources["StackResourceSummaries"]).should.equal(3) assert len(stack_resources["StackResourceSummaries"]) == 3
job_def_resource = list( job_def_resource = list(
filter( filter(
@@ -251,11 +245,11 @@ def test_create_job_def_cf():
) )
)[0] )[0]
job_def_resource["ResourceStatus"].should.equal("CREATE_COMPLETE") assert job_def_resource["ResourceStatus"] == "CREATE_COMPLETE"
# Spot checks on the ARN # Spot checks on the ARN
job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:") job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
job_def_resource["PhysicalResourceId"].should.contain(f"{stack_name}-JobDef") assert f"{stack_name}-JobDef" in job_def_resource["PhysicalResourceId"]
job_def_resource["PhysicalResourceId"].should.contain("job-definition/") assert "job-definition/" in job_def_resource["PhysicalResourceId"]
# Test the linux parameter device host path # Test the linux parameter device host path
# This ensures that batch is parsing the parameter dictionaries # This ensures that batch is parsing the parameter dictionaries
@@ -269,4 +263,4 @@ def test_create_job_def_cf():
"containerProperties" "containerProperties"
]["linuxParameters"]["devices"][0]["hostPath"] ]["linuxParameters"]["devices"][0]["hostPath"]
job_def_linux_device_host_path.should.equal("test-path") assert job_def_linux_device_host_path == "test-path"

View File

@@ -1,6 +1,5 @@
from . import _get_clients, _setup from . import _get_clients, _setup
import pytest import pytest
import sure # noqa # pylint: disable=unused-import
from botocore.exceptions import ClientError from botocore.exceptions import ClientError
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, settings from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, settings
from uuid import uuid4 from uuid import uuid4
@@ -37,8 +36,8 @@ def test_create_managed_compute_environment():
}, },
serviceRole=iam_arn, serviceRole=iam_arn,
) )
resp.should.contain("computeEnvironmentArn") assert "computeEnvironmentArn" in resp
resp["computeEnvironmentName"].should.equal(compute_name) assert resp["computeEnvironmentName"] == compute_name
our_env = batch_client.describe_compute_environments( our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name] computeEnvironments=[compute_name]
@@ -48,12 +47,12 @@ def test_create_managed_compute_environment():
if not settings.TEST_SERVER_MODE: if not settings.TEST_SERVER_MODE:
# Can't verify this in ServerMode, as other tests may have created instances # Can't verify this in ServerMode, as other tests may have created instances
resp = ec2_client.describe_instances() resp = ec2_client.describe_instances()
resp.should.contain("Reservations") assert "Reservations" in resp
len(resp["Reservations"]).should.equal(3) assert len(resp["Reservations"]) == 3
# Should have created 1 ECS cluster # Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"] all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"]) assert our_env["ecsClusterArn"] in all_clusters
@mock_ec2 @mock_ec2
@@ -97,7 +96,7 @@ def test_create_managed_compute_environment_with_instance_family():
our_env = batch_client.describe_compute_environments( our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name] computeEnvironments=[compute_name]
)["computeEnvironments"][0] )["computeEnvironments"][0]
our_env["computeResources"]["instanceTypes"].should.equal(["t2"]) assert our_env["computeResources"]["instanceTypes"] == ["t2"]
@mock_ec2 @mock_ec2
@@ -139,8 +138,8 @@ def test_create_managed_compute_environment_with_unknown_instance_type():
serviceRole=iam_arn, serviceRole=iam_arn,
) )
err = exc.value.response["Error"] err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValue") assert err["Code"] == "InvalidParameterValue"
err["Message"].should.equal("Instance type unknown does not exist") assert err["Message"] == "Instance type unknown does not exist"
@mock_ec2 @mock_ec2
@@ -158,24 +157,24 @@ def test_create_unmanaged_compute_environment():
state="ENABLED", state="ENABLED",
serviceRole=iam_arn, serviceRole=iam_arn,
) )
resp.should.contain("computeEnvironmentArn") assert "computeEnvironmentArn" in resp
resp["computeEnvironmentName"].should.equal(compute_name) assert resp["computeEnvironmentName"] == compute_name
our_env = batch_client.describe_compute_environments( our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name] computeEnvironments=[compute_name]
)["computeEnvironments"][0] )["computeEnvironments"][0]
our_env.should.have.key("ecsClusterArn") assert "ecsClusterArn" in our_env
# Its unmanaged so no instances should be created # Its unmanaged so no instances should be created
if not settings.TEST_SERVER_MODE: if not settings.TEST_SERVER_MODE:
# Can't verify this in ServerMode, as other tests may have created instances # Can't verify this in ServerMode, as other tests may have created instances
resp = ec2_client.describe_instances() resp = ec2_client.describe_instances()
resp.should.contain("Reservations") assert "Reservations" in resp
len(resp["Reservations"]).should.equal(0) assert len(resp["Reservations"]) == 0
# Should have created 1 ECS cluster # Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"] all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"]) assert our_env["ecsClusterArn"] in all_clusters
# TODO create 1000s of tests to test complex option combinations of create environment # TODO create 1000s of tests to test complex option combinations of create environment
@@ -202,16 +201,16 @@ def test_describe_compute_environment():
all_envs = batch_client.describe_compute_environments()["computeEnvironments"] all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name] our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name]
our_envs.should.have.length_of(1) assert len(our_envs) == 1
our_envs[0]["computeEnvironmentName"].should.equal(compute_name) assert our_envs[0]["computeEnvironmentName"] == compute_name
our_envs[0]["computeEnvironmentArn"].should.equal(compute_arn) assert our_envs[0]["computeEnvironmentArn"] == compute_arn
our_envs[0].should.have.key("ecsClusterArn") assert "ecsClusterArn" in our_envs[0]
our_envs[0].should.have.key("state").equal("ENABLED") assert our_envs[0]["state"] == "ENABLED"
our_envs[0].should.have.key("status").equal("VALID") assert our_envs[0]["status"] == "VALID"
# Test filtering # Test filtering
resp = batch_client.describe_compute_environments(computeEnvironments=["test1"]) resp = batch_client.describe_compute_environments(computeEnvironments=["test1"])
len(resp["computeEnvironments"]).should.equal(0) assert len(resp["computeEnvironments"]) == 0
@mock_ec2 @mock_ec2
@@ -238,12 +237,12 @@ def test_delete_unmanaged_compute_environment():
all_envs = batch_client.describe_compute_environments()["computeEnvironments"] all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
all_names = [e["computeEnvironmentName"] for e in all_envs] all_names = [e["computeEnvironmentName"] for e in all_envs]
all_names.shouldnt.contain(compute_name) assert compute_name not in all_names
cluster = ecs_client.describe_clusters(clusters=[our_env["ecsClusterArn"]])[ cluster = ecs_client.describe_clusters(clusters=[our_env["ecsClusterArn"]])[
"clusters" "clusters"
][0] ][0]
cluster.should.have.key("status").equals("INACTIVE") assert cluster["status"] == "INACTIVE"
@mock_ec2 @mock_ec2
@@ -285,20 +284,20 @@ def test_delete_managed_compute_environment():
all_envs = batch_client.describe_compute_environments()["computeEnvironments"] all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
all_names = [e["computeEnvironmentName"] for e in all_envs] all_names = [e["computeEnvironmentName"] for e in all_envs]
all_names.shouldnt.contain(compute_name) assert compute_name not in all_names
if not settings.TEST_SERVER_MODE: if not settings.TEST_SERVER_MODE:
# Too many instances to know which one is ours in ServerMode # Too many instances to know which one is ours in ServerMode
resp = ec2_client.describe_instances() resp = ec2_client.describe_instances()
resp.should.contain("Reservations") assert "Reservations" in resp
len(resp["Reservations"]).should.equal(3) assert len(resp["Reservations"]) == 3
for reservation in resp["Reservations"]: for reservation in resp["Reservations"]:
reservation["Instances"][0]["State"]["Name"].should.equal("terminated") assert reservation["Instances"][0]["State"]["Name"] == "terminated"
cluster = ecs_client.describe_clusters(clusters=[our_env["ecsClusterArn"]])[ cluster = ecs_client.describe_clusters(clusters=[our_env["ecsClusterArn"]])[
"clusters" "clusters"
][0] ][0]
cluster.should.have.key("status").equals("INACTIVE") assert cluster["status"] == "INACTIVE"
@mock_ec2 @mock_ec2
@@ -323,8 +322,8 @@ def test_update_unmanaged_compute_environment_state():
all_envs = batch_client.describe_compute_environments()["computeEnvironments"] all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name] our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name]
our_envs.should.have.length_of(1) assert len(our_envs) == 1
our_envs[0]["state"].should.equal("DISABLED") assert our_envs[0]["state"] == "DISABLED"
@mock_ec2 @mock_ec2
@@ -352,15 +351,15 @@ def test_update_iam_role():
all_envs = batch_client.describe_compute_environments()["computeEnvironments"] all_envs = batch_client.describe_compute_environments()["computeEnvironments"]
our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name] our_envs = [e for e in all_envs if e["computeEnvironmentName"] == compute_name]
our_envs.should.have.length_of(1) assert len(our_envs) == 1
our_envs[0]["serviceRole"].should.equal(iam_arn2) assert our_envs[0]["serviceRole"] == iam_arn2
with pytest.raises(ClientError) as exc: with pytest.raises(ClientError) as exc:
batch_client.update_compute_environment( batch_client.update_compute_environment(
computeEnvironment=compute_name, serviceRole="unknown" computeEnvironment=compute_name, serviceRole="unknown"
) )
err = exc.value.response["Error"] err = exc.value.response["Error"]
err["Code"].should.equal("InvalidParameterValue") assert err["Code"] == "InvalidParameterValue"
@pytest.mark.parametrize("compute_env_type", ["FARGATE", "FARGATE_SPOT"]) @pytest.mark.parametrize("compute_env_type", ["FARGATE", "FARGATE_SPOT"])
@@ -385,14 +384,14 @@ def test_create_fargate_managed_compute_environment(compute_env_type):
}, },
serviceRole=iam_arn, serviceRole=iam_arn,
) )
resp.should.contain("computeEnvironmentArn") assert "computeEnvironmentArn" in resp
resp["computeEnvironmentName"].should.equal(compute_name) assert resp["computeEnvironmentName"] == compute_name
our_env = batch_client.describe_compute_environments( our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name] computeEnvironments=[compute_name]
)["computeEnvironments"][0] )["computeEnvironments"][0]
our_env["computeResources"]["type"].should.equal(compute_env_type) assert our_env["computeResources"]["type"] == compute_env_type
# Should have created 1 ECS cluster # Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"] all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"]) assert our_env["ecsClusterArn"] in all_clusters

View File

@@ -1,8 +1,7 @@
from . import _get_clients, _setup from . import _get_clients, _setup
import boto3
from botocore.exceptions import ClientError
import pytest import pytest
import sure # noqa # pylint: disable=unused-import from botocore.exceptions import ClientError
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs from moto import mock_batch, mock_iam, mock_ec2, mock_ecs
from uuid import uuid4 from uuid import uuid4
@@ -32,26 +31,23 @@ def test_create_job_queue():
computeEnvironmentOrder=[{"order": 123, "computeEnvironment": arn}], computeEnvironmentOrder=[{"order": 123, "computeEnvironment": arn}],
schedulingPolicyArn="policy_arn", schedulingPolicyArn="policy_arn",
) )
resp.should.contain("jobQueueArn") assert "jobQueueArn" in resp
resp.should.contain("jobQueueName") assert "jobQueueName" in resp
queue_arn = resp["jobQueueArn"] queue_arn = resp["jobQueueArn"]
all_queues = batch_client.describe_job_queues()["jobQueues"] all_queues = batch_client.describe_job_queues()["jobQueues"]
our_queues = [q for q in all_queues if q["jobQueueName"] == jq_name] our_queues = [q for q in all_queues if q["jobQueueName"] == jq_name]
our_queues.should.have.length_of(1) assert len(our_queues) == 1
our_queues[0]["jobQueueArn"].should.equal(queue_arn) assert our_queues[0]["jobQueueArn"] == queue_arn
our_queues[0]["schedulingPolicyArn"].should.equal("policy_arn") assert our_queues[0]["schedulingPolicyArn"] == "policy_arn"
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch @mock_batch
def test_describe_job_queue_unknown_value(): def test_describe_job_queue_unknown_value():
_, _, _, _, batch_client = _get_clients() batch_client = boto3.client("batch", "us-east-1")
resp = batch_client.describe_job_queues(jobQueues=["test_invalid_queue"]) resp = batch_client.describe_job_queues(jobQueues=["test_invalid_queue"])
resp.should.have.key("jobQueues").being.length_of(0) assert len(resp["jobQueues"]) == 0
@mock_ec2 @mock_ec2
@@ -89,8 +85,8 @@ def test_create_job_queue_twice():
) )
err = ex.value.response["Error"] err = ex.value.response["Error"]
err["Code"].should.equal("ClientException") assert err["Code"] == "ClientException"
err["Message"].should.equal(f"Job queue {jq_name} already exists") assert err["Message"] == f"Job queue {jq_name} already exists"
@mock_ec2 @mock_ec2
@@ -108,8 +104,8 @@ def test_create_job_queue_incorrect_state():
computeEnvironmentOrder=[], computeEnvironmentOrder=[],
) )
err = ex.value.response["Error"] err = ex.value.response["Error"]
err["Code"].should.equal("ClientException") assert err["Code"] == "ClientException"
err["Message"].should.equal("state JUNK must be one of ENABLED | DISABLED") assert err["Message"] == "state JUNK must be one of ENABLED | DISABLED"
@mock_ec2 @mock_ec2
@@ -127,8 +123,8 @@ def test_create_job_queue_without_compute_environment():
computeEnvironmentOrder=[], computeEnvironmentOrder=[],
) )
err = ex.value.response["Error"] err = ex.value.response["Error"]
err["Code"].should.equal("ClientException") assert err["Code"] == "ClientException"
err["Message"].should.equal("At least 1 compute environment must be provided") assert err["Message"] == "At least 1 compute environment must be provided"
@mock_ec2 @mock_ec2
@@ -158,8 +154,8 @@ def test_job_queue_bad_arn():
], ],
) )
err = ex.value.response["Error"] err = ex.value.response["Error"]
err["Code"].should.equal("ClientException") assert err["Code"] == "ClientException"
err["Message"].should.equal("computeEnvironmentOrder is malformed") assert err["Message"] == "computeEnvironmentOrder is malformed"
@mock_ec2 @mock_ec2
@@ -192,14 +188,14 @@ def test_update_job_queue():
all_queues = batch_client.describe_job_queues()["jobQueues"] all_queues = batch_client.describe_job_queues()["jobQueues"]
our_queues = [q for q in all_queues if q["jobQueueName"] == jq_name] our_queues = [q for q in all_queues if q["jobQueueName"] == jq_name]
our_queues[0]["priority"].should.equal(5) assert our_queues[0]["priority"] == 5
batch_client.update_job_queue(jobQueue=jq_name, priority=15) batch_client.update_job_queue(jobQueue=jq_name, priority=15)
all_queues = batch_client.describe_job_queues()["jobQueues"] all_queues = batch_client.describe_job_queues()["jobQueues"]
our_queues = [q for q in all_queues if q["jobQueueName"] == jq_name] our_queues = [q for q in all_queues if q["jobQueueName"] == jq_name]
our_queues.should.have.length_of(1) assert len(our_queues) == 1
our_queues[0]["priority"].should.equal(15) assert our_queues[0]["priority"] == 15
@mock_ec2 @mock_ec2
@@ -231,4 +227,4 @@ def test_delete_job_queue():
batch_client.delete_job_queue(jobQueue=queue_arn) batch_client.delete_job_queue(jobQueue=queue_arn)
all_queues = batch_client.describe_job_queues()["jobQueues"] all_queues = batch_client.describe_job_queues()["jobQueues"]
[q["jobQueueName"] for q in all_queues].shouldnt.contain(jq_name) assert jq_name not in [q["jobQueueName"] for q in all_queues]

View File

@@ -1,7 +1,6 @@
from . import _get_clients, _setup from . import _get_clients, _setup
import datetime import datetime
import sure # noqa # pylint: disable=unused-import
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs
import botocore.exceptions import botocore.exceptions
import pytest import pytest
@@ -74,17 +73,17 @@ def test_submit_job_by_name():
resp = batch_client.submit_job( resp = batch_client.submit_job(
jobName="test1", jobQueue=queue_arn, jobDefinition=job_definition_name jobName="test1", jobQueue=queue_arn, jobDefinition=job_definition_name
) )
resp["ResponseMetadata"].should.have.key("RequestId") assert "RequestId" in resp["ResponseMetadata"]
job_id = resp["jobId"] job_id = resp["jobId"]
resp_jobs = batch_client.describe_jobs(jobs=[job_id]) resp_jobs = batch_client.describe_jobs(jobs=[job_id])
resp_jobs["ResponseMetadata"].should.have.key("RequestId") assert "RequestId" in resp_jobs["ResponseMetadata"]
len(resp_jobs["jobs"]).should.equal(1) assert len(resp_jobs["jobs"]) == 1
resp_jobs["jobs"][0]["jobId"].should.equal(job_id) assert resp_jobs["jobs"][0]["jobId"] == job_id
resp_jobs["jobs"][0]["jobQueue"].should.equal(queue_arn) assert resp_jobs["jobs"][0]["jobQueue"] == queue_arn
resp_jobs["jobs"][0]["jobDefinition"].should.equal(job_definition_arn) assert resp_jobs["jobs"][0]["jobDefinition"] == job_definition_arn
# SLOW TESTS # SLOW TESTS
@@ -115,20 +114,20 @@ def test_submit_job():
# github.com/getmoto/moto/issues/4364 # github.com/getmoto/moto/issues/4364
resp = batch_client.describe_jobs(jobs=[job_id]) resp = batch_client.describe_jobs(jobs=[job_id])
created_at = resp["jobs"][0]["createdAt"] created_at = resp["jobs"][0]["createdAt"]
created_at.should.be.greater_than(start_time_milliseconds) assert created_at > start_time_milliseconds
_wait_for_job_status(batch_client, job_id, "SUCCEEDED") _wait_for_job_status(batch_client, job_id, "SUCCEEDED")
resp = logs_client.describe_log_streams( resp = logs_client.describe_log_streams(
logGroupName="/aws/batch/job", logStreamNamePrefix=job_def_name logGroupName="/aws/batch/job", logStreamNamePrefix=job_def_name
) )
resp["logStreams"].should.have.length_of(1) assert len(resp["logStreams"]) == 1
ls_name = resp["logStreams"][0]["logStreamName"] ls_name = resp["logStreams"][0]["logStreamName"]
resp = logs_client.get_log_events( resp = logs_client.get_log_events(
logGroupName="/aws/batch/job", logStreamName=ls_name logGroupName="/aws/batch/job", logStreamName=ls_name
) )
[event["message"] for event in resp["events"]].should.equal(["hello"]) assert [event["message"] for event in resp["events"]] == ["hello"]
# Test that describe_jobs() returns timestamps in milliseconds # Test that describe_jobs() returns timestamps in milliseconds
# github.com/getmoto/moto/issues/4364 # github.com/getmoto/moto/issues/4364
@@ -137,22 +136,20 @@ def test_submit_job():
started_at = job["startedAt"] started_at = job["startedAt"]
stopped_at = job["stoppedAt"] stopped_at = job["stoppedAt"]
created_at.should.be.greater_than(start_time_milliseconds) assert created_at > start_time_milliseconds
started_at.should.be.greater_than(start_time_milliseconds) assert started_at > start_time_milliseconds
stopped_at.should.be.greater_than(start_time_milliseconds) assert stopped_at > start_time_milliseconds
# Verify we track attempts # Verify we track attempts
job.should.have.key("attempts").length_of(1) assert len(job["attempts"]) == 1
attempt = job["attempts"][0] attempt = job["attempts"][0]
attempt.should.have.key("container") assert "container" in attempt
attempt["container"].should.have.key("containerInstanceArn") assert "containerInstanceArn" in attempt["container"]
attempt["container"].should.have.key("logStreamName").equals( assert attempt["container"]["logStreamName"] == job["container"]["logStreamName"]
job["container"]["logStreamName"] assert "networkInterfaces" in attempt["container"]
) assert "taskArn" in attempt["container"]
attempt["container"].should.have.key("networkInterfaces") assert attempt["startedAt"] == started_at
attempt["container"].should.have.key("taskArn") assert attempt["stoppedAt"] == stopped_at
attempt.should.have.key("startedAt").equals(started_at)
attempt.should.have.key("stoppedAt").equals(stopped_at)
@mock_logs @mock_logs
@@ -182,20 +179,20 @@ def test_submit_job_multinode():
# github.com/getmoto/moto/issues/4364 # github.com/getmoto/moto/issues/4364
resp = batch_client.describe_jobs(jobs=[job_id]) resp = batch_client.describe_jobs(jobs=[job_id])
created_at = resp["jobs"][0]["createdAt"] created_at = resp["jobs"][0]["createdAt"]
created_at.should.be.greater_than(start_time_milliseconds) assert created_at > start_time_milliseconds
_wait_for_job_status(batch_client, job_id, "SUCCEEDED") _wait_for_job_status(batch_client, job_id, "SUCCEEDED")
resp = logs_client.describe_log_streams( resp = logs_client.describe_log_streams(
logGroupName="/aws/batch/job", logStreamNamePrefix=job_def_name logGroupName="/aws/batch/job", logStreamNamePrefix=job_def_name
) )
resp["logStreams"].should.have.length_of(1) assert len(resp["logStreams"]) == 1
ls_name = resp["logStreams"][0]["logStreamName"] ls_name = resp["logStreams"][0]["logStreamName"]
resp = logs_client.get_log_events( resp = logs_client.get_log_events(
logGroupName="/aws/batch/job", logStreamName=ls_name logGroupName="/aws/batch/job", logStreamName=ls_name
) )
[event["message"] for event in resp["events"]].should.equal(["hello", "hello"]) assert [event["message"] for event in resp["events"]] == ["hello", "hello"]
# Test that describe_jobs() returns timestamps in milliseconds # Test that describe_jobs() returns timestamps in milliseconds
# github.com/getmoto/moto/issues/4364 # github.com/getmoto/moto/issues/4364
@@ -204,22 +201,20 @@ def test_submit_job_multinode():
started_at = job["startedAt"] started_at = job["startedAt"]
stopped_at = job["stoppedAt"] stopped_at = job["stoppedAt"]
created_at.should.be.greater_than(start_time_milliseconds) assert created_at > start_time_milliseconds
started_at.should.be.greater_than(start_time_milliseconds) assert started_at > start_time_milliseconds
stopped_at.should.be.greater_than(start_time_milliseconds) assert stopped_at > start_time_milliseconds
# Verify we track attempts # Verify we track attempts
job.should.have.key("attempts").length_of(1) assert len(job["attempts"]) == 1
attempt = job["attempts"][0] attempt = job["attempts"][0]
attempt.should.have.key("container") assert "container" in attempt
attempt["container"].should.have.key("containerInstanceArn") assert "containerInstanceArn" in attempt["container"]
attempt["container"].should.have.key("logStreamName").equals( assert attempt["container"]["logStreamName"] == job["container"]["logStreamName"]
job["container"]["logStreamName"] assert "networkInterfaces" in attempt["container"]
) assert "taskArn" in attempt["container"]
attempt["container"].should.have.key("networkInterfaces") assert attempt["startedAt"] == started_at
attempt["container"].should.have.key("taskArn") assert attempt["stoppedAt"] == stopped_at
attempt.should.have.key("startedAt").equals(started_at)
attempt.should.have.key("stoppedAt").equals(stopped_at)
@mock_logs @mock_logs
@@ -247,19 +242,22 @@ def test_list_jobs():
job_id2 = resp["jobId"] job_id2 = resp["jobId"]
all_jobs = batch_client.list_jobs(jobQueue=queue_arn)["jobSummaryList"] all_jobs = batch_client.list_jobs(jobQueue=queue_arn)["jobSummaryList"]
all_jobs.should.have.length_of(2) assert len(all_jobs) == 2
for job in all_jobs: for job in all_jobs:
job.should.have.key("createdAt") assert "createdAt" in job
job.should.have.key("jobDefinition") assert "jobDefinition" in job
job.should.have.key("jobName") assert "jobName" in job
# This is async, so we can't be sure where we are in the process # This is async, so we can't be sure where we are in the process
job.should.have.key("status").within( assert job["status"] in [
["SUBMITTED", "PENDING", "STARTING", "RUNNABLE", "RUNNING"] "SUBMITTED",
) "PENDING",
"STARTING",
"RUNNABLE",
"RUNNING",
]
batch_client.list_jobs(jobQueue=queue_arn, jobStatus="SUCCEEDED")[ resp = batch_client.list_jobs(jobQueue=queue_arn, jobStatus="SUCCEEDED")
"jobSummaryList" assert len(resp["jobSummaryList"]) == 0
].should.have.length_of(0)
# Wait only as long as it takes to run the jobs # Wait only as long as it takes to run the jobs
for job_id in [job_id1, job_id2]: for job_id in [job_id1, job_id2]:
@@ -268,14 +266,14 @@ def test_list_jobs():
succeeded_jobs = batch_client.list_jobs(jobQueue=queue_arn, jobStatus="SUCCEEDED")[ succeeded_jobs = batch_client.list_jobs(jobQueue=queue_arn, jobStatus="SUCCEEDED")[
"jobSummaryList" "jobSummaryList"
] ]
succeeded_jobs.should.have.length_of(2) assert len(succeeded_jobs) == 2
for job in succeeded_jobs: for job in succeeded_jobs:
job.should.have.key("createdAt") assert "createdAt" in job
job.should.have.key("jobDefinition") assert "jobDefinition" in job
job.should.have.key("jobName") assert "jobName" in job
job.should.have.key("status").equals("SUCCEEDED") assert job["status"] == "SUCCEEDED"
job.should.have.key("stoppedAt") assert "stoppedAt" in job
job.should.have.key("container").should.have.key("exitCode").equals(0) assert job["container"]["exitCode"] == 0
filtered_jobs = batch_client.list_jobs( filtered_jobs = batch_client.list_jobs(
jobQueue=queue_arn, jobQueue=queue_arn,
@@ -286,8 +284,8 @@ def test_list_jobs():
} }
], ],
)["jobSummaryList"] )["jobSummaryList"]
filtered_jobs.should.have.length_of(1) assert len(filtered_jobs) == 1
filtered_jobs[0]["jobName"].should.equal("test2") assert filtered_jobs[0]["jobName"] == "test2"
@mock_logs @mock_logs
@@ -316,10 +314,10 @@ def test_terminate_job():
_wait_for_job_status(batch_client, job_id, "FAILED", seconds_to_wait=120) _wait_for_job_status(batch_client, job_id, "FAILED", seconds_to_wait=120)
resp = batch_client.describe_jobs(jobs=[job_id]) resp = batch_client.describe_jobs(jobs=[job_id])
resp["jobs"][0]["jobName"].should.equal("test1") assert resp["jobs"][0]["jobName"] == "test1"
resp["jobs"][0]["status"].should.equal("FAILED") assert resp["jobs"][0]["status"] == "FAILED"
resp["jobs"][0]["statusReason"].should.equal("test_terminate") assert resp["jobs"][0]["statusReason"] == "test_terminate"
resp["jobs"][0]["container"].should.have.key("logStreamName") assert "logStreamName" in resp["jobs"][0]["container"]
ls_name = f"{job_def_name}/default/{job_id}" ls_name = f"{job_def_name}/default/{job_id}"
@@ -328,8 +326,8 @@ def test_terminate_job():
) )
# Events should only contain 'start' because we interrupted # Events should only contain 'start' because we interrupted
# the job before 'stop' was written to the logs. # the job before 'stop' was written to the logs.
resp["events"].should.have.length_of(1) assert len(resp["events"]) == 1
resp["events"][0]["message"].should.equal("start") assert resp["events"][0]["message"] == "start"
@mock_batch @mock_batch
@@ -341,7 +339,7 @@ def test_terminate_nonexisting_job():
resp = batch_client.terminate_job( resp = batch_client.terminate_job(
jobId="nonexisting_job", reason="test_terminate_nonexisting_job" jobId="nonexisting_job", reason="test_terminate_nonexisting_job"
) )
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
@mock_batch @mock_batch
@@ -392,9 +390,9 @@ def test_cancel_pending_job():
_wait_for_job_status(batch_client, job_id, "FAILED", seconds_to_wait=30) _wait_for_job_status(batch_client, job_id, "FAILED", seconds_to_wait=30)
resp = batch_client.describe_jobs(jobs=[job_id]) resp = batch_client.describe_jobs(jobs=[job_id])
resp["jobs"][0]["jobName"].should.equal("test_job_name") assert resp["jobs"][0]["jobName"] == "test_job_name"
resp["jobs"][0]["statusReason"].should.equal("test_cancel") assert resp["jobs"][0]["statusReason"] == "test_cancel"
resp["jobs"][0]["container"].shouldnt.have.key("logStreamName") assert "logStreamName" not in resp["jobs"][0]["container"]
@mock_logs @mock_logs
@ -427,9 +425,9 @@ def test_cancel_running_job():
_wait_for_job_status(batch_client, job_id, "SUCCEEDED", seconds_to_wait=30) _wait_for_job_status(batch_client, job_id, "SUCCEEDED", seconds_to_wait=30)
resp = batch_client.describe_jobs(jobs=[job_id]) resp = batch_client.describe_jobs(jobs=[job_id])
resp["jobs"][0]["jobName"].should.equal("test_job_name") assert resp["jobs"][0]["jobName"] == "test_job_name"
resp["jobs"][0].shouldnt.have.key("statusReason") assert "statusReason" not in resp["jobs"][0]
resp["jobs"][0]["container"].should.have.key("logStreamName") assert "logStreamName" in resp["jobs"][0]["container"]
@mock_batch @mock_batch
@ -441,7 +439,7 @@ def test_cancel_nonexisting_job():
resp = batch_client.cancel_job( resp = batch_client.cancel_job(
jobId="nonexisting_job", reason="test_cancel_nonexisting_job" jobId="nonexisting_job", reason="test_cancel_nonexisting_job"
) )
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) assert resp["ResponseMetadata"]["HTTPStatusCode"] == 200
@mock_batch @mock_batch
@ -503,7 +501,7 @@ def test_failed_job():
resp = batch_client.describe_jobs(jobs=[job_id]) resp = batch_client.describe_jobs(jobs=[job_id])
if resp["jobs"][0]["status"] == "FAILED": if resp["jobs"][0]["status"] == "FAILED":
resp["jobs"][0]["container"].should.have.key("logStreamName") assert "logStreamName" in resp["jobs"][0]["container"]
break break
if resp["jobs"][0]["status"] == "SUCCEEDED": if resp["jobs"][0]["status"] == "SUCCEEDED":
raise RuntimeError("Batch job succeeded even though it had exit code 1") raise RuntimeError("Batch job succeeded even though it had exit code 1")
@ -580,10 +578,10 @@ def test_dependencies():
resp = logs_client.get_log_events( resp = logs_client.get_log_events(
logGroupName=log_stream_name, logStreamName=ls_name logGroupName=log_stream_name, logStreamName=ls_name
) )
[event["message"] for event in resp["events"]].should.equal(["hello"]) assert [event["message"] for event in resp["events"]] == ["hello"]
nr_logstreams_found = nr_logstreams_found + 1 nr_logstreams_found = nr_logstreams_found + 1
nr_logstreams_found.should.equal(3) assert nr_logstreams_found == 3
def retrieve_all_streams(log_stream_name, logs_client): def retrieve_all_streams(log_stream_name, logs_client):
@ -685,11 +683,11 @@ def test_failed_dependencies():
assert resp["jobs"][1]["status"] != "SUCCEEDED", "Job 3 cannot succeed" assert resp["jobs"][1]["status"] != "SUCCEEDED", "Job 3 cannot succeed"
if resp["jobs"][1]["status"] == "FAILED": if resp["jobs"][1]["status"] == "FAILED":
assert resp["jobs"][0]["container"].should.have.key( assert (
"logStreamName" "logStreamName" in resp["jobs"][0]["container"]
), "Job 2 should have logStreamName because it FAILED but was in RUNNING state" ), "Job 2 should have logStreamName because it FAILED but was in RUNNING state"
assert resp["jobs"][1]["container"].shouldnt.have.key( assert (
"logStreamName" "logStreamName" not in resp["jobs"][1]["container"]
), "Job 3 shouldn't have logStreamName because it was never in RUNNING state" ), "Job 3 shouldn't have logStreamName because it was never in RUNNING state"
break break
@ -803,32 +801,25 @@ def test_container_overrides():
key, value = tuple(event["message"].split("=")) key, value = tuple(event["message"].split("="))
env_var.append({"name": key, "value": value}) env_var.append({"name": key, "value": value})
len(resp_jobs["jobs"]).should.equal(1) assert len(resp_jobs["jobs"]) == 1
resp_jobs["jobs"][0]["jobId"].should.equal(job_id) assert resp_jobs["jobs"][0]["jobId"] == job_id
resp_jobs["jobs"][0]["jobQueue"].should.equal(queue_arn) assert resp_jobs["jobs"][0]["jobQueue"] == queue_arn
resp_jobs["jobs"][0]["jobDefinition"].should.equal(job_definition_arn) assert resp_jobs["jobs"][0]["jobDefinition"] == job_definition_arn
resp_jobs["jobs"][0]["container"]["vcpus"].should.equal(2) assert resp_jobs["jobs"][0]["container"]["vcpus"] == 2
resp_jobs["jobs"][0]["container"]["memory"].should.equal(1024) assert resp_jobs["jobs"][0]["container"]["memory"] == 1024
resp_jobs["jobs"][0]["container"]["command"].should.equal(["printenv"]) assert resp_jobs["jobs"][0]["container"]["command"] == ["printenv"]
sure.expect(resp_jobs["jobs"][0]["container"]["environment"]).to.contain( env = resp_jobs["jobs"][0]["container"]["environment"]
{"name": "TEST0", "value": "from job"} assert {"name": "TEST0", "value": "from job"} in env
) assert {"name": "TEST1", "value": "from job definition"} in env
sure.expect(resp_jobs["jobs"][0]["container"]["environment"]).to.contain( assert {"name": "TEST2", "value": "from job"} in env
{"name": "TEST1", "value": "from job definition"} assert {"name": "AWS_BATCH_JOB_ID", "value": job_id} in env
)
sure.expect(resp_jobs["jobs"][0]["container"]["environment"]).to.contain(
{"name": "TEST2", "value": "from job"}
)
sure.expect(resp_jobs["jobs"][0]["container"]["environment"]).to.contain(
{"name": "AWS_BATCH_JOB_ID", "value": job_id}
)
sure.expect(env_var).to.contain({"name": "TEST0", "value": "from job"}) assert {"name": "TEST0", "value": "from job"} in env_var
sure.expect(env_var).to.contain({"name": "TEST1", "value": "from job definition"}) assert {"name": "TEST1", "value": "from job definition"} in env_var
sure.expect(env_var).to.contain({"name": "TEST2", "value": "from job"}) assert {"name": "TEST2", "value": "from job"} in env_var
sure.expect(env_var).to.contain({"name": "AWS_BATCH_JOB_ID", "value": job_id}) assert {"name": "AWS_BATCH_JOB_ID", "value": job_id} in env_var
def prepare_job(batch_client, commands, iam_arn, job_def_name): def prepare_job(batch_client, commands, iam_arn, job_def_name):
@ -943,14 +934,14 @@ def test_update_job_definition():
job_defs = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)[ job_defs = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)[
"jobDefinitions" "jobDefinitions"
] ]
job_defs.should.have.length_of(2) assert len(job_defs) == 2
job_defs[0]["containerProperties"]["memory"].should.equal(1024) assert job_defs[0]["containerProperties"]["memory"] == 1024
job_defs[0]["tags"].should.equal(tags[0]) assert job_defs[0]["tags"] == tags[0]
job_defs[0].shouldnt.have.key("timeout") assert "timeout" not in job_defs[0]
job_defs[1]["containerProperties"]["memory"].should.equal(2048) assert job_defs[1]["containerProperties"]["memory"] == 2048
job_defs[1]["tags"].should.equal(tags[1]) assert job_defs[1]["tags"] == tags[1]
@mock_batch @mock_batch
@ -974,7 +965,7 @@ def test_register_job_definition_with_timeout():
resp = batch_client.describe_job_definitions(jobDefinitionName=job_def_name) resp = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)
job_def = resp["jobDefinitions"][0] job_def = resp["jobDefinitions"][0]
job_def.should.have.key("timeout").equals({"attemptDurationSeconds": 3}) assert job_def["timeout"] == {"attemptDurationSeconds": 3}
@mock_batch @mock_batch

View File

@ -8,9 +8,10 @@ from tests import DEFAULT_ACCOUNT_ID
def test_create_scheduling_policy(): def test_create_scheduling_policy():
client = boto3.client("batch", "us-east-2") client = boto3.client("batch", "us-east-2")
resp = client.create_scheduling_policy(name="test") resp = client.create_scheduling_policy(name="test")
resp.should.have.key("name").equals("test") assert resp["name"] == "test"
resp.should.have.key("arn").equals( assert (
f"arn:aws:batch:us-east-2:{DEFAULT_ACCOUNT_ID}:scheduling-policy/test" resp["arn"]
== f"arn:aws:batch:us-east-2:{DEFAULT_ACCOUNT_ID}:scheduling-policy/test"
) )
@ -20,14 +21,16 @@ def test_describe_default_scheduling_policy():
arn = client.create_scheduling_policy(name="test")["arn"] arn = client.create_scheduling_policy(name="test")["arn"]
resp = client.describe_scheduling_policies(arns=[arn]) resp = client.describe_scheduling_policies(arns=[arn])
resp.should.have.key("schedulingPolicies").length_of(1) assert len(resp["schedulingPolicies"]) == 1
policy = resp["schedulingPolicies"][0] policy = resp["schedulingPolicies"][0]
policy["arn"].should.equal(arn) assert policy["arn"] == arn
policy["fairsharePolicy"].should.equal( assert policy["fairsharePolicy"] == {
{"computeReservation": 0, "shareDecaySeconds": 0, "shareDistribution": []} "computeReservation": 0,
) "shareDecaySeconds": 0,
policy["tags"].should.equal({}) "shareDistribution": [],
}
assert policy["tags"] == {}
@mock_batch @mock_batch
@ -43,23 +46,21 @@ def test_describe_scheduling_policy():
)["arn"] )["arn"]
resp = client.list_scheduling_policies() resp = client.list_scheduling_policies()
resp.should.have.key("schedulingPolicies") assert "schedulingPolicies" in resp
arns = [a["arn"] for a in resp["schedulingPolicies"]] arns = [a["arn"] for a in resp["schedulingPolicies"]]
arns.should.contain(arn) assert arn in arns
resp = client.describe_scheduling_policies(arns=[arn]) resp = client.describe_scheduling_policies(arns=[arn])
resp.should.have.key("schedulingPolicies").length_of(1) assert len(resp["schedulingPolicies"]) == 1
policy = resp["schedulingPolicies"][0] policy = resp["schedulingPolicies"][0]
policy["arn"].should.equal(arn) assert policy["arn"] == arn
policy["fairsharePolicy"].should.equal( assert policy["fairsharePolicy"] == {
{ "computeReservation": 2,
"computeReservation": 2, "shareDecaySeconds": 1,
"shareDecaySeconds": 1, "shareDistribution": [{"shareIdentifier": "A", "weightFactor": 0.1}],
"shareDistribution": [{"shareIdentifier": "A", "weightFactor": 0.1}], }
} assert policy["tags"] == {}
)
policy["tags"].should.equal({})
@mock_batch @mock_batch
@ -70,7 +71,7 @@ def test_delete_scheduling_policy():
client.delete_scheduling_policy(arn=arn) client.delete_scheduling_policy(arn=arn)
resp = client.describe_scheduling_policies(arns=[arn]) resp = client.describe_scheduling_policies(arns=[arn])
resp.should.have.key("schedulingPolicies").length_of(0) assert len(resp["schedulingPolicies"]) == 0
@mock_batch @mock_batch
@ -88,10 +89,12 @@ def test_update_scheduling_policy():
) )
resp = client.describe_scheduling_policies(arns=[arn]) resp = client.describe_scheduling_policies(arns=[arn])
resp.should.have.key("schedulingPolicies").length_of(1) assert len(resp["schedulingPolicies"]) == 1
policy = resp["schedulingPolicies"][0] policy = resp["schedulingPolicies"][0]
policy["arn"].should.equal(arn) assert policy["arn"] == arn
policy["fairsharePolicy"].should.equal( assert policy["fairsharePolicy"] == {
{"computeReservation": 5, "shareDecaySeconds": 10, "shareDistribution": []} "computeReservation": 5,
) "shareDecaySeconds": 10,
"shareDistribution": [],
}

View File

@ -1,6 +1,5 @@
from . import _get_clients from . import _get_clients
import sure # noqa # pylint: disable=unused-import
from moto import mock_batch from moto import mock_batch
from uuid import uuid4 from uuid import uuid4
@ -26,7 +25,7 @@ def test_list_tags_with_job_definition():
)["jobDefinitionArn"] )["jobDefinitionArn"]
my_queue = batch_client.list_tags_for_resource(resourceArn=job_def_arn) my_queue = batch_client.list_tags_for_resource(resourceArn=job_def_arn)
my_queue.should.have.key("tags").equals({"foo": "123", "bar": "456"}) assert my_queue["tags"] == {"foo": "123", "bar": "456"}
@mock_batch @mock_batch
@ -44,7 +43,7 @@ def test_tag_job_definition():
batch_client.tag_resource(resourceArn=job_def_arn, tags={"k1": "v1", "k2": "v2"}) batch_client.tag_resource(resourceArn=job_def_arn, tags={"k1": "v1", "k2": "v2"})
my_queue = batch_client.list_tags_for_resource(resourceArn=job_def_arn) my_queue = batch_client.list_tags_for_resource(resourceArn=job_def_arn)
my_queue.should.have.key("tags").equals({"k1": "v1", "k2": "v2"}) assert my_queue["tags"] == {"k1": "v1", "k2": "v2"}
@mock_batch @mock_batch
@ -64,4 +63,4 @@ def test_untag_job_queue():
batch_client.untag_resource(resourceArn=job_def_arn, tagKeys=["k2"]) batch_client.untag_resource(resourceArn=job_def_arn, tagKeys=["k2"])
my_queue = batch_client.list_tags_for_resource(resourceArn=job_def_arn) my_queue = batch_client.list_tags_for_resource(resourceArn=job_def_arn)
my_queue.should.have.key("tags").equals({"k1": "v1", "k3": "v3"}) assert my_queue["tags"] == {"k1": "v1", "k3": "v3"}

View File

@ -1,6 +1,5 @@
from . import _get_clients, _setup from . import _get_clients, _setup
import sure # noqa # pylint: disable=unused-import
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs from moto import mock_batch, mock_iam, mock_ec2, mock_ecs
from uuid import uuid4 from uuid import uuid4
@ -30,12 +29,12 @@ def test_create_job_queue_with_tags():
computeEnvironmentOrder=[{"order": 123, "computeEnvironment": arn}], computeEnvironmentOrder=[{"order": 123, "computeEnvironment": arn}],
tags={"k1": "v1", "k2": "v2"}, tags={"k1": "v1", "k2": "v2"},
) )
resp.should.contain("jobQueueArn") assert "jobQueueArn" in resp
resp.should.contain("jobQueueName") assert "jobQueueName" in resp
queue_arn = resp["jobQueueArn"] queue_arn = resp["jobQueueArn"]
my_queue = batch_client.describe_job_queues(jobQueues=[queue_arn])["jobQueues"][0] my_queue = batch_client.describe_job_queues(jobQueues=[queue_arn])["jobQueues"][0]
my_queue.should.have.key("tags").equals({"k1": "v1", "k2": "v2"}) assert my_queue["tags"] == {"k1": "v1", "k2": "v2"}
@mock_ec2 @mock_ec2
@ -63,12 +62,12 @@ def test_list_tags():
computeEnvironmentOrder=[{"order": 123, "computeEnvironment": arn}], computeEnvironmentOrder=[{"order": 123, "computeEnvironment": arn}],
tags={"k1": "v1", "k2": "v2"}, tags={"k1": "v1", "k2": "v2"},
) )
resp.should.contain("jobQueueArn") assert "jobQueueArn" in resp
resp.should.contain("jobQueueName") assert "jobQueueName" in resp
queue_arn = resp["jobQueueArn"] queue_arn = resp["jobQueueArn"]
my_queue = batch_client.list_tags_for_resource(resourceArn=queue_arn) my_queue = batch_client.list_tags_for_resource(resourceArn=queue_arn)
my_queue.should.have.key("tags").equals({"k1": "v1", "k2": "v2"}) assert my_queue["tags"] == {"k1": "v1", "k2": "v2"}
@mock_ec2 @mock_ec2
@ -100,7 +99,7 @@ def test_tag_job_queue():
batch_client.tag_resource(resourceArn=queue_arn, tags={"k1": "v1", "k2": "v2"}) batch_client.tag_resource(resourceArn=queue_arn, tags={"k1": "v1", "k2": "v2"})
my_queue = batch_client.list_tags_for_resource(resourceArn=queue_arn) my_queue = batch_client.list_tags_for_resource(resourceArn=queue_arn)
my_queue.should.have.key("tags").equals({"k1": "v1", "k2": "v2"}) assert my_queue["tags"] == {"k1": "v1", "k2": "v2"}
@mock_ec2 @mock_ec2
@ -134,4 +133,4 @@ def test_untag_job_queue():
batch_client.untag_resource(resourceArn=queue_arn, tagKeys=["k2"]) batch_client.untag_resource(resourceArn=queue_arn, tagKeys=["k2"])
my_queue = batch_client.list_tags_for_resource(resourceArn=queue_arn) my_queue = batch_client.list_tags_for_resource(resourceArn=queue_arn)
my_queue.should.have.key("tags").equals({"k1": "v1", "k3": "v3"}) assert my_queue["tags"] == {"k1": "v1", "k3": "v3"}

View File

@ -11,4 +11,4 @@ def test_create_with_tags():
resp = client.describe_scheduling_policies(arns=[arn]) resp = client.describe_scheduling_policies(arns=[arn])
policy = resp["schedulingPolicies"][0] policy = resp["schedulingPolicies"][0]
policy["tags"].should.equal({"key": "val"}) assert policy["tags"] == {"key": "val"}

View File

@ -1,7 +1,6 @@
from . import _get_clients from . import _get_clients
import random import random
import pytest import pytest
import sure # noqa # pylint: disable=unused-import
from moto import mock_batch from moto import mock_batch
from uuid import uuid4 from uuid import uuid4
@ -13,13 +12,11 @@ def test_register_task_definition(use_resource_reqs):
resp = register_job_def(batch_client, use_resource_reqs=use_resource_reqs) resp = register_job_def(batch_client, use_resource_reqs=use_resource_reqs)
resp.should.contain("jobDefinitionArn") assert "jobDefinitionArn" in resp
resp.should.contain("jobDefinitionName") assert "jobDefinitionName" in resp
resp.should.contain("revision") assert "revision" in resp
assert resp["jobDefinitionArn"].endswith( assert f"{resp['jobDefinitionName']}:{resp['revision']}" in resp["jobDefinitionArn"]
f"{resp['jobDefinitionName']}:{resp['revision']}"
)
@mock_batch @mock_batch
@ -33,9 +30,9 @@ def test_register_task_definition_with_tags(propagate_tags):
resp = batch_client.describe_job_definitions(jobDefinitionName=job_def_name) resp = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)
job_def = resp["jobDefinitions"][0] job_def = resp["jobDefinitions"][0]
if propagate_tags is None: if propagate_tags is None:
job_def.shouldnt.have.key("propagateTags") assert "propagateTags" not in job_def
else: else:
job_def.should.have.key("propagateTags").equals(propagate_tags) assert job_def["propagateTags"] == propagate_tags
@mock_batch @mock_batch
@ -57,9 +54,7 @@ def test_register_task_definition_with_platform_capability(platform_capability):
) )
resp = batch_client.describe_job_definitions(jobDefinitionName=def_name) resp = batch_client.describe_job_definitions(jobDefinitionName=def_name)
resp["jobDefinitions"][0].should.have.key("platformCapabilities").equals( assert resp["jobDefinitions"][0]["platformCapabilities"] == [platform_capability]
[platform_capability]
)
@mock_batch @mock_batch
@ -86,15 +81,13 @@ def test_register_task_definition_with_retry_strategies():
) )
resp = batch_client.describe_job_definitions(jobDefinitionName=def_name) resp = batch_client.describe_job_definitions(jobDefinitionName=def_name)
resp["jobDefinitions"][0].should.have.key("retryStrategy").equals( assert resp["jobDefinitions"][0]["retryStrategy"] == {
{ "attempts": 4,
"attempts": 4, "evaluateOnExit": [
"evaluateOnExit": [ {"onStatusReason": "osr", "action": "retry"},
{"onStatusReason": "osr", "action": "retry"}, {"onStatusReason": "osr2", "action": "exit"},
{"onStatusReason": "osr2", "action": "exit"}, ],
], }
}
)
@mock_batch @mock_batch
@ -108,38 +101,38 @@ def test_reregister_task_definition(use_resource_reqs):
batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs
) )
resp1.should.contain("jobDefinitionArn") assert "jobDefinitionArn" in resp1
resp1.should.have.key("jobDefinitionName").equals(job_def_name) assert resp1["jobDefinitionName"] == job_def_name
resp1.should.contain("revision") assert "revision" in resp1
assert resp1["jobDefinitionArn"].endswith( assert resp1["jobDefinitionArn"].endswith(
f"{resp1['jobDefinitionName']}:{resp1['revision']}" f"{resp1['jobDefinitionName']}:{resp1['revision']}"
) )
resp1["revision"].should.equal(1) assert resp1["revision"] == 1
resp2 = register_job_def( resp2 = register_job_def(
batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs
) )
resp2["revision"].should.equal(2) assert resp2["revision"] == 2
resp2["jobDefinitionArn"].should_not.equal(resp1["jobDefinitionArn"]) assert resp2["jobDefinitionArn"] != resp1["jobDefinitionArn"]
resp3 = register_job_def( resp3 = register_job_def(
batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs
) )
resp3["revision"].should.equal(3) assert resp3["revision"] == 3
resp3["jobDefinitionArn"].should_not.equal(resp1["jobDefinitionArn"]) assert resp3["jobDefinitionArn"] != resp1["jobDefinitionArn"]
resp3["jobDefinitionArn"].should_not.equal(resp2["jobDefinitionArn"]) assert resp3["jobDefinitionArn"] != resp2["jobDefinitionArn"]
resp4 = register_job_def( resp4 = register_job_def(
batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs batch_client, definition_name=job_def_name, use_resource_reqs=use_resource_reqs
) )
resp4["revision"].should.equal(4) assert resp4["revision"] == 4
resp4["jobDefinitionArn"].should_not.equal(resp1["jobDefinitionArn"]) assert resp4["jobDefinitionArn"] != resp1["jobDefinitionArn"]
resp4["jobDefinitionArn"].should_not.equal(resp2["jobDefinitionArn"]) assert resp4["jobDefinitionArn"] != resp2["jobDefinitionArn"]
resp4["jobDefinitionArn"].should_not.equal(resp3["jobDefinitionArn"]) assert resp4["jobDefinitionArn"] != resp3["jobDefinitionArn"]
@mock_batch @mock_batch
@ -165,9 +158,9 @@ def test_reregister_task_definition_should_not_reuse_parameters_from_inactive_de
definitions = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)[ definitions = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)[
"jobDefinitions" "jobDefinitions"
] ]
definitions.should.have.length_of(1) assert len(definitions) == 1
definitions[0].should.have.key("parameters").equals({"param1": "val1"}) assert definitions[0]["parameters"] == {"param1": "val1"}
# Deactivate the definition # Deactivate the definition
batch_client.deregister_job_definition(jobDefinition=job_def_arn) batch_client.deregister_job_definition(jobDefinition=job_def_arn)
@ -187,12 +180,12 @@ def test_reregister_task_definition_should_not_reuse_parameters_from_inactive_de
definitions = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)[ definitions = batch_client.describe_job_definitions(jobDefinitionName=job_def_name)[
"jobDefinitions" "jobDefinitions"
] ]
definitions.should.have.length_of(2) assert len(definitions) == 2
# Only the inactive definition should have the parameters # Only the inactive definition should have the parameters
actual = [(d["revision"], d["status"], d.get("parameters")) for d in definitions] actual = [(d["revision"], d["status"], d.get("parameters")) for d in definitions]
actual.should.contain((1, "INACTIVE", {"param1": "val1"})) assert (1, "INACTIVE", {"param1": "val1"}) in actual
actual.should.contain((2, "ACTIVE", {})) assert (2, "ACTIVE", {}) in actual
@mock_batch @mock_batch
@ -208,15 +201,15 @@ def test_delete_task_definition(use_resource_reqs):
batch_client.deregister_job_definition(jobDefinition=resp["jobDefinitionArn"]) batch_client.deregister_job_definition(jobDefinition=resp["jobDefinitionArn"])
all_defs = batch_client.describe_job_definitions()["jobDefinitions"] all_defs = batch_client.describe_job_definitions()["jobDefinitions"]
[jobdef["jobDefinitionName"] for jobdef in all_defs].should.contain(name) assert name in [jobdef["jobDefinitionName"] for jobdef in all_defs]
definitions = batch_client.describe_job_definitions(jobDefinitionName=name)[ definitions = batch_client.describe_job_definitions(jobDefinitionName=name)[
"jobDefinitions" "jobDefinitions"
] ]
definitions.should.have.length_of(1) assert len(definitions) == 1
definitions[0].should.have.key("revision").equals(1) assert definitions[0]["revision"] == 1
definitions[0].should.have.key("status").equals("INACTIVE") assert definitions[0]["status"] == "INACTIVE"
@mock_batch @mock_batch
@ -233,7 +226,7 @@ def test_delete_task_definition_by_name(use_resource_reqs):
all_defs = batch_client.describe_job_definitions()["jobDefinitions"] all_defs = batch_client.describe_job_definitions()["jobDefinitions"]
# We should still see our job definition as INACTIVE, as it is kept for 180 days # We should still see our job definition as INACTIVE, as it is kept for 180 days
[jobdef["jobDefinitionName"] for jobdef in all_defs].should.contain(name) assert name in [jobdef["jobDefinitionName"] for jobdef in all_defs]
# Registering the job definition again should up the revision number # Registering the job definition again should up the revision number
register_job_def( register_job_def(
@ -243,14 +236,14 @@ def test_delete_task_definition_by_name(use_resource_reqs):
definitions = batch_client.describe_job_definitions(jobDefinitionName=name)[ definitions = batch_client.describe_job_definitions(jobDefinitionName=name)[
"jobDefinitions" "jobDefinitions"
] ]
definitions.should.have.length_of(2) assert len(definitions) == 2
revision_status = [ revision_status = [
{"revision": d["revision"], "status": d["status"]} for d in definitions {"revision": d["revision"], "status": d["status"]} for d in definitions
] ]
revision_status.should.contain({"revision": 1, "status": "INACTIVE"}) assert {"revision": 1, "status": "INACTIVE"} in revision_status
revision_status.should.contain({"revision": 2, "status": "ACTIVE"}) assert {"revision": 2, "status": "ACTIVE"} in revision_status
@mock_batch @mock_batch
@ -277,27 +270,27 @@ def test_describe_task_definition(use_resource_reqs):
register_job_def_with_tags(batch_client, definition_name=tagged_name) register_job_def_with_tags(batch_client, definition_name=tagged_name)
resp = batch_client.describe_job_definitions(jobDefinitionName=sleep_def_name) resp = batch_client.describe_job_definitions(jobDefinitionName=sleep_def_name)
len(resp["jobDefinitions"]).should.equal(2) assert len(resp["jobDefinitions"]) == 2
job_defs = batch_client.describe_job_definitions()["jobDefinitions"] job_defs = batch_client.describe_job_definitions()["jobDefinitions"]
all_names = [jd["jobDefinitionName"] for jd in job_defs] all_names = [jd["jobDefinitionName"] for jd in job_defs]
all_names.should.contain(sleep_def_name) assert sleep_def_name in all_names
all_names.should.contain(other_name) assert other_name in all_names
all_names.should.contain(tagged_name) assert tagged_name in all_names
resp = batch_client.describe_job_definitions( resp = batch_client.describe_job_definitions(
jobDefinitions=[sleep_def_name, other_name] jobDefinitions=[sleep_def_name, other_name]
) )
len(resp["jobDefinitions"]).should.equal(3) assert len(resp["jobDefinitions"]) == 3
resp["jobDefinitions"][0]["tags"].should.equal({}) assert resp["jobDefinitions"][0]["tags"] == {}
resp = batch_client.describe_job_definitions(jobDefinitionName=tagged_name) resp = batch_client.describe_job_definitions(jobDefinitionName=tagged_name)
resp["jobDefinitions"][0]["tags"].should.equal({"foo": "123", "bar": "456"}) assert resp["jobDefinitions"][0]["tags"] == {"foo": "123", "bar": "456"}
for job_definition in resp["jobDefinitions"]: for job_definition in resp["jobDefinitions"]:
job_definition["status"].should.equal("ACTIVE") assert job_definition["status"] == "ACTIVE"
job_definition.shouldnt.have.key("platformCapabilities") assert "platformCapabilities" not in job_definition
job_definition.shouldnt.have.key("retryStrategy") assert "retryStrategy" not in job_definition
def register_job_def(batch_client, definition_name="sleep10", use_resource_reqs=True): def register_job_def(batch_client, definition_name="sleep10", use_resource_reqs=True):

View File

@ -1,5 +1,3 @@
import sure # noqa # pylint: disable=unused-import
import moto.server as server import moto.server as server
from moto import mock_batch from moto import mock_batch
@ -14,4 +12,4 @@ def test_batch_list():
test_client = backend.test_client() test_client = backend.test_client()
res = test_client.get("/v1/describecomputeenvironments") res = test_client.get("/v1/describecomputeenvironments")
res.status_code.should.equal(200) assert res.status_code == 200

View File

@ -1,6 +1,5 @@
import boto3 import boto3
import json import json
import sure # noqa # pylint: disable=unused-import
from moto import mock_iam, mock_ec2, mock_ecs, mock_cloudformation from moto import mock_iam, mock_ec2, mock_ecs, mock_cloudformation
from moto import mock_batch_simple as mock_batch_without_docker from moto import mock_batch_simple as mock_batch_without_docker
from uuid import uuid4 from uuid import uuid4
@ -92,17 +91,12 @@ def test_create_env_cf():
] ]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
summary = stack_resources["StackResourceSummaries"][0]
stack_resources["StackResourceSummaries"][0]["ResourceStatus"].should.equal( assert summary["ResourceStatus"] == "CREATE_COMPLETE"
"CREATE_COMPLETE"
)
# Spot checks on the ARN # Spot checks on the ARN
stack_resources["StackResourceSummaries"][0]["PhysicalResourceId"].startswith( assert "arn:aws:batch:" in summary["PhysicalResourceId"]
"arn:aws:batch:" assert stack_name in summary["PhysicalResourceId"]
)
stack_resources["StackResourceSummaries"][0]["PhysicalResourceId"].should.contain(
stack_name
)
@mock_cloudformation() @mock_cloudformation()
@ -156,7 +150,7 @@ def test_create_job_queue_cf():
] ]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources["StackResourceSummaries"]).should.equal(2) assert len(stack_resources["StackResourceSummaries"]) == 2
job_queue_resource = list( job_queue_resource = list(
filter( filter(
@ -165,11 +159,11 @@ def test_create_job_queue_cf():
) )
)[0] )[0]
job_queue_resource["ResourceStatus"].should.equal("CREATE_COMPLETE") assert job_queue_resource["ResourceStatus"] == "CREATE_COMPLETE"
# Spot checks on the ARN # Spot checks on the ARN
job_queue_resource["PhysicalResourceId"].startswith("arn:aws:batch:") assert job_queue_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
job_queue_resource["PhysicalResourceId"].should.contain(stack_name) assert stack_name in job_queue_resource["PhysicalResourceId"]
job_queue_resource["PhysicalResourceId"].should.contain("job-queue/") assert "job-queue/" in job_queue_resource["PhysicalResourceId"]
@mock_cloudformation @mock_cloudformation
@ -248,7 +242,7 @@ def test_create_job_def_cf():
] ]
stack_resources = cf_conn.list_stack_resources(StackName=stack_id) stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources["StackResourceSummaries"]).should.equal(3) assert len(stack_resources["StackResourceSummaries"]) == 3
job_def_resource = list( job_def_resource = list(
filter( filter(
@ -257,11 +251,11 @@ def test_create_job_def_cf():
) )
)[0] )[0]
job_def_resource["ResourceStatus"].should.equal("CREATE_COMPLETE") assert job_def_resource["ResourceStatus"] == "CREATE_COMPLETE"
# Spot checks on the ARN # Spot checks on the ARN
job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:") assert job_def_resource["PhysicalResourceId"].startswith("arn:aws:batch:")
job_def_resource["PhysicalResourceId"].should.contain(f"{stack_name}-JobDef") assert f"{stack_name}-JobDef" in job_def_resource["PhysicalResourceId"]
job_def_resource["PhysicalResourceId"].should.contain("job-definition/") assert "job-definition/" in job_def_resource["PhysicalResourceId"]
# Test the linux parameter device host path # Test the linux parameter device host path
# This ensures that batch is parsing the parameter dictionaries # This ensures that batch is parsing the parameter dictionaries
@ -275,4 +269,4 @@ def test_create_job_def_cf():
"containerProperties" "containerProperties"
]["linuxParameters"]["devices"][0]["hostPath"] ]["linuxParameters"]["devices"][0]["hostPath"]
job_def_linux_device_host_path.should.equal("test-path") assert job_def_linux_device_host_path == "test-path"

View File

@ -1,10 +1,9 @@
from ..test_batch import _get_clients, _setup from ..test_batch import _get_clients, _setup
import sure # noqa # pylint: disable=unused-import
from moto import mock_batch_simple, mock_iam, mock_ec2, mock_ecs, settings from moto import mock_batch_simple, mock_iam, mock_ec2, mock_ecs, settings
from uuid import uuid4 from uuid import uuid4
# Copy of test_batch/test_batch_cloudformation # Copy of test_batch/test_batch_compute_envs
# Except that we verify this behaviour still works without docker # Except that we verify this behaviour still works without docker
@ -38,8 +37,8 @@ def test_create_managed_compute_environment():
}, },
serviceRole=iam_arn, serviceRole=iam_arn,
) )
resp.should.contain("computeEnvironmentArn") assert "computeEnvironmentArn" in resp
resp["computeEnvironmentName"].should.equal(compute_name) assert resp["computeEnvironmentName"] == compute_name
our_env = batch_client.describe_compute_environments( our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name] computeEnvironments=[compute_name]
@ -49,12 +48,12 @@ def test_create_managed_compute_environment():
if not settings.TEST_SERVER_MODE: if not settings.TEST_SERVER_MODE:
# Can't verify this in ServerMode, as other tests may have created instances # Can't verify this in ServerMode, as other tests may have created instances
resp = ec2_client.describe_instances() resp = ec2_client.describe_instances()
resp.should.contain("Reservations") assert "Reservations" in resp
len(resp["Reservations"]).should.equal(3) assert len(resp["Reservations"]) == 3
# Should have created 1 ECS cluster # Should have created 1 ECS cluster
all_clusters = ecs_client.list_clusters()["clusterArns"] all_clusters = ecs_client.list_clusters()["clusterArns"]
all_clusters.should.contain(our_env["ecsClusterArn"]) assert our_env["ecsClusterArn"] in all_clusters
@mock_ec2 @mock_ec2
@ -98,4 +97,4 @@ def test_create_managed_compute_environment_with_instance_family():
our_env = batch_client.describe_compute_environments( our_env = batch_client.describe_compute_environments(
computeEnvironments=[compute_name] computeEnvironments=[compute_name]
)["computeEnvironments"][0] )["computeEnvironments"][0]
our_env["computeResources"]["instanceTypes"].should.equal(["t2"]) assert our_env["computeResources"]["instanceTypes"] == ["t2"]