Merge pull request #1197 from terrycain/batch

AWS Batch
Jack Danger, 2017-10-23 09:43:10 -07:00, committed by GitHub
commit 2eec3a5c36
16 changed files with 2539 additions and 9 deletions

moto/__init__.py

@@ -40,6 +40,7 @@ from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa
from .swf import mock_swf, mock_swf_deprecated # flake8: noqa
from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa
from .logs import mock_logs, mock_logs_deprecated # flake8: noqa
from .batch import mock_batch # flake8: noqa
try:

moto/backends.py

@@ -35,11 +35,13 @@ from moto.sqs import sqs_backends
from moto.ssm import ssm_backends
from moto.sts import sts_backends
from moto.xray import xray_backends
from moto.batch import batch_backends
BACKENDS = {
'acm': acm_backends,
'apigateway': apigateway_backends,
'autoscaling': autoscaling_backends,
'batch': batch_backends,
'cloudformation': cloudformation_backends,
'cloudwatch': cloudwatch_backends,
'datapipeline': datapipeline_backends,

moto/batch/__init__.py (new file, 6 lines)

@@ -0,0 +1,6 @@
from __future__ import unicode_literals
from .models import batch_backends
from ..core.models import base_decorator
batch_backend = batch_backends['us-east-1']
mock_batch = base_decorator(batch_backends)
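
For orientation, mock_batch works like moto's other service decorators. A minimal sketch (the region and assertion are illustrative; the tests added below exercise the full API):

import boto3
from moto import mock_batch

@mock_batch
def test_nothing_created_yet():
    client = boto3.client('batch', region_name='us-east-1')
    resp = client.describe_compute_environments()
    assert resp['computeEnvironments'] == []

test_nothing_created_yet()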

moto/batch/exceptions.py (new file, 37 lines)

@@ -0,0 +1,37 @@
from __future__ import unicode_literals
import json
class AWSError(Exception):
CODE = None
STATUS = 400
def __init__(self, message, code=None, status=None):
self.message = message
self.code = code if code is not None else self.CODE
self.status = status if status is not None else self.STATUS
def response(self):
return json.dumps({'__type': self.code, 'message': self.message}), dict(status=self.status)
class InvalidRequestException(AWSError):
CODE = 'InvalidRequestException'
class InvalidParameterValueException(AWSError):
CODE = 'InvalidParameterValue'
class ValidationError(AWSError):
CODE = 'ValidationError'
class InternalFailure(AWSError):
CODE = 'InternalFailure'
STATUS = 500
class ClientException(AWSError):
CODE = 'ClientException'
STATUS = 400
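
A short sketch of how these exceptions are meant to be consumed: backend code raises them, and the response handlers (responses.py below) catch AWSError and serialize it. The message text here is illustrative:

from moto.batch.exceptions import AWSError, ClientException

try:
    raise ClientException('Compute environment not found')
except AWSError as err:
    body, headers = err.response()
    # body    -> '{"__type": "ClientException", "message": "Compute environment not found"}'
    # headers -> {'status': 400}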

moto/batch/models.py (new file, 1042 lines)

File diff suppressed because it is too large.

moto/batch/responses.py (new file, 296 lines)

@@ -0,0 +1,296 @@
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import batch_backends
from six.moves.urllib.parse import urlsplit
from .exceptions import AWSError
import json
class BatchResponse(BaseResponse):
def _error(self, code, message):
return json.dumps({'__type': code, 'message': message}), dict(status=400)
@property
def batch_backend(self):
"""
:return: Batch Backend
:rtype: moto.batch.models.BatchBackend
"""
return batch_backends[self.region]
@property
def json(self):
if self.body is None or self.body == '':
self._json = {}
elif not hasattr(self, '_json'):
try:
self._json = json.loads(self.body)
            except ValueError:  # json.JSONDecodeError is Python 3 only; ValueError covers both 2 and 3
                # Malformed body: fall back to an empty document rather than leaving self._json unset
                self._json = {}
return self._json
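    # Note: BaseResponse._get_param reads query-string/form parameters, but Batch
    # is a JSON REST API, so this override pulls parameters from the JSON body instead.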
def _get_param(self, param_name, if_none=None):
val = self.json.get(param_name)
if val is not None:
return val
return if_none
def _get_action(self):
# Return element after the /v1/*
return urlsplit(self.uri).path.lstrip('/').split('/')[1]
# CreateComputeEnvironment
def createcomputeenvironment(self):
compute_env_name = self._get_param('computeEnvironmentName')
compute_resource = self._get_param('computeResources')
service_role = self._get_param('serviceRole')
state = self._get_param('state')
_type = self._get_param('type')
try:
name, arn = self.batch_backend.create_compute_environment(
compute_environment_name=compute_env_name,
_type=_type, state=state,
compute_resources=compute_resource,
service_role=service_role
)
except AWSError as err:
return err.response()
result = {
'computeEnvironmentArn': arn,
'computeEnvironmentName': name
}
return json.dumps(result)
# DescribeComputeEnvironments
def describecomputeenvironments(self):
compute_environments = self._get_param('computeEnvironments')
max_results = self._get_param('maxResults') # Ignored, should be int
next_token = self._get_param('nextToken') # Ignored
envs = self.batch_backend.describe_compute_environments(compute_environments, max_results=max_results, next_token=next_token)
result = {'computeEnvironments': envs}
return json.dumps(result)
# DeleteComputeEnvironment
def deletecomputeenvironment(self):
compute_environment = self._get_param('computeEnvironment')
try:
self.batch_backend.delete_compute_environment(compute_environment)
except AWSError as err:
return err.response()
return ''
# UpdateComputeEnvironment
def updatecomputeenvironment(self):
compute_env_name = self._get_param('computeEnvironment')
compute_resource = self._get_param('computeResources')
service_role = self._get_param('serviceRole')
state = self._get_param('state')
try:
name, arn = self.batch_backend.update_compute_environment(
compute_environment_name=compute_env_name,
compute_resources=compute_resource,
service_role=service_role,
state=state
)
except AWSError as err:
return err.response()
result = {
'computeEnvironmentArn': arn,
'computeEnvironmentName': name
}
return json.dumps(result)
# CreateJobQueue
def createjobqueue(self):
compute_env_order = self._get_param('computeEnvironmentOrder')
queue_name = self._get_param('jobQueueName')
priority = self._get_param('priority')
state = self._get_param('state')
try:
name, arn = self.batch_backend.create_job_queue(
queue_name=queue_name,
priority=priority,
state=state,
compute_env_order=compute_env_order
)
except AWSError as err:
return err.response()
result = {
'jobQueueArn': arn,
'jobQueueName': name
}
return json.dumps(result)
# DescribeJobQueues
def describejobqueues(self):
job_queues = self._get_param('jobQueues')
max_results = self._get_param('maxResults') # Ignored, should be int
next_token = self._get_param('nextToken') # Ignored
queues = self.batch_backend.describe_job_queues(job_queues, max_results=max_results, next_token=next_token)
result = {'jobQueues': queues}
return json.dumps(result)
# UpdateJobQueue
def updatejobqueue(self):
compute_env_order = self._get_param('computeEnvironmentOrder')
queue_name = self._get_param('jobQueue')
priority = self._get_param('priority')
state = self._get_param('state')
try:
name, arn = self.batch_backend.update_job_queue(
queue_name=queue_name,
priority=priority,
state=state,
compute_env_order=compute_env_order
)
except AWSError as err:
return err.response()
result = {
'jobQueueArn': arn,
'jobQueueName': name
}
return json.dumps(result)
# DeleteJobQueue
def deletejobqueue(self):
queue_name = self._get_param('jobQueue')
self.batch_backend.delete_job_queue(queue_name)
return ''
# RegisterJobDefinition
def registerjobdefinition(self):
container_properties = self._get_param('containerProperties')
def_name = self._get_param('jobDefinitionName')
parameters = self._get_param('parameters')
retry_strategy = self._get_param('retryStrategy')
_type = self._get_param('type')
try:
name, arn, revision = self.batch_backend.register_job_definition(
def_name=def_name,
parameters=parameters,
_type=_type,
retry_strategy=retry_strategy,
container_properties=container_properties
)
except AWSError as err:
return err.response()
result = {
'jobDefinitionArn': arn,
'jobDefinitionName': name,
'revision': revision
}
return json.dumps(result)
# DeregisterJobDefinition
def deregisterjobdefinition(self):
queue_name = self._get_param('jobDefinition')
self.batch_backend.deregister_job_definition(queue_name)
return ''
# DescribeJobDefinitions
def describejobdefinitions(self):
job_def_name = self._get_param('jobDefinitionName')
job_def_list = self._get_param('jobDefinitions')
max_results = self._get_param('maxResults')
next_token = self._get_param('nextToken')
status = self._get_param('status')
job_defs = self.batch_backend.describe_job_definitions(job_def_name, job_def_list, status, max_results, next_token)
result = {'jobDefinitions': [job.describe() for job in job_defs]}
return json.dumps(result)
# SubmitJob
def submitjob(self):
container_overrides = self._get_param('containerOverrides')
depends_on = self._get_param('dependsOn')
job_def = self._get_param('jobDefinition')
job_name = self._get_param('jobName')
job_queue = self._get_param('jobQueue')
parameters = self._get_param('parameters')
retries = self._get_param('retryStrategy')
try:
name, job_id = self.batch_backend.submit_job(
job_name, job_def, job_queue,
parameters=parameters,
retries=retries,
depends_on=depends_on,
container_overrides=container_overrides
)
except AWSError as err:
return err.response()
result = {
'jobId': job_id,
'jobName': name,
}
return json.dumps(result)
# DescribeJobs
def describejobs(self):
jobs = self._get_param('jobs')
try:
return json.dumps({'jobs': self.batch_backend.describe_jobs(jobs)})
except AWSError as err:
return err.response()
# ListJobs
def listjobs(self):
job_queue = self._get_param('jobQueue')
job_status = self._get_param('jobStatus')
max_results = self._get_param('maxResults')
next_token = self._get_param('nextToken')
try:
jobs = self.batch_backend.list_jobs(job_queue, job_status, max_results, next_token)
except AWSError as err:
return err.response()
result = {'jobSummaryList': [{'jobId': job.job_id, 'jobName': job.job_name} for job in jobs]}
return json.dumps(result)
# TerminateJob
def terminatejob(self):
job_id = self._get_param('jobId')
reason = self._get_param('reason')
try:
self.batch_backend.terminate_job(job_id, reason)
except AWSError as err:
return err.response()
return ''
# CancelJob
    def canceljob(self):  # There's some AWS semantics around the differences, but for us they're identical ;-)
return self.terminatejob()

moto/batch/urls.py (new file, 25 lines)

@@ -0,0 +1,25 @@
from __future__ import unicode_literals
from .responses import BatchResponse
url_bases = [
"https?://batch.(.+).amazonaws.com",
]
url_paths = {
'{0}/v1/createcomputeenvironment$': BatchResponse.dispatch,
'{0}/v1/describecomputeenvironments$': BatchResponse.dispatch,
'{0}/v1/deletecomputeenvironment': BatchResponse.dispatch,
'{0}/v1/updatecomputeenvironment': BatchResponse.dispatch,
'{0}/v1/createjobqueue': BatchResponse.dispatch,
'{0}/v1/describejobqueues': BatchResponse.dispatch,
'{0}/v1/updatejobqueue': BatchResponse.dispatch,
'{0}/v1/deletejobqueue': BatchResponse.dispatch,
'{0}/v1/registerjobdefinition': BatchResponse.dispatch,
'{0}/v1/deregisterjobdefinition': BatchResponse.dispatch,
'{0}/v1/describejobdefinitions': BatchResponse.dispatch,
'{0}/v1/submitjob': BatchResponse.dispatch,
'{0}/v1/describejobs': BatchResponse.dispatch,
'{0}/v1/listjobs': BatchResponse.dispatch,
'{0}/v1/terminatejob': BatchResponse.dispatch,
'{0}/v1/canceljob': BatchResponse.dispatch,
}
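
Every route points at the same dispatch entry point; BatchResponse._get_action (above) derives the handler name from the path segment after /v1/. An illustrative trace:

from six.moves.urllib.parse import urlsplit

uri = 'https://batch.eu-central-1.amazonaws.com/v1/submitjob'
action = urlsplit(uri).path.lstrip('/').split('/')[1]
print(action)  # 'submitjob' -> handled by BatchResponse.submitjob()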

moto/batch/utils.py (new file, 22 lines)

@@ -0,0 +1,22 @@
from __future__ import unicode_literals
def make_arn_for_compute_env(account_id, name, region_name):
return "arn:aws:batch:{0}:{1}:compute-environment/{2}".format(region_name, account_id, name)
def make_arn_for_job_queue(account_id, name, region_name):
return "arn:aws:batch:{0}:{1}:job-queue/{2}".format(region_name, account_id, name)
def make_arn_for_task_def(account_id, name, revision, region_name):
return "arn:aws:batch:{0}:{1}:job-definition/{2}:{3}".format(region_name, account_id, name, revision)
def lowercase_first_key(some_dict):
new_dict = {}
for key, value in some_dict.items():
new_key = key[0].lower() + key[1:]
new_dict[new_key] = value
return new_dict
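
For illustration (the account id is made up), the helpers behave like this; lowercase_first_key turns CloudFormation-style UpperCamelCase keys into the API's lowerCamelCase:

from moto.batch.utils import make_arn_for_compute_env, lowercase_first_key

print(make_arn_for_compute_env('123456789012', 'test_compute_env', 'eu-central-1'))
# arn:aws:batch:eu-central-1:123456789012:compute-environment/test_compute_env

print(lowercase_first_key({'ComputeEnvironmentName': 'x', 'Priority': 1}))
# {'computeEnvironmentName': 'x', 'priority': 1}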

moto/cloudformation/parsing.py

@@ -8,6 +8,7 @@ import re
from moto.autoscaling import models as autoscaling_models
from moto.awslambda import models as lambda_models
from moto.batch import models as batch_models
from moto.cloudwatch import models as cloudwatch_models
from moto.datapipeline import models as datapipeline_models
from moto.dynamodb import models as dynamodb_models
@@ -31,6 +32,9 @@ from boto.cloudformation.stack import Output
MODEL_MAP = {
"AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup,
"AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration,
"AWS::Batch::JobDefinition": batch_models.JobDefinition,
"AWS::Batch::JobQueue": batch_models.JobQueue,
"AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
"AWS::DynamoDB::Table": dynamodb_models.Table,
"AWS::Kinesis::Stream": kinesis_models.Stream,
"AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping,

moto/elbv2/urls.py

@@ -1,10 +1,10 @@
from __future__ import unicode_literals
-from .responses import ELBV2Response
+from ..elb.urls import api_version_elb_backend
url_bases = [
    "https?://elasticloadbalancing.(.+).amazonaws.com",
]
url_paths = {
-    '{0}/$': ELBV2Response.dispatch,
+    '{0}/$': api_version_elb_backend,
}

moto/iam/models.py

@@ -528,6 +528,12 @@ class IAMBackend(BaseBackend):
return role
raise IAMNotFoundException("Role {0} not found".format(role_name))
def get_role_by_arn(self, arn):
for role in self.get_roles():
if role.arn == arn:
return role
raise IAMNotFoundException("Role {0} not found".format(arn))
def delete_role(self, role_name):
for role in self.get_roles():
if role.name == role_name:

moto/logs/models.py

@@ -22,6 +22,13 @@ class LogEvent:
"timestamp": self.timestamp
}
def to_response_dict(self):
return {
"ingestionTime": self.ingestionTime,
"message": self.message,
"timestamp": self.timestamp
}
class LogStream:
_log_ids = 0
@@ -41,7 +48,14 @@ class LogStream:
self.__class__._log_ids += 1
def _update(self):
self.firstEventTimestamp = min([x.timestamp for x in self.events])
self.lastEventTimestamp = max([x.timestamp for x in self.events])
def to_describe_dict(self):
# Compute start and end times
self._update()
return {
"arn": self.arn,
"creationTime": self.creationTime,
@@ -79,7 +93,7 @@ class LogStream:
if next_token is None:
next_token = 0
-        events_page = events[next_token: next_token + limit]
+        events_page = [event.to_response_dict() for event in events[next_token: next_token + limit]]
next_token += limit
if next_token >= len(self.events):
next_token = None
@@ -120,17 +134,17 @@ class LogGroup:
del self.streams[log_stream_name]
def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
-        log_streams = [stream.to_describe_dict() for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)]
+        log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)]
-        def sorter(stream):
-            return stream.name if order_by == 'logStreamName' else stream.lastEventTimestamp
+        def sorter(item):
+            return item[0] if order_by == 'logStreamName' else item[1]['lastEventTimestamp']
if next_token is None:
next_token = 0
log_streams = sorted(log_streams, key=sorter, reverse=descending)
new_token = next_token + limit
-        log_streams_page = log_streams[next_token: new_token]
+        log_streams_page = [x[1] for x in log_streams[next_token: new_token]]
if new_token >= len(log_streams):
new_token = None
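
Worth noting: the nextToken in this backend is a plain integer offset into the sorted list rather than an opaque cursor. A toy rendition of the paging arithmetic, with stand-in data:

streams = ['a', 'b', 'c', 'd', 'e']  # stand-ins for to_describe_dict() results
limit, next_token = 2, 0

page = streams[next_token: next_token + limit]  # ['a', 'b']
next_token += limit                             # 2
if next_token >= len(streams):
    next_token = None                           # signals the last page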

moto/logs/responses.py

@@ -47,7 +47,7 @@ class LogsResponse(BaseResponse):
def describe_log_streams(self):
log_group_name = self._get_param('logGroupName')
-        log_stream_name_prefix = self._get_param('logStreamNamePrefix')
+        log_stream_name_prefix = self._get_param('logStreamNamePrefix', '')
descending = self._get_param('descending', False)
limit = self._get_param('limit', 50)
assert limit <= 50
@@ -83,7 +83,7 @@ class LogsResponse(BaseResponse):
limit = self._get_param('limit', 10000)
assert limit <= 10000
next_token = self._get_param('nextToken')
-        start_from_head = self._get_param('startFromHead')
+        start_from_head = self._get_param('startFromHead', False)
events, next_backward_token, next_foward_token = \
self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)

tests/test_batch/test_batch.py (new file)

@@ -0,0 +1,809 @@
from __future__ import unicode_literals
import time
import datetime
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs
import functools
import nose
def expected_failure(test):
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
        except Exception:
raise nose.SkipTest
return inner
DEFAULT_REGION = 'eu-central-1'
def _get_clients():
return boto3.client('ec2', region_name=DEFAULT_REGION), \
boto3.client('iam', region_name=DEFAULT_REGION), \
boto3.client('ecs', region_name=DEFAULT_REGION), \
boto3.client('logs', region_name=DEFAULT_REGION), \
boto3.client('batch', region_name=DEFAULT_REGION)
def _setup(ec2_client, iam_client):
"""
Do prerequisite setup
:return: VPC ID, Subnet ID, Security group ID, IAM Role ARN
:rtype: tuple
"""
resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24')
vpc_id = resp['Vpc']['VpcId']
resp = ec2_client.create_subnet(
AvailabilityZone='eu-central-1a',
CidrBlock='172.30.0.0/25',
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
resp = ec2_client.create_security_group(
Description='test_sg_desc',
GroupName='test_sg',
VpcId=vpc_id
)
sg_id = resp['GroupId']
resp = iam_client.create_role(
RoleName='TestRole',
AssumeRolePolicyDocument='some_policy'
)
iam_arn = resp['Role']['Arn']
return vpc_id, subnet_id, sg_id, iam_arn
# Yes, yes it talks to all the things
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_managed_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='MANAGED',
state='ENABLED',
computeResources={
'type': 'EC2',
'minvCpus': 5,
'maxvCpus': 10,
'desiredvCpus': 5,
'instanceTypes': [
't2.small',
't2.medium'
],
'imageId': 'some_image_id',
'subnets': [
subnet_id,
],
'securityGroupIds': [
sg_id,
],
'ec2KeyPair': 'string',
'instanceRole': iam_arn,
'tags': {
'string': 'string'
},
'bidPercentage': 123,
'spotIamFleetRole': 'string'
},
serviceRole=iam_arn
)
resp.should.contain('computeEnvironmentArn')
resp['computeEnvironmentName'].should.equal(compute_name)
    # A t2.medium has 2 vCPUs and a t2.small has 1, so 2 mediums and 1 small should be created to reach the desired 5 vCPUs
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(3)
# Should have created 1 ECS cluster
resp = ecs_client.list_clusters()
resp.should.contain('clusterArns')
len(resp['clusterArns']).should.equal(1)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
resp.should.contain('computeEnvironmentArn')
resp['computeEnvironmentName'].should.equal(compute_name)
    # It's unmanaged, so no instances should be created
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(0)
# Should have created 1 ECS cluster
resp = ecs_client.list_clusters()
resp.should.contain('clusterArns')
len(resp['clusterArns']).should.equal(1)
# TODO create 1000s of tests to test complex option combinations of create environment
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(1)
resp['computeEnvironments'][0]['computeEnvironmentName'].should.equal(compute_name)
# Test filtering
resp = batch_client.describe_compute_environments(
computeEnvironments=['test1']
)
len(resp['computeEnvironments']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_unmanaged_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
batch_client.delete_compute_environment(
computeEnvironment=compute_name,
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(0)
resp = ecs_client.list_clusters()
len(resp.get('clusterArns', [])).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_managed_compute_environment():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='MANAGED',
state='ENABLED',
computeResources={
'type': 'EC2',
'minvCpus': 5,
'maxvCpus': 10,
'desiredvCpus': 5,
'instanceTypes': [
't2.small',
't2.medium'
],
'imageId': 'some_image_id',
'subnets': [
subnet_id,
],
'securityGroupIds': [
sg_id,
],
'ec2KeyPair': 'string',
'instanceRole': iam_arn,
'tags': {
'string': 'string'
},
'bidPercentage': 123,
'spotIamFleetRole': 'string'
},
serviceRole=iam_arn
)
batch_client.delete_compute_environment(
computeEnvironment=compute_name,
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(0)
resp = ec2_client.describe_instances()
resp.should.contain('Reservations')
len(resp['Reservations']).should.equal(3)
for reservation in resp['Reservations']:
reservation['Instances'][0]['State']['Name'].should.equal('terminated')
resp = ecs_client.list_clusters()
len(resp.get('clusterArns', [])).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_unmanaged_compute_environment_state():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
batch_client.update_compute_environment(
computeEnvironment=compute_name,
state='DISABLED'
)
resp = batch_client.describe_compute_environments()
len(resp['computeEnvironments']).should.equal(1)
resp['computeEnvironments'][0]['state'].should.equal('DISABLED')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
resp.should.contain('jobQueueArn')
resp.should.contain('jobQueueName')
queue_arn = resp['jobQueueArn']
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['jobQueueArn'].should.equal(queue_arn)
resp = batch_client.describe_job_queues(jobQueues=['test_invalid_queue'])
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_job_queue_bad_arn():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
try:
batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn + 'LALALA'
},
]
)
    except ClientError as err:
        err.response['Error']['Code'].should.equal('ClientException')
    else:
        raise RuntimeError('Should have raised ClientException')
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_update_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
batch_client.update_job_queue(
jobQueue=queue_arn,
priority=5
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(1)
resp['jobQueues'][0]['priority'].should.equal(5)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_job_queue():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
batch_client.delete_job_queue(
jobQueue=queue_arn
)
resp = batch_client.describe_job_queues()
resp.should.contain('jobQueues')
len(resp['jobQueues']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_register_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
resp.should.contain('jobDefinitionArn')
resp.should.contain('jobDefinitionName')
resp.should.contain('revision')
assert resp['jobDefinitionArn'].endswith('{0}:{1}'.format(resp['jobDefinitionName'], resp['revision']))
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_reregister_task_definition():
    # Re-registering a job definition with the same name bumps the revision number
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp1 = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
resp1.should.contain('jobDefinitionArn')
resp1.should.contain('jobDefinitionName')
resp1.should.contain('revision')
assert resp1['jobDefinitionArn'].endswith('{0}:{1}'.format(resp1['jobDefinitionName'], resp1['revision']))
resp1['revision'].should.equal(1)
resp2 = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 68,
'command': ['sleep', '10']
}
)
resp2['revision'].should.equal(2)
resp2['jobDefinitionArn'].should_not.equal(resp1['jobDefinitionArn'])
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_delete_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
batch_client.deregister_job_definition(jobDefinition=resp['jobDefinitionArn'])
resp = batch_client.describe_job_definitions()
len(resp['jobDefinitions']).should.equal(0)
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_describe_task_definition():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 64,
'command': ['sleep', '10']
}
)
batch_client.register_job_definition(
jobDefinitionName='test1',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 64,
'command': ['sleep', '10']
}
)
resp = batch_client.describe_job_definitions(
jobDefinitionName='sleep10'
)
len(resp['jobDefinitions']).should.equal(2)
resp = batch_client.describe_job_definitions()
len(resp['jobDefinitions']).should.equal(3)
resp = batch_client.describe_job_definitions(
jobDefinitions=['sleep10', 'test1']
)
len(resp['jobDefinitions']).should.equal(3)
# SLOW TESTS
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_submit_job():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id = resp['jobId']
future = datetime.datetime.now() + datetime.timedelta(seconds=30)
while datetime.datetime.now() < future:
resp = batch_client.describe_jobs(jobs=[job_id])
print("{0}:{1} {2}".format(resp['jobs'][0]['jobName'], resp['jobs'][0]['jobId'], resp['jobs'][0]['status']))
if resp['jobs'][0]['status'] == 'FAILED':
raise RuntimeError('Batch job failed')
if resp['jobs'][0]['status'] == 'SUCCEEDED':
break
time.sleep(0.5)
else:
raise RuntimeError('Batch job timed out')
resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job')
len(resp['logStreams']).should.equal(1)
ls_name = resp['logStreams'][0]['logStreamName']
resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name)
len(resp['events']).should.be.greater_than(5)
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_list_jobs():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id1 = resp['jobId']
resp = batch_client.submit_job(
jobName='test2',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id2 = resp['jobId']
future = datetime.datetime.now() + datetime.timedelta(seconds=30)
resp_finished_jobs = batch_client.list_jobs(
jobQueue=queue_arn,
jobStatus='SUCCEEDED'
)
# Wait only as long as it takes to run the jobs
while datetime.datetime.now() < future:
resp = batch_client.describe_jobs(jobs=[job_id1, job_id2])
any_failed_jobs = any([job['status'] == 'FAILED' for job in resp['jobs']])
succeeded_jobs = all([job['status'] == 'SUCCEEDED' for job in resp['jobs']])
if any_failed_jobs:
raise RuntimeError('A Batch job failed')
if succeeded_jobs:
break
time.sleep(0.5)
else:
raise RuntimeError('Batch jobs timed out')
resp_finished_jobs2 = batch_client.list_jobs(
jobQueue=queue_arn,
jobStatus='SUCCEEDED'
)
len(resp_finished_jobs['jobSummaryList']).should.equal(0)
len(resp_finished_jobs2['jobSummaryList']).should.equal(2)
@expected_failure
@mock_logs
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_terminate_job():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
compute_name = 'test_compute_env'
resp = batch_client.create_compute_environment(
computeEnvironmentName=compute_name,
type='UNMANAGED',
state='ENABLED',
serviceRole=iam_arn
)
arn = resp['computeEnvironmentArn']
resp = batch_client.create_job_queue(
jobQueueName='test_job_queue',
state='ENABLED',
priority=123,
computeEnvironmentOrder=[
{
'order': 123,
'computeEnvironment': arn
},
]
)
queue_arn = resp['jobQueueArn']
resp = batch_client.register_job_definition(
jobDefinitionName='sleep10',
type='container',
containerProperties={
'image': 'busybox',
'vcpus': 1,
'memory': 128,
'command': ['sleep', '10']
}
)
job_def_arn = resp['jobDefinitionArn']
resp = batch_client.submit_job(
jobName='test1',
jobQueue=queue_arn,
jobDefinition=job_def_arn
)
job_id = resp['jobId']
time.sleep(2)
batch_client.terminate_job(jobId=job_id, reason='test_terminate')
time.sleep(1)
resp = batch_client.describe_jobs(jobs=[job_id])
resp['jobs'][0]['jobName'].should.equal('test1')
resp['jobs'][0]['status'].should.equal('FAILED')
resp['jobs'][0]['statusReason'].should.equal('test_terminate')

tests/test_batch/test_cloudformation.py (new file)

@@ -0,0 +1,247 @@
from __future__ import unicode_literals
import time
import datetime
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs, mock_cloudformation
import functools
import nose
import json
DEFAULT_REGION = 'eu-central-1'
def _get_clients():
return boto3.client('ec2', region_name=DEFAULT_REGION), \
boto3.client('iam', region_name=DEFAULT_REGION), \
boto3.client('ecs', region_name=DEFAULT_REGION), \
boto3.client('logs', region_name=DEFAULT_REGION), \
boto3.client('batch', region_name=DEFAULT_REGION)
def _setup(ec2_client, iam_client):
"""
Do prerequisite setup
:return: VPC ID, Subnet ID, Security group ID, IAM Role ARN
:rtype: tuple
"""
resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24')
vpc_id = resp['Vpc']['VpcId']
resp = ec2_client.create_subnet(
AvailabilityZone='eu-central-1a',
CidrBlock='172.30.0.0/25',
VpcId=vpc_id
)
subnet_id = resp['Subnet']['SubnetId']
resp = ec2_client.create_security_group(
Description='test_sg_desc',
GroupName='test_sg',
VpcId=vpc_id
)
sg_id = resp['GroupId']
resp = iam_client.create_role(
RoleName='TestRole',
AssumeRolePolicyDocument='some_policy'
)
iam_arn = resp['Role']['Arn']
return vpc_id, subnet_id, sg_id, iam_arn
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_env_cf():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
create_environment_template = {
'Resources': {
"ComputeEnvironment": {
"Type": "AWS::Batch::ComputeEnvironment",
"Properties": {
"Type": "MANAGED",
"ComputeResources": {
"Type": "EC2",
"MinvCpus": 0,
"DesiredvCpus": 0,
"MaxvCpus": 64,
"InstanceTypes": [
"optimal"
],
"Subnets": [subnet_id],
"SecurityGroupIds": [sg_id],
"InstanceRole": iam_arn
},
"ServiceRole": iam_arn
}
}
}
}
cf_json = json.dumps(create_environment_template)
cf_conn = boto3.client('cloudformation', DEFAULT_REGION)
stack_id = cf_conn.create_stack(
StackName='test_stack',
TemplateBody=cf_json,
)['StackId']
stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
stack_resources['StackResourceSummaries'][0]['ResourceStatus'].should.equal('CREATE_COMPLETE')
# Spot checks on the ARN
    assert stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].startswith('arn:aws:batch:')
stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].should.contain('test_stack')
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_queue_cf():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
create_environment_template = {
'Resources': {
"ComputeEnvironment": {
"Type": "AWS::Batch::ComputeEnvironment",
"Properties": {
"Type": "MANAGED",
"ComputeResources": {
"Type": "EC2",
"MinvCpus": 0,
"DesiredvCpus": 0,
"MaxvCpus": 64,
"InstanceTypes": [
"optimal"
],
"Subnets": [subnet_id],
"SecurityGroupIds": [sg_id],
"InstanceRole": iam_arn
},
"ServiceRole": iam_arn
}
},
"JobQueue": {
"Type": "AWS::Batch::JobQueue",
"Properties": {
"Priority": 1,
"ComputeEnvironmentOrder": [
{
"Order": 1,
"ComputeEnvironment": {"Ref": "ComputeEnvironment"}
}
]
}
},
}
}
cf_json = json.dumps(create_environment_template)
cf_conn = boto3.client('cloudformation', DEFAULT_REGION)
stack_id = cf_conn.create_stack(
StackName='test_stack',
TemplateBody=cf_json,
)['StackId']
stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources['StackResourceSummaries']).should.equal(2)
job_queue_resource = list(filter(lambda item: item['ResourceType'] == 'AWS::Batch::JobQueue', stack_resources['StackResourceSummaries']))[0]
job_queue_resource['ResourceStatus'].should.equal('CREATE_COMPLETE')
# Spot checks on the ARN
    assert job_queue_resource['PhysicalResourceId'].startswith('arn:aws:batch:')
job_queue_resource['PhysicalResourceId'].should.contain('test_stack')
job_queue_resource['PhysicalResourceId'].should.contain('job-queue/')
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_job_def_cf():
ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)
create_environment_template = {
'Resources': {
"ComputeEnvironment": {
"Type": "AWS::Batch::ComputeEnvironment",
"Properties": {
"Type": "MANAGED",
"ComputeResources": {
"Type": "EC2",
"MinvCpus": 0,
"DesiredvCpus": 0,
"MaxvCpus": 64,
"InstanceTypes": [
"optimal"
],
"Subnets": [subnet_id],
"SecurityGroupIds": [sg_id],
"InstanceRole": iam_arn
},
"ServiceRole": iam_arn
}
},
"JobQueue": {
"Type": "AWS::Batch::JobQueue",
"Properties": {
"Priority": 1,
"ComputeEnvironmentOrder": [
{
"Order": 1,
"ComputeEnvironment": {"Ref": "ComputeEnvironment"}
}
]
}
},
"JobDefinition": {
"Type": "AWS::Batch::JobDefinition",
"Properties": {
"Type": "container",
"ContainerProperties": {
"Image": {
"Fn::Join": ["", ["137112412989.dkr.ecr.", {"Ref": "AWS::Region"}, ".amazonaws.com/amazonlinux:latest"]]
},
"Vcpus": 2,
"Memory": 2000,
"Command": ["echo", "Hello world"]
},
"RetryStrategy": {
"Attempts": 1
}
}
},
}
}
cf_json = json.dumps(create_environment_template)
cf_conn = boto3.client('cloudformation', DEFAULT_REGION)
stack_id = cf_conn.create_stack(
StackName='test_stack',
TemplateBody=cf_json,
)['StackId']
stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
len(stack_resources['StackResourceSummaries']).should.equal(3)
job_def_resource = list(filter(lambda item: item['ResourceType'] == 'AWS::Batch::JobDefinition', stack_resources['StackResourceSummaries']))[0]
job_def_resource['ResourceStatus'].should.equal('CREATE_COMPLETE')
# Spot checks on the ARN
    assert job_def_resource['PhysicalResourceId'].startswith('arn:aws:batch:')
job_def_resource['PhysicalResourceId'].should.contain('test_stack-JobDef')
job_def_resource['PhysicalResourceId'].should.contain('job-definition/')

tests/test_batch/test_server.py (new file)

@@ -0,0 +1,19 @@
from __future__ import unicode_literals
import sure # noqa
import moto.server as server
from moto import mock_batch
'''
Test the different server responses
'''
@mock_batch
def test_batch_list():
backend = server.create_backend_app("batch")
test_client = backend.test_client()
res = test_client.get('/v1/describecomputeenvironments')
res.status_code.should.equal(200)
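
Beyond the in-process test client, the same endpoint can be exercised against moto's standalone server. A hypothetical session (port, region and dummy credentials are arbitrary; assumes the server was started with "moto_server batch -p 5000"):

import boto3

client = boto3.client('batch', region_name='us-east-1',
                      endpoint_url='http://localhost:5000',
                      aws_access_key_id='fake', aws_secret_access_key='fake')
print(client.describe_compute_environments())  # {'computeEnvironments': [], ...}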