Added CreateEnvironment to cloudformation

This commit is contained in:
Terry Cain 2017-10-20 00:51:04 +01:00
parent d67ef8d128
commit 453da4c8b3
No known key found for this signature in database
GPG Key ID: 14D90844E4E9B9F3
3 changed files with 141 additions and 7 deletions

View File

@ -28,7 +28,7 @@ from moto.iam.exceptions import IAMNotFoundException
_orig_adapter_send = requests.adapters.HTTPAdapter.send
logger = logging.getLogger(__name__)
DEFAULT_ACCOUNT_ID = 123456789012
COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9_]{1,128}$')
COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$')
def datetime2int(date):
@ -38,7 +38,7 @@ def datetime2int(date):
class ComputeEnvironment(BaseModel):
def __init__(self, compute_environment_name, _type, state, compute_resources, service_role, region_name):
self.name = compute_environment_name
self.type = _type
self.env_type = _type
self.state = state
self.compute_resources = compute_resources
self.service_role = service_role
@ -55,6 +55,33 @@ class ComputeEnvironment(BaseModel):
self.ecs_arn = arn
self.ecs_name = name
@property
def physical_resource_id(self):
    """CloudFormation physical resource id — the compute environment's ARN."""
    return self.arn
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
    """Create a Batch compute environment from a CloudFormation resource.

    :param resource_name: Name to give the new compute environment.
    :param cloudformation_json: Parsed CloudFormation resource JSON; must
        contain a ``Properties`` mapping with ``Type``, ``ComputeResources``
        and ``ServiceRole`` (``State`` is optional).
    :param region_name: Region whose batch backend owns the environment.
    :return: The newly created ComputeEnvironment model instance.
    """
    backend = batch_backends[region_name]
    properties = cloudformation_json['Properties']

    # CloudFormation uses PascalCase keys (e.g. InstanceRole) while the Batch
    # API expects camelCase (instanceRole); lower-casing the first character
    # of each ComputeResources key normalises them.
    new_comp_res = {
        key[0].lower() + key[1:]: value
        for key, value in properties['ComputeResources'].items()
    }

    env = backend.create_compute_environment(
        resource_name,
        properties['Type'],
        # State is optional in CloudFormation and defaults to ENABLED.
        properties.get('State', 'ENABLED'),
        new_comp_res,
        properties['ServiceRole']
    )
    # create_compute_environment returns a (name, arn) pair; look the model
    # back up by ARN so CloudFormation holds the actual resource object.
    arn = env[1]

    return backend.get_compute_environment_by_arn(arn)
class JobQueue(BaseModel):
def __init__(self, name, priority, state, environments, env_order_json, region_name):
@ -517,10 +544,10 @@ class BatchBackend(BaseBackend):
'ecsClusterArn': environment.ecs_arn,
'serviceRole': environment.service_role,
'state': environment.state,
'type': environment.type,
'type': environment.env_type,
'status': 'VALID'
}
if environment.type == 'MANAGED':
if environment.env_type == 'MANAGED':
json_part['computeResources'] = environment.compute_resources
result.append(json_part)
@ -530,7 +557,7 @@ class BatchBackend(BaseBackend):
def create_compute_environment(self, compute_environment_name, _type, state, compute_resources, service_role):
# Validate
if COMPUTE_ENVIRONMENT_NAME_REGEX.match(compute_environment_name) is None:
raise InvalidParameterValueException('Compute environment name does not match ^[A-Za-z0-9_]{1,128}$')
raise InvalidParameterValueException('Compute environment name does not match ^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$')
if self.get_compute_environment_by_name(compute_environment_name) is not None:
raise InvalidParameterValueException('A compute environment already exists with the name {0}'.format(compute_environment_name))
@ -617,7 +644,9 @@ class BatchBackend(BaseBackend):
if len(cr['instanceTypes']) == 0:
raise InvalidParameterValueException('At least 1 instance type must be provided')
for instance_type in cr['instanceTypes']:
if instance_type not in EC2_INSTANCE_TYPES:
if instance_type == 'optimal':
pass # Optimal should pick from latest of current gen
elif instance_type not in EC2_INSTANCE_TYPES:
raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type))
for sec_id in cr['securityGroupIds']:
@ -657,6 +686,9 @@ class BatchBackend(BaseBackend):
instances = []
for instance_type in instance_types:
if instance_type == 'optimal':
instance_type = 'm4.4xlarge'
instance_vcpus.append(
(EC2_INSTANCE_TYPES[instance_type]['vcpus'], instance_type)
)
@ -700,7 +732,7 @@ class BatchBackend(BaseBackend):
# Delete ECS cluster
self.ecs_backend.delete_cluster(compute_env.ecs_name)
if compute_env.type == 'MANAGED':
if compute_env.env_type == 'MANAGED':
# Delete compute environment
instance_ids = [instance.id for instance in compute_env.instances]
self.ec2_backend.terminate_instances(instance_ids)

View File

@ -8,6 +8,7 @@ import re
from moto.autoscaling import models as autoscaling_models
from moto.awslambda import models as lambda_models
from moto.batch import models as batch_models
from moto.cloudwatch import models as cloudwatch_models
from moto.datapipeline import models as datapipeline_models
from moto.dynamodb import models as dynamodb_models
@ -31,6 +32,9 @@ from boto.cloudformation.stack import Output
MODEL_MAP = {
"AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup,
"AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration,
"AWS::Batch::JobDefinition": batch_models.JobDefinition,
"AWS::Batch::JobQueue": batch_models.JobQueue,
"AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
"AWS::DynamoDB::Table": dynamodb_models.Table,
"AWS::Kinesis::Stream": kinesis_models.Stream,
"AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping,

View File

@ -0,0 +1,98 @@
from __future__ import unicode_literals
import time
import datetime
import boto3
from botocore.exceptions import ClientError
import sure # noqa
from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs, mock_cloudformation
import functools
import nose
import json
DEFAULT_REGION = 'eu-central-1'
def _get_clients():
    """Return boto3 clients for (ec2, iam, ecs, logs, batch) in the default region."""
    services = ('ec2', 'iam', 'ecs', 'logs', 'batch')
    return tuple(boto3.client(name, region_name=DEFAULT_REGION) for name in services)
def _setup(ec2_client, iam_client):
    """
    Do prerequisite setup

    :return: VPC ID, Subnet ID, Security group ID, IAM Role ARN
    :rtype: tuple
    """
    # VPC with a /24, and a /25 subnet inside it.
    vpc_id = ec2_client.create_vpc(CidrBlock='172.30.0.0/24')['Vpc']['VpcId']

    subnet_id = ec2_client.create_subnet(
        AvailabilityZone='eu-central-1a',
        CidrBlock='172.30.0.0/25',
        VpcId=vpc_id,
    )['Subnet']['SubnetId']

    sg_id = ec2_client.create_security_group(
        Description='test_sg_desc',
        GroupName='test_sg',
        VpcId=vpc_id,
    )['GroupId']

    # Role the compute environment will run under; the policy document is a
    # placeholder since moto does not validate it.
    iam_arn = iam_client.create_role(
        RoleName='TestRole',
        AssumeRolePolicyDocument='some_policy',
    )['Role']['Arn']

    return vpc_id, subnet_id, sg_id, iam_arn
@mock_cloudformation()
@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def test_create_env_cf():
    """An AWS::Batch::ComputeEnvironment resource can be created via CloudFormation."""
    ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients()
    vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client)

    create_environment_template = {
        'Resources': {
            "ComputeEnvironment": {
                "Type": "AWS::Batch::ComputeEnvironment",
                "Properties": {
                    "Type": "MANAGED",
                    "ComputeResources": {
                        "Type": "EC2",
                        "MinvCpus": 0,
                        "DesiredvCpus": 0,
                        "MaxvCpus": 64,
                        "InstanceTypes": [
                            "optimal"
                        ],
                        "Subnets": [subnet_id],
                        "SecurityGroupIds": [sg_id],
                        "InstanceRole": iam_arn
                    },
                    "ServiceRole": iam_arn
                }
            }
        }
    }
    cf_json = json.dumps(create_environment_template)

    cf_conn = boto3.client('cloudformation', DEFAULT_REGION)
    stack_id = cf_conn.create_stack(
        StackName='test_stack',
        TemplateBody=cf_json,
    )['StackId']

    stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
    summary = stack_resources['StackResourceSummaries'][0]

    summary['ResourceStatus'].should.equal('CREATE_COMPLETE')
    # Bug fix: the original called str.startswith without asserting its
    # return value, so a wrong ARN prefix would have gone unnoticed.
    assert summary['PhysicalResourceId'].startswith('arn:aws:batch:')
    summary['PhysicalResourceId'].should.contain('test_stack')