Merge pull request #38 from spulec/master

Merge upstream
Bert Blommers, 2020-04-16 07:07:25 +01:00, committed via GitHub
commit adfe08eb29
33 changed files with 1918 additions and 341 deletions


@ -119,3 +119,57 @@ class ApiKeyAlreadyExists(RESTError):
super(ApiKeyAlreadyExists, self).__init__(
"ConflictException", "API Key already exists"
)
class InvalidDomainName(BadRequestException):
code = 404
def __init__(self):
super(InvalidDomainName, self).__init__(
"BadRequestException", "No Domain Name specified"
)
class DomainNameNotFound(RESTError):
code = 404
def __init__(self):
super(DomainNameNotFound, self).__init__(
"NotFoundException", "Invalid Domain Name specified"
)
class InvalidRestApiId(BadRequestException):
code = 404
def __init__(self):
super(InvalidRestApiId, self).__init__(
"BadRequestException", "No Rest API Id specified"
)
class InvalidModelName(BadRequestException):
code = 404
def __init__(self):
super(InvalidModelName, self).__init__(
"BadRequestException", "No Model Name specified"
)
class RestAPINotFound(RESTError):
code = 404
def __init__(self):
super(RestAPINotFound, self).__init__(
"NotFoundException", "Invalid Rest API Id specified"
)
class ModelNotFound(RESTError):
code = 404
def __init__(self):
super(ModelNotFound, self).__init__(
"NotFoundException", "Invalid Model Name specified"
)
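
These subclasses pair an HTTP status (`code`) with an error type and message that the response handlers below serialize into a JSON body. A minimal, self-contained sketch of that contract, using a stand-in for moto's real `RESTError` (which lives in moto.core.exceptions and carries more machinery):

import json

class RESTError(Exception):  # stand-in for moto.core.exceptions.RESTError
    code = 400

    def __init__(self, error_type, message):
        super(RESTError, self).__init__(message)
        self.error_type = error_type
        self.message = message

class DomainNameNotFound(RESTError):
    code = 404

    def __init__(self):
        super(DomainNameNotFound, self).__init__(
            "NotFoundException", "Invalid Domain Name specified"
        )

try:
    raise DomainNameNotFound()
except DomainNameNotFound as error:
    body = json.dumps({"message": error.message, "code": error.error_type})
    print(error.code, body)
    # 404 {"message": "Invalid Domain Name specified", "code": "NotFoundException"}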


@ -34,6 +34,12 @@ from .exceptions import (
NoIntegrationDefined,
NoMethodDefined,
ApiKeyAlreadyExists,
DomainNameNotFound,
InvalidDomainName,
InvalidRestApiId,
InvalidModelName,
RestAPINotFound,
ModelNotFound,
)
STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}"
@ -463,8 +469,8 @@ class RestAPI(BaseModel):
self.deployments = {}
self.authorizers = {}
self.stages = {}
self.resources = {}
self.models = {}
self.add_child("/") # Add default child
def __repr__(self):
@ -493,6 +499,29 @@ class RestAPI(BaseModel):
self.resources[child_id] = child
return child
def add_model(
self,
name,
description=None,
schema=None,
content_type=None,
cli_input_json=None,
generate_cli_skeleton=None,
):
model_id = create_id()
new_model = Model(
id=model_id,
name=name,
description=description,
schema=schema,
content_type=content_type,
cli_input_json=cli_input_json,
generate_cli_skeleton=generate_cli_skeleton,
)
self.models[name] = new_model
return new_model
def get_resource_for_path(self, path_after_stage_name):
for resource in self.resources.values():
if resource.get_path() == path_after_stage_name:
@ -609,6 +638,58 @@ class RestAPI(BaseModel):
return self.deployments.pop(deployment_id)
class DomainName(BaseModel, dict):
def __init__(self, domain_name, **kwargs):
super(DomainName, self).__init__()
self["domainName"] = domain_name
self["regionalDomainName"] = domain_name
self["distributionDomainName"] = domain_name
self["domainNameStatus"] = "AVAILABLE"
self["domainNameStatusMessage"] = "Domain Name Available"
self["regionalHostedZoneId"] = "Z2FDTNDATAQYW2"
self["distributionHostedZoneId"] = "Z2FDTNDATAQYW2"
self["certificateUploadDate"] = int(time.time())
if kwargs.get("certificate_name"):
self["certificateName"] = kwargs.get("certificate_name")
if kwargs.get("certificate_arn"):
self["certificateArn"] = kwargs.get("certificate_arn")
if kwargs.get("certificate_body"):
self["certificateBody"] = kwargs.get("certificate_body")
if kwargs.get("tags"):
self["tags"] = kwargs.get("tags")
if kwargs.get("security_policy"):
self["securityPolicy"] = kwargs.get("security_policy")
if kwargs.get("certificate_chain"):
self["certificateChain"] = kwargs.get("certificate_chain")
if kwargs.get("regional_certificate_name"):
self["regionalCertificateName"] = kwargs.get("regional_certificate_name")
if kwargs.get("certificate_private_key"):
self["certificatePrivateKey"] = kwargs.get("certificate_private_key")
if kwargs.get("regional_certificate_arn"):
self["regionalCertificateArn"] = kwargs.get("regional_certificate_arn")
if kwargs.get("endpoint_configuration"):
self["endpointConfiguration"] = kwargs.get("endpoint_configuration")
if kwargs.get("generate_cli_skeleton"):
self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton")
class Model(BaseModel, dict):
def __init__(self, id, name, **kwargs):
super(Model, self).__init__()
self["id"] = id
self["name"] = name
if kwargs.get("description"):
self["description"] = kwargs.get("description")
if kwargs.get("schema"):
self["schema"] = kwargs.get("schema")
if kwargs.get("content_type"):
self["contentType"] = kwargs.get("content_type")
if kwargs.get("cli_input_json"):
self["cliInputJson"] = kwargs.get("cli_input_json")
if kwargs.get("generate_cli_skeleton"):
self["generateCliSkeleton"] = kwargs.get("generate_cli_skeleton")
class APIGatewayBackend(BaseBackend):
def __init__(self, region_name):
super(APIGatewayBackend, self).__init__()
@ -616,6 +697,8 @@ class APIGatewayBackend(BaseBackend):
self.keys = {}
self.usage_plans = {}
self.usage_plan_keys = {}
self.domain_names = {}
self.models = {}
self.region_name = region_name
def reset(self):
@ -645,7 +728,9 @@ class APIGatewayBackend(BaseBackend):
return rest_api
def get_rest_api(self, function_id):
rest_api = self.apis[function_id]
rest_api = self.apis.get(function_id)
if rest_api is None:
raise RestAPINotFound()
return rest_api
def list_apis(self):
@ -1001,6 +1086,98 @@ class APIGatewayBackend(BaseBackend):
except Exception:
return False
def create_domain_name(
self,
domain_name,
certificate_name=None,
tags=None,
certificate_arn=None,
certificate_body=None,
certificate_private_key=None,
certificate_chain=None,
regional_certificate_name=None,
regional_certificate_arn=None,
endpoint_configuration=None,
security_policy=None,
generate_cli_skeleton=None,
):
if not domain_name:
raise InvalidDomainName()
new_domain_name = DomainName(
domain_name=domain_name,
certificate_name=certificate_name,
certificate_private_key=certificate_private_key,
certificate_arn=certificate_arn,
certificate_body=certificate_body,
certificate_chain=certificate_chain,
regional_certificate_name=regional_certificate_name,
regional_certificate_arn=regional_certificate_arn,
endpoint_configuration=endpoint_configuration,
tags=tags,
security_policy=security_policy,
generate_cli_skeleton=generate_cli_skeleton,
)
self.domain_names[domain_name] = new_domain_name
return new_domain_name
def get_domain_names(self):
return list(self.domain_names.values())
def get_domain_name(self, domain_name):
domain_info = self.domain_names.get(domain_name)
if domain_info is None:
raise DomainNameNotFound
else:
return self.domain_names[domain_name]
def create_model(
self,
rest_api_id,
name,
content_type,
description=None,
schema=None,
cli_input_json=None,
generate_cli_skeleton=None,
):
if not rest_api_id:
raise InvalidRestApiId
if not name:
raise InvalidModelName
api = self.get_rest_api(rest_api_id)
new_model = api.add_model(
name=name,
description=description,
schema=schema,
content_type=content_type,
cli_input_json=cli_input_json,
generate_cli_skeleton=generate_cli_skeleton,
)
return new_model
def get_models(self, rest_api_id):
if not rest_api_id:
raise InvalidRestApiId
api = self.get_rest_api(rest_api_id)
models = api.models.values()
return list(models)
def get_model(self, rest_api_id, model_name):
if not rest_api_id:
raise InvalidRestApiId
api = self.get_rest_api(rest_api_id)
model = api.models.get(model_name)
if model is None:
raise ModelNotFound
else:
return model
apigateway_backends = {}
for region_name in Session().get_available_regions("apigateway"):
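
Note that both registries are plain dicts keyed by name, so re-creating a model (or domain name) under an existing key silently replaces the previous entry. A behavioral sketch under that assumption, through the public mock:

import boto3
from moto import mock_apigateway

@mock_apigateway
def model_overwrite():
    client = boto3.client("apigateway", region_name="us-east-1")
    api_id = client.create_rest_api(name="demo")["id"]
    for description in ("first", "second"):
        client.create_model(
            restApiId=api_id,
            name="Pet",
            contentType="application/json",
            description=description,
        )
    # The second create_model replaced the first entry in api.models
    print(client.get_model(restApiId=api_id, modelName="Pet")["description"])  # second

model_overwrite()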


@ -11,6 +11,12 @@ from .exceptions import (
AuthorizerNotFoundException,
StageNotFoundException,
ApiKeyAlreadyExists,
DomainNameNotFound,
InvalidDomainName,
InvalidRestApiId,
InvalidModelName,
RestAPINotFound,
ModelNotFound,
)
API_KEY_SOURCES = ["AUTHORIZER", "HEADER"]
@ -527,3 +533,130 @@ class APIGatewayResponse(BaseResponse):
usage_plan_id, key_id
)
return 200, {}, json.dumps(usage_plan_response)
def domain_names(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
try:
if self.method == "GET":
domain_names = self.backend.get_domain_names()
return 200, {}, json.dumps({"item": domain_names})
elif self.method == "POST":
domain_name = self._get_param("domainName")
certificate_name = self._get_param("certificateName")
tags = self._get_param("tags")
certificate_arn = self._get_param("certificateArn")
certificate_body = self._get_param("certificateBody")
certificate_private_key = self._get_param("certificatePrivateKey")
certificate_chain = self._get_param("certificateChain")
regional_certificate_name = self._get_param("regionalCertificateName")
regional_certificate_arn = self._get_param("regionalCertificateArn")
endpoint_configuration = self._get_param("endpointConfiguration")
security_policy = self._get_param("securityPolicy")
generate_cli_skeleton = self._get_param("generateCliSkeleton")
domain_name_resp = self.backend.create_domain_name(
domain_name,
certificate_name,
tags,
certificate_arn,
certificate_body,
certificate_private_key,
certificate_chain,
regional_certificate_name,
regional_certificate_arn,
endpoint_configuration,
security_policy,
generate_cli_skeleton,
)
return 200, {}, json.dumps(domain_name_resp)
except InvalidDomainName as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
def domain_name_induvidual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
domain_name = url_path_parts[2]
domain_names = {}
try:
if self.method == "GET":
if domain_name is not None:
domain_names = self.backend.get_domain_name(domain_name)
return 200, {}, json.dumps(domain_names)
except DomainNameNotFound as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
def models(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
rest_api_id = self.path.replace("/restapis/", "", 1).split("/")[0]
try:
if self.method == "GET":
models = self.backend.get_models(rest_api_id)
return 200, {}, json.dumps({"item": models})
elif self.method == "POST":
name = self._get_param("name")
description = self._get_param("description")
schema = self._get_param("schema")
content_type = self._get_param("contentType")
cli_input_json = self._get_param("cliInputJson")
generate_cli_skeleton = self._get_param("generateCliSkeleton")
model = self.backend.create_model(
rest_api_id,
name,
content_type,
description,
schema,
cli_input_json,
generate_cli_skeleton,
)
return 200, {}, json.dumps(model)
except (InvalidRestApiId, InvalidModelName, RestAPINotFound) as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
def model_induvidual(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
url_path_parts = self.path.split("/")
rest_api_id = url_path_parts[2]
model_name = url_path_parts[4]
model_info = {}
try:
if self.method == "GET":
model_info = self.backend.get_model(rest_api_id, model_name)
return 200, {}, json.dumps(model_info)
except (
ModelNotFound,
RestAPINotFound,
InvalidRestApiId,
InvalidModelName,
) as error:
return (
error.code,
{},
'{{"message":"{0}","code":"{1}"}}'.format(
error.message, error.error_type
),
)
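
The doubled braces in these format strings are literal braces, so the handlers emit well-formed JSON without going through json.dumps. A quick check of the shape produced:

body = '{{"message":"{0}","code":"{1}"}}'.format(
    "Invalid Domain Name specified", "NotFoundException"
)
print(body)  # {"message":"Invalid Domain Name specified","code":"NotFoundException"}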


@ -21,6 +21,10 @@ url_paths = {
"{0}/apikeys$": APIGatewayResponse().apikeys,
"{0}/apikeys/(?P<apikey>[^/]+)": APIGatewayResponse().apikey_individual,
"{0}/usageplans$": APIGatewayResponse().usage_plans,
"{0}/domainnames$": APIGatewayResponse().domain_names,
"{0}/restapis/(?P<function_id>[^/]+)/models$": APIGatewayResponse().models,
"{0}/restapis/(?P<function_id>[^/]+)/models/(?P<model_name>[^/]+)/?$": APIGatewayResponse().model_induvidual,
"{0}/domainnames/(?P<domain_name>[^/]+)/?$": APIGatewayResponse().domain_name_induvidual,
"{0}/usageplans/(?P<usage_plan_id>[^/]+)/?$": APIGatewayResponse().usage_plan_individual,
"{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys$": APIGatewayResponse().usage_plan_keys,
"{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys/(?P<api_key_id>[^/]+)/?$": APIGatewayResponse().usage_plan_key_individual,


@ -267,6 +267,9 @@ class FakeAutoScalingGroup(BaseModel):
self.tags = tags if tags else []
self.set_desired_capacity(desired_capacity)
def active_instances(self):
return [x for x in self.instance_states if x.lifecycle_state == "InService"]
def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False):
# for updates, if only AZs are provided, they must not clash with
# the AZs of existing VPCs
@ -413,9 +416,11 @@ class FakeAutoScalingGroup(BaseModel):
else:
self.desired_capacity = new_capacity
curr_instance_count = len(self.instance_states)
curr_instance_count = len(self.active_instances())
if self.desired_capacity == curr_instance_count:
self.autoscaling_backend.update_attached_elbs(self.name)
self.autoscaling_backend.update_attached_target_groups(self.name)
return
if self.desired_capacity > curr_instance_count:
@ -442,6 +447,8 @@ class FakeAutoScalingGroup(BaseModel):
self.instance_states = list(
set(self.instance_states) - set(instances_to_remove)
)
self.autoscaling_backend.update_attached_elbs(self.name)
self.autoscaling_backend.update_attached_target_groups(self.name)
def get_propagated_tags(self):
propagated_tags = {}
@ -655,10 +662,16 @@ class AutoScalingBackend(BaseBackend):
self.set_desired_capacity(group_name, 0)
self.autoscaling_groups.pop(group_name, None)
def describe_auto_scaling_instances(self):
def describe_auto_scaling_instances(self, instance_ids):
instance_states = []
for group in self.autoscaling_groups.values():
instance_states.extend(group.instance_states)
instance_states.extend(
[
x
for x in group.instance_states
if not instance_ids or x.instance.id in instance_ids
]
)
return instance_states
def attach_instances(self, group_name, instance_ids):
@ -697,7 +710,7 @@ class AutoScalingBackend(BaseBackend):
def detach_instances(self, group_name, instance_ids, should_decrement):
group = self.autoscaling_groups[group_name]
original_size = len(group.instance_states)
original_size = group.desired_capacity
detached_instances = [
x for x in group.instance_states if x.instance.id in instance_ids
@ -714,13 +727,8 @@ class AutoScalingBackend(BaseBackend):
if should_decrement:
group.desired_capacity = original_size - len(instance_ids)
else:
count_needed = len(instance_ids)
group.replace_autoscaling_group_instances(
count_needed, group.get_propagated_tags()
)
self.update_attached_elbs(group_name)
group.set_desired_capacity(group.desired_capacity)
return detached_instances
def set_desired_capacity(self, group_name, desired_capacity):
@ -785,7 +793,9 @@ class AutoScalingBackend(BaseBackend):
def update_attached_elbs(self, group_name):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(state.instance.id for state in group.instance_states)
group_instance_ids = set(
state.instance.id for state in group.active_instances()
)
# skip this if group.load_balancers is empty
# otherwise elb_backend.describe_load_balancers returns all available load balancers
@ -902,15 +912,15 @@ class AutoScalingBackend(BaseBackend):
autoscaling_group_name,
autoscaling_group,
) in self.autoscaling_groups.items():
original_instance_count = len(autoscaling_group.instance_states)
original_active_instance_count = len(autoscaling_group.active_instances())
autoscaling_group.instance_states = list(
filter(
lambda i_state: i_state.instance.id not in instance_ids,
autoscaling_group.instance_states,
)
)
difference = original_instance_count - len(
autoscaling_group.instance_states
difference = original_active_instance_count - len(
autoscaling_group.active_instances()
)
if difference > 0:
autoscaling_group.replace_autoscaling_group_instances(
@ -918,6 +928,45 @@ class AutoScalingBackend(BaseBackend):
)
self.update_attached_elbs(autoscaling_group_name)
def enter_standby_instances(self, group_name, instance_ids, should_decrement):
group = self.autoscaling_groups[group_name]
original_size = group.desired_capacity
standby_instances = []
for instance_state in group.instance_states:
if instance_state.instance.id in instance_ids:
instance_state.lifecycle_state = "Standby"
standby_instances.append(instance_state)
if should_decrement:
group.desired_capacity = group.desired_capacity - len(instance_ids)
else:
group.set_desired_capacity(group.desired_capacity)
return standby_instances, original_size, group.desired_capacity
def exit_standby_instances(self, group_name, instance_ids):
group = self.autoscaling_groups[group_name]
original_size = group.desired_capacity
standby_instances = []
for instance_state in group.instance_states:
if instance_state.instance.id in instance_ids:
instance_state.lifecycle_state = "InService"
standby_instances.append(instance_state)
group.desired_capacity = group.desired_capacity + len(instance_ids)
return standby_instances, original_size, group.desired_capacity
def terminate_instance(self, instance_id, should_decrement):
instance = self.ec2_backend.get_instance(instance_id)
instance_state = next(
instance_state
for group in self.autoscaling_groups.values()
for instance_state in group.instance_states
if instance_state.instance.id == instance.id
)
group = instance.autoscaling_group
original_size = group.desired_capacity
self.detach_instances(group.name, [instance.id], should_decrement)
self.ec2_backend.terminate_instances([instance.id])
return instance_state, original_size, group.desired_capacity
autoscaling_backends = {}
for region, ec2_backend in ec2_backends.items():
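
The standby and terminate plumbing above hinges on lifecycle_state: anything not "InService" drops out of active_instances(), and therefore out of capacity counts and ELB/target-group registration. A standalone sketch of that filtering, with stand-in objects rather than moto's real InstanceState:

class FakeInstanceState(object):  # stand-in for moto's InstanceState
    def __init__(self, instance_id, lifecycle_state="InService"):
        self.instance_id = instance_id
        self.lifecycle_state = lifecycle_state

states = [
    FakeInstanceState("i-1"),
    FakeInstanceState("i-2", lifecycle_state="Standby"),
    FakeInstanceState("i-3"),
]
active = [s for s in states if s.lifecycle_state == "InService"]
print([s.instance_id for s in active])  # ['i-1', 'i-3'] -- the Standby instance is excluded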


@ -1,7 +1,12 @@
from __future__ import unicode_literals
import datetime
from moto.core.responses import BaseResponse
from moto.core.utils import amz_crc32, amzn_request_id
from moto.core.utils import (
amz_crc32,
amzn_request_id,
iso_8601_datetime_with_milliseconds,
)
from .models import autoscaling_backends
@ -226,7 +231,9 @@ class AutoScalingResponse(BaseResponse):
return template.render()
def describe_auto_scaling_instances(self):
instance_states = self.autoscaling_backend.describe_auto_scaling_instances()
instance_states = self.autoscaling_backend.describe_auto_scaling_instances(
instance_ids=self._get_multi_param("InstanceIds.member")
)
template = self.response_template(DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE)
return template.render(instance_states=instance_states)
@ -289,6 +296,50 @@ class AutoScalingResponse(BaseResponse):
template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def enter_standby(self):
group_name = self._get_param("AutoScalingGroupName")
instance_ids = self._get_multi_param("InstanceIds.member")
should_decrement_string = self._get_param("ShouldDecrementDesiredCapacity")
if should_decrement_string == "true":
should_decrement = True
else:
should_decrement = False
(
standby_instances,
original_size,
desired_capacity,
) = self.autoscaling_backend.enter_standby_instances(
group_name, instance_ids, should_decrement
)
template = self.response_template(ENTER_STANDBY_TEMPLATE)
return template.render(
standby_instances=standby_instances,
should_decrement=should_decrement,
original_size=original_size,
desired_capacity=desired_capacity,
timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
)
@amz_crc32
@amzn_request_id
def exit_standby(self):
group_name = self._get_param("AutoScalingGroupName")
instance_ids = self._get_multi_param("InstanceIds.member")
(
standby_instances,
original_size,
desired_capacity,
) = self.autoscaling_backend.exit_standby_instances(group_name, instance_ids)
template = self.response_template(EXIT_STANDBY_TEMPLATE)
return template.render(
standby_instances=standby_instances,
original_size=original_size,
desired_capacity=desired_capacity,
timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
)
def suspend_processes(self):
autoscaling_group_name = self._get_param("AutoScalingGroupName")
scaling_processes = self._get_multi_param("ScalingProcesses.member")
@ -308,6 +359,29 @@ class AutoScalingResponse(BaseResponse):
template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def terminate_instance_in_auto_scaling_group(self):
instance_id = self._get_param("InstanceId")
should_decrement_string = self._get_param("ShouldDecrementDesiredCapacity")
if should_decrement_string == "true":
should_decrement = True
else:
should_decrement = False
(
instance,
original_size,
desired_capacity,
) = self.autoscaling_backend.terminate_instance(instance_id, should_decrement)
template = self.response_template(TERMINATE_INSTANCES_TEMPLATE)
return template.render(
instance=instance,
should_decrement=should_decrement,
original_size=original_size,
desired_capacity=desired_capacity,
timestamp=iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()),
)
CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
@ -705,3 +779,73 @@ SET_INSTANCE_PROTECTION_TEMPLATE = """<SetInstanceProtectionResponse xmlns="http
<RequestId></RequestId>
</ResponseMetadata>
</SetInstanceProtectionResponse>"""
ENTER_STANDBY_TEMPLATE = """<EnterStandbyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<EnterStandbyResult>
<Activities>
{% for instance in standby_instances %}
<member>
<ActivityId>12345678-1234-1234-1234-123456789012</ActivityId>
<AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
{% if should_decrement %}
<Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved to standby in response to a user request, shrinking the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
{% else %}
<Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved to standby in response to a user request.</Cause>
{% endif %}
<Description>Moving EC2 instance to Standby: {{ instance.instance.id }}</Description>
<Progress>50</Progress>
<StartTime>{{ timestamp }}</StartTime>
<Details>{&quot;Subnet ID&quot;:&quot;??&quot;,&quot;Availability Zone&quot;:&quot;{{ instance.instance.placement }}&quot;}</Details>
<StatusCode>InProgress</StatusCode>
</member>
{% endfor %}
</Activities>
</EnterStandbyResult>
<ResponseMetadata>
<RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
</ResponseMetadata>
</EnterStandbyResponse>"""
EXIT_STANDBY_TEMPLATE = """<ExitStandbyResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ExitStandbyResult>
<Activities>
{% for instance in standby_instances %}
<member>
<ActivityId>12345678-1234-1234-1234-123456789012</ActivityId>
<AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
<Description>Moving EC2 instance out of Standby: {{ instance.instance.id }}</Description>
<Progress>30</Progress>
<Cause>At {{ timestamp }} instance {{ instance.instance.id }} was moved out of standby in response to a user request, increasing the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
<StartTime>{{ timestamp }}</StartTime>
<Details>{&quot;Subnet ID&quot;:&quot;??&quot;,&quot;Availability Zone&quot;:&quot;{{ instance.instance.placement }}&quot;}</Details>
<StatusCode>PreInService</StatusCode>
</member>
{% endfor %}
</Activities>
</ExitStandbyResult>
<ResponseMetadata>
<RequestId>7c6e177f-f082-11e1-ac58-3714bEXAMPLE</RequestId>
</ResponseMetadata>
</ExitStandbyResponse>"""
TERMINATE_INSTANCES_TEMPLATE = """<TerminateInstanceInAutoScalingGroupResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<TerminateInstanceInAutoScalingGroupResult>
<Activity>
<ActivityId>35b5c464-0b63-2fc7-1611-467d4a7f2497EXAMPLE</ActivityId>
<AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
{% if should_decrement %}
<Cause>At {{ timestamp }} instance {{ instance.instance.id }} was taken out of service in response to a user request, shrinking the capacity from {{ original_size }} to {{ desired_capacity }}.</Cause>
{% else %}
<Cause>At {{ timestamp }} instance {{ instance.instance.id }} was taken out of service in response to a user request.</Cause>
{% endif %}
<Description>Terminating EC2 instance: {{ instance.instance.id }}</Description>
<Progress>0</Progress>
<StartTime>{{ timestamp }}</StartTime>
<Details>{&quot;Subnet ID&quot;:&quot;??&quot;,&quot;Availability Zone&quot;:&quot;{{ instance.instance.placement }}&quot;}</Details>
<StatusCode>InProgress</StatusCode>
</Activity>
</TerminateInstanceInAutoScalingGroupResult>
<ResponseMetadata>
<RequestId>a1ba8fb9-31d6-4d9a-ace1-a7f76749df11EXAMPLE</RequestId>
</ResponseMetadata>
</TerminateInstanceInAutoScalingGroupResponse>"""
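
The {{ timestamp }} placeholders above are filled with a millisecond-precision ISO-8601 string. A sketch of the formatting, assuming iso_8601_datetime_with_milliseconds truncates microseconds to milliseconds and appends "Z", which matches how moto's helper behaves:

import datetime

def iso_8601_datetime_with_milliseconds(value):
    # Microseconds truncated to milliseconds, UTC 'Z' suffix.
    return value.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

print(iso_8601_datetime_with_milliseconds(datetime.datetime(2020, 4, 16, 7, 7, 25, 123456)))
# 2020-04-16T07:07:25.123Z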


@ -431,7 +431,9 @@ class LogGroup(BaseModel):
properties = cloudformation_json["Properties"]
log_group_name = properties["LogGroupName"]
tags = properties.get("Tags", {})
return logs_backends[region_name].create_log_group(log_group_name, tags)
return logs_backends[region_name].create_log_group(
log_group_name, tags, **properties
)
cloudwatch_backends = {}
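
With **properties forwarded, extra CloudFormation properties such as RetentionInDays now reach the backend instead of being dropped. A hypothetical template fragment, shown as the parsed dict this method receives:

cloudformation_json = {
    "Properties": {
        "LogGroupName": "my-group",
        "RetentionInDays": 90,
        "Tags": {"env": "test"},
    }
}
properties = cloudformation_json["Properties"]
# create_log_group(log_group_name, tags, **properties) now also passes
# RetentionInDays along, which LogGroup stores (see the logs models below).
print(properties["RetentionInDays"])  # 90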


@ -1,5 +1,5 @@
from __future__ import unicode_literals
from .models import dynamodb_backends as dynamodb_backends2
from moto.dynamodb2.models import dynamodb_backends as dynamodb_backends2
from ..core.models import base_decorator, deprecated_base_decorator
dynamodb_backend2 = dynamodb_backends2["us-east-1"]


@ -6,7 +6,6 @@ import decimal
import json
import re
import uuid
import six
from boto3 import Session
from botocore.exceptions import ParamValidationError
@ -14,10 +13,11 @@ from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from moto.core.exceptions import JsonRESTError
from .comparisons import get_comparison_func
from .comparisons import get_filter_expression
from .comparisons import get_expected
from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge
from moto.dynamodb2.comparisons import get_filter_expression
from moto.dynamodb2.comparisons import get_expected
from moto.dynamodb2.exceptions import InvalidIndexNameError, ItemSizeTooLarge
from moto.dynamodb2.models.utilities import bytesize, attribute_is_list
from moto.dynamodb2.models.dynamo_type import DynamoType
class DynamoJsonEncoder(json.JSONEncoder):
@ -30,223 +30,6 @@ def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
def bytesize(val):
return len(str(val).encode("utf-8"))
def attribute_is_list(attr):
"""
Checks if attribute denotes a list, and returns the name of the list and the given list index if so
:param attr: attr or attr[index]
:return: attr, index or None
"""
list_index_update = re.match("(.+)\\[([0-9]+)\\]", attr)
if list_index_update:
attr = list_index_update.group(1)
return attr, list_index_update.group(2) if list_index_update else None
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
if type(type_as_dict) == DynamoType:
self.type = type_as_dict.type
self.value = type_as_dict.value
else:
self.type = list(type_as_dict)[0]
self.value = list(type_as_dict.values())[0]
if self.is_list():
self.value = [DynamoType(val) for val in self.value]
elif self.is_map():
self.value = dict((k, DynamoType(v)) for k, v in self.value.items())
def get(self, key):
if not key:
return self
else:
key_head = key.split(".")[0]
key_tail = ".".join(key.split(".")[1:])
if key_head not in self.value:
self.value[key_head] = DynamoType({"NONE": None})
return self.value[key_head].get(key_tail)
def set(self, key, new_value, index=None):
if index:
index = int(index)
if type(self.value) is not list:
raise InvalidUpdateExpression
if index >= len(self.value):
self.value.append(new_value)
# {'L': [DynamoType, ..]} ==> DynamoType.set()
self.value[min(index, len(self.value) - 1)].set(key, new_value)
else:
attr = (key or "").split(".").pop(0)
attr, list_index = attribute_is_list(attr)
if not key:
# {'S': value} ==> {'S': new_value}
self.type = new_value.type
self.value = new_value.value
else:
if attr not in self.value:  # non-existing attribute
type_of_new_attr = "M" if "." in key else new_value.type
self.value[attr] = DynamoType({type_of_new_attr: {}})
# {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value)
self.value[attr].set(
".".join(key.split(".")[1:]), new_value, list_index
)
def delete(self, key, index=None):
if index:
if not key:
if int(index) < len(self.value):
del self.value[int(index)]
elif "." in key:
self.value[int(index)].delete(".".join(key.split(".")[1:]))
else:
self.value[int(index)].delete(key)
else:
attr = key.split(".")[0]
attr, list_index = attribute_is_list(attr)
if list_index:
self.value[attr].delete(".".join(key.split(".")[1:]), list_index)
elif "." in key:
self.value[attr].delete(".".join(key.split(".")[1:]))
else:
self.value.pop(key)
def filter(self, projection_expressions):
nested_projections = [
expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr
]
if self.is_map():
expressions_to_delete = []
for attr in self.value:
if (
attr not in projection_expressions
and attr not in nested_projections
):
expressions_to_delete.append(attr)
elif attr in nested_projections:
relevant_expressions = [
expr[len(attr + ".") :]
for expr in projection_expressions
if expr.startswith(attr + ".")
]
self.value[attr].filter(relevant_expressions)
for expr in expressions_to_delete:
self.value.pop(expr)
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return self.type == other.type and self.value == other.value
def __ne__(self, other):
return self.type != other.type or self.value != other.value
def __lt__(self, other):
return self.cast_value < other.cast_value
def __le__(self, other):
return self.cast_value <= other.cast_value
def __gt__(self, other):
return self.cast_value > other.cast_value
def __ge__(self, other):
return self.cast_value >= other.cast_value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
@property
def cast_value(self):
if self.is_number():
try:
return int(self.value)
except ValueError:
return float(self.value)
elif self.is_set():
sub_type = self.type[0]
return set([DynamoType({sub_type: v}).cast_value for v in self.value])
elif self.is_list():
return [DynamoType(v).cast_value for v in self.value]
elif self.is_map():
return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()])
else:
return self.value
def child_attr(self, key):
"""
Get Map or List children by key. str for Map, int for List.
Returns DynamoType or None.
"""
if isinstance(key, six.string_types) and self.is_map():
if "." in key and key.split(".")[0] in self.value:
return self.value[key.split(".")[0]].child_attr(
".".join(key.split(".")[1:])
)
elif "." not in key and key in self.value:
return DynamoType(self.value[key])
if isinstance(key, int) and self.is_list():
idx = key
if 0 <= idx < len(self.value):
return DynamoType(self.value[idx])
return None
def size(self):
if self.is_number():
value_size = len(str(self.value))
elif self.is_set():
sub_type = self.type[0]
value_size = sum([DynamoType({sub_type: v}).size() for v in self.value])
elif self.is_list():
value_size = sum([v.size() for v in self.value])
elif self.is_map():
value_size = sum(
[bytesize(k) + DynamoType(v).size() for k, v in self.value.items()]
)
elif type(self.value) == bool:
value_size = 1
else:
value_size = bytesize(self.value)
return value_size
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.cast_value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.cast_value, *range_values)
def is_number(self):
return self.type == "N"
def is_set(self):
return self.type == "SS" or self.type == "NS" or self.type == "BS"
def is_list(self):
return self.type == "L"
def is_map(self):
return self.type == "M"
def same_type(self, other):
return self.type == other.type
# https://github.com/spulec/moto/issues/1874
# Ensure that the total size of an item does not exceed 400kb
class LimitedSizeDict(dict):
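
The block deleted here was not dropped but moved: the same names now resolve from the split-out modules below, so downstream imports keep working. A quick sanity check against the new layout:

from moto.dynamodb2.models.dynamo_type import DynamoType
from moto.dynamodb2.models.utilities import attribute_is_list, bytesize

print(DynamoType({"N": "3"}).cast_value + 1)  # 4 -- numbers cast to int/float
print(attribute_is_list("tags[0]"))           # ('tags', '0')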


@ -0,0 +1,206 @@
import six
from moto.dynamodb2.comparisons import get_comparison_func
from moto.dynamodb2.exceptions import InvalidUpdateExpression
from moto.dynamodb2.models.utilities import attribute_is_list, bytesize
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
if type(type_as_dict) == DynamoType:
self.type = type_as_dict.type
self.value = type_as_dict.value
else:
self.type = list(type_as_dict)[0]
self.value = list(type_as_dict.values())[0]
if self.is_list():
self.value = [DynamoType(val) for val in self.value]
elif self.is_map():
self.value = dict((k, DynamoType(v)) for k, v in self.value.items())
def get(self, key):
if not key:
return self
else:
key_head = key.split(".")[0]
key_tail = ".".join(key.split(".")[1:])
if key_head not in self.value:
self.value[key_head] = DynamoType({"NONE": None})
return self.value[key_head].get(key_tail)
def set(self, key, new_value, index=None):
if index:
index = int(index)
if type(self.value) is not list:
raise InvalidUpdateExpression
if index >= len(self.value):
self.value.append(new_value)
# {'L': [DynamoType, ..]} ==> DynamoType.set()
self.value[min(index, len(self.value) - 1)].set(key, new_value)
else:
attr = (key or "").split(".").pop(0)
attr, list_index = attribute_is_list(attr)
if not key:
# {'S': value} ==> {'S': new_value}
self.type = new_value.type
self.value = new_value.value
else:
if attr not in self.value:  # non-existing attribute
type_of_new_attr = "M" if "." in key else new_value.type
self.value[attr] = DynamoType({type_of_new_attr: {}})
# {'M': {'foo': DynamoType}} ==> DynamoType.set(new_value)
self.value[attr].set(
".".join(key.split(".")[1:]), new_value, list_index
)
def delete(self, key, index=None):
if index:
if not key:
if int(index) < len(self.value):
del self.value[int(index)]
elif "." in key:
self.value[int(index)].delete(".".join(key.split(".")[1:]))
else:
self.value[int(index)].delete(key)
else:
attr = key.split(".")[0]
attr, list_index = attribute_is_list(attr)
if list_index:
self.value[attr].delete(".".join(key.split(".")[1:]), list_index)
elif "." in key:
self.value[attr].delete(".".join(key.split(".")[1:]))
else:
self.value.pop(key)
def filter(self, projection_expressions):
nested_projections = [
expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr
]
if self.is_map():
expressions_to_delete = []
for attr in self.value:
if (
attr not in projection_expressions
and attr not in nested_projections
):
expressions_to_delete.append(attr)
elif attr in nested_projections:
relevant_expressions = [
expr[len(attr + ".") :]
for expr in projection_expressions
if expr.startswith(attr + ".")
]
self.value[attr].filter(relevant_expressions)
for expr in expressions_to_delete:
self.value.pop(expr)
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return self.type == other.type and self.value == other.value
def __ne__(self, other):
return self.type != other.type or self.value != other.value
def __lt__(self, other):
return self.cast_value < other.cast_value
def __le__(self, other):
return self.cast_value <= other.cast_value
def __gt__(self, other):
return self.cast_value > other.cast_value
def __ge__(self, other):
return self.cast_value >= other.cast_value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
@property
def cast_value(self):
if self.is_number():
try:
return int(self.value)
except ValueError:
return float(self.value)
elif self.is_set():
sub_type = self.type[0]
return set([DynamoType({sub_type: v}).cast_value for v in self.value])
elif self.is_list():
return [DynamoType(v).cast_value for v in self.value]
elif self.is_map():
return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()])
else:
return self.value
def child_attr(self, key):
"""
Get Map or List children by key. str for Map, int for List.
Returns DynamoType or None.
"""
if isinstance(key, six.string_types) and self.is_map():
if "." in key and key.split(".")[0] in self.value:
return self.value[key.split(".")[0]].child_attr(
".".join(key.split(".")[1:])
)
elif "." not in key and key in self.value:
return DynamoType(self.value[key])
if isinstance(key, int) and self.is_list():
idx = key
if 0 <= idx < len(self.value):
return DynamoType(self.value[idx])
return None
def size(self):
if self.is_number():
value_size = len(str(self.value))
elif self.is_set():
sub_type = self.type[0]
value_size = sum([DynamoType({sub_type: v}).size() for v in self.value])
elif self.is_list():
value_size = sum([v.size() for v in self.value])
elif self.is_map():
value_size = sum(
[bytesize(k) + DynamoType(v).size() for k, v in self.value.items()]
)
elif type(self.value) == bool:
value_size = 1
else:
value_size = bytesize(self.value)
return value_size
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.cast_value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.cast_value, *range_values)
def is_number(self):
return self.type == "N"
def is_set(self):
return self.type == "SS" or self.type == "NS" or self.type == "BS"
def is_list(self):
return self.type == "L"
def is_map(self):
return self.type == "M"
def same_type(self, other):
return self.type == other.type
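
A short usage sketch of DynamoType's nested access, per the child_attr and size rules above:

from moto.dynamodb2.models.dynamo_type import DynamoType

item = DynamoType({"M": {"spam": {"S": "eggs"}, "count": {"N": "2"}}})
print(item.child_attr("spam").cast_value)  # eggs
print(item.child_attr(0))                  # None -- int keys only apply to lists
print(item.size())                         # byte size: keys via bytesize(), values via size()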


@ -0,0 +1,17 @@
import re
def bytesize(val):
return len(str(val).encode("utf-8"))
def attribute_is_list(attr):
"""
Checks if attribute denotes a list, and returns the name of the list and the given list index if so
:param attr: attr or attr[index]
:return: attr, index or None
"""
list_index_update = re.match("(.+)\\[([0-9]+)\\]", attr)
if list_index_update:
attr = list_index_update.group(1)
return attr, list_index_update.group(2) if list_index_update else None
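
For illustration, the two helpers in action (outputs follow the regex above):

from moto.dynamodb2.models.utilities import attribute_is_list, bytesize

print(bytesize("héllo"))              # 6 -- UTF-8 byte length, not character count
print(attribute_is_list("tags[2]"))   # ('tags', '2')
print(attribute_is_list("tags"))      # ('tags', None)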


@ -10,7 +10,7 @@ import six
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .exceptions import InvalidIndexNameError, InvalidUpdateExpression, ItemSizeTooLarge
from .models import dynamodb_backends, dynamo_json_dump
from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump
TRANSACTION_MAX_ITEMS = 25


@ -7,7 +7,7 @@ import base64
from boto3 import Session
from moto.core import BaseBackend, BaseModel
from moto.dynamodb2.models import dynamodb_backends
from moto.dynamodb2.models import dynamodb_backends, DynamoJsonEncoder
class ShardIterator(BaseModel):
@ -137,7 +137,7 @@ class DynamoDBStreamsBackend(BaseBackend):
def get_records(self, iterator_arn, limit):
shard_iterator = self.shard_iterators[iterator_arn]
return json.dumps(shard_iterator.get(limit))
return json.dumps(shard_iterator.get(limit), cls=DynamoJsonEncoder)
dynamodbstreams_backends = {}
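
The encoder matters here because stream records can embed DynamoType values, which the stock json module cannot serialize; DynamoJsonEncoder falls back to each object's to_json(). A sketch:

import json

from moto.dynamodb2.models import DynamoJsonEncoder
from moto.dynamodb2.models.dynamo_type import DynamoType

record = {"NewImage": DynamoType({"S": "hello"})}
print(json.dumps(record, cls=DynamoJsonEncoder))  # {"NewImage": {"S": "hello"}}
# Plain json.dumps(record) would raise TypeError: DynamoType is not JSON serializable.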


@ -231,6 +231,14 @@ class InvalidVolumeAttachmentError(EC2ClientError):
)
class VolumeInUseError(EC2ClientError):
def __init__(self, volume_id, instance_id):
super(VolumeInUseError, self).__init__(
"VolumeInUse",
"Volume {0} is currently attached to {1}".format(volume_id, instance_id),
)
class InvalidDomainError(EC2ClientError):
def __init__(self, domain):
super(InvalidDomainError, self).__init__(


@ -70,6 +70,7 @@ from .exceptions import (
InvalidSubnetIdError,
InvalidSubnetRangeError,
InvalidVolumeIdError,
VolumeInUseError,
InvalidVolumeAttachmentError,
InvalidVpcCidrBlockAssociationIdError,
InvalidVPCPeeringConnectionIdError,
@ -936,6 +937,12 @@ class InstanceBackend(object):
value = getattr(instance, key)
return instance, value
def describe_instance_credit_specifications(self, instance_ids):
queried_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
queried_instances.append(instance)
return queried_instances
def all_instances(self, filters=None):
instances = []
for reservation in self.all_reservations():
@ -2385,6 +2392,9 @@ class EBSBackend(object):
def delete_volume(self, volume_id):
if volume_id in self.volumes:
volume = self.volumes[volume_id]
if volume.attachment:
raise VolumeInUseError(volume_id, volume.attachment.instance.id)
return self.volumes.pop(volume_id)
raise InvalidVolumeIdError(volume_id)
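
End to end, the new guard surfaces as a VolumeInUse client error. A sketch using the public mock (the AMI id is moto's stock example image):

import boto3
from moto import mock_ec2

@mock_ec2
def delete_attached_volume():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    instance_id = ec2.run_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)[
        "Instances"
    ][0]["InstanceId"]
    volume_id = ec2.create_volume(AvailabilityZone="us-east-1a", Size=8)["VolumeId"]
    ec2.attach_volume(Device="/dev/sdf", InstanceId=instance_id, VolumeId=volume_id)
    ec2.delete_volume(VolumeId=volume_id)  # raises ClientError with code VolumeInUse

delete_attached_volume()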

File diff suppressed because one or more lines are too long


@ -35,6 +35,7 @@ DESCRIBE_ZONES_RESPONSE = """<DescribeAvailabilityZonesResponse xmlns="http://ec
<zoneName>{{ zone.name }}</zoneName>
<zoneState>available</zoneState>
<regionName>{{ zone.region_name }}</regionName>
<zoneId>{{ zone.zone_id }}</zoneId>
<messageSet/>
</item>
{% endfor %}


@ -168,6 +168,14 @@ class InstanceResponse(BaseResponse):
return template.render(instance=instance, attribute=attribute, value=value)
def describe_instance_credit_specifications(self):
instance_ids = self._get_multi_param("InstanceId")
instance = self.ec2_backend.describe_instance_credit_specifications(
instance_ids
)
template = self.response_template(EC2_DESCRIBE_INSTANCE_CREDIT_SPECIFICATIONS)
return template.render(instances=instance)
def modify_instance_attribute(self):
handlers = [
self._dot_value_instance_attribute_handler,
@ -671,6 +679,18 @@ EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="h
</{{ attribute }}>
</DescribeInstanceAttributeResponse>"""
EC2_DESCRIBE_INSTANCE_CREDIT_SPECIFICATIONS = """<DescribeInstanceCreditSpecificationsResponse xmlns="http://ec2.amazonaws.com/doc/2016-11-15/">
<requestId>1b234b5c-d6ef-7gh8-90i1-j2345678901</requestId>
<instanceCreditSpecificationSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<cpuCredits>standard</cpuCredits>
</item>
{% endfor %}
</instanceCreditSpecificationSet>
</DescribeInstanceCreditSpecificationsResponse>"""
EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
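
A usage sketch of the new call through the public mock (again with moto's stock example AMI):

import boto3
from moto import mock_ec2

@mock_ec2
def credit_specifications():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    instance_id = ec2.run_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)[
        "Instances"
    ][0]["InstanceId"]
    specs = ec2.describe_instance_credit_specifications(InstanceIds=[instance_id])
    print(specs["InstanceCreditSpecifications"])
    # [{'InstanceId': 'i-...', 'CpuCredits': 'standard'}]

credit_specifications()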


@ -604,7 +604,10 @@ class EC2ContainerServiceBackend(BaseBackend):
raise Exception("{0} is not a task_definition".format(task_definition_name))
def run_task(self, cluster_str, task_definition_str, count, overrides, started_by):
cluster_name = cluster_str.split("/")[-1]
if cluster_str:
cluster_name = cluster_str.split("/")[-1]
else:
cluster_name = "default"
if cluster_name in self.clusters:
cluster = self.clusters[cluster_name]
else:
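
With the fallback, omitting the cluster argument now targets the "default" cluster instead of failing on None.split. A sketch (the task definition here is illustrative):

import boto3
from moto import mock_ecs

@mock_ecs
def run_on_default_cluster():
    ecs = boto3.client("ecs", region_name="us-east-1")
    ecs.create_cluster(clusterName="default")
    ecs.register_task_definition(
        family="demo",
        containerDefinitions=[{"name": "app", "image": "busybox", "memory": 128}],
    )
    tasks = ecs.run_task(taskDefinition="demo", count=1)  # no cluster argument
    print(tasks["tasks"][0]["clusterArn"])  # ...cluster/default

run_on_default_cluster()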


@ -26,6 +26,10 @@ class Rule(BaseModel):
self.role_arn = kwargs.get("RoleArn")
self.targets = []
@property
def physical_resource_id(self):
return self.name
# This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts
# with Python 2.6, so tracking it with an array it is.
def _check_target_exists(self, target_id):
@ -59,6 +63,14 @@ class Rule(BaseModel):
if index is not None:
self.targets.pop(index)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
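
Together these let CloudFormation resolve Ref on a rule (via physical_resource_id, the rule name) and Fn::GetAtt on its Arn. A hypothetical template fragment, shown as the parsed dict, that now works under moto's CloudFormation mock:

template = {
    "Resources": {
        "MyRule": {
            "Type": "AWS::Events::Rule",
            "Properties": {"ScheduleExpression": "rate(5 minutes)"},
        }
    },
    "Outputs": {
        "RuleName": {"Value": {"Ref": "MyRule"}},                 # -> rule.name
        "RuleArn": {"Value": {"Fn::GetAtt": ["MyRule", "Arn"]}},  # -> rule.arn
    },
}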


@ -134,7 +134,7 @@ class LogStream:
return None, 0
events = sorted(
filter(filter_func, self.events), key=lambda event: event.timestamp,
filter(filter_func, self.events), key=lambda event: event.timestamp
)
direction, index = get_index_and_direction_from_token(next_token)
@ -169,11 +169,7 @@ class LogStream:
if end_index > final_index:
end_index = final_index
elif end_index < 0:
return (
[],
"b/{:056d}".format(0),
"f/{:056d}".format(0),
)
return ([], "b/{:056d}".format(0), "f/{:056d}".format(0))
events_page = [
event.to_response_dict() for event in events[start_index : end_index + 1]
@ -219,7 +215,7 @@ class LogStream:
class LogGroup:
def __init__(self, region, name, tags):
def __init__(self, region, name, tags, **kwargs):
self.name = name
self.region = region
self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
@ -228,9 +224,9 @@ class LogGroup:
self.creationTime = int(unix_time_millis())
self.tags = tags
self.streams = dict() # {name: LogStream}
self.retentionInDays = (
None # AWS defaults to Never Expire for log group retention
)
self.retention_in_days = kwargs.get(
"RetentionInDays"
) # AWS defaults to Never Expire for log group retention
def create_log_stream(self, log_stream_name):
if log_stream_name in self.streams:
@ -368,12 +364,12 @@ class LogGroup:
"storedBytes": sum(s.storedBytes for s in self.streams.values()),
}
# AWS only returns retentionInDays if a value is set for the log group (ie. not Never Expire)
if self.retentionInDays:
log_group["retentionInDays"] = self.retentionInDays
if self.retention_in_days:
log_group["retentionInDays"] = self.retention_in_days
return log_group
def set_retention_policy(self, retention_in_days):
self.retentionInDays = retention_in_days
self.retention_in_days = retention_in_days
def list_tags(self):
return self.tags if self.tags else {}
@ -401,10 +397,12 @@ class LogsBackend(BaseBackend):
self.__dict__ = {}
self.__init__(region_name)
def create_log_group(self, log_group_name, tags):
def create_log_group(self, log_group_name, tags, **kwargs):
if log_group_name in self.groups:
raise ResourceAlreadyExistsException()
self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
self.groups[log_group_name] = LogGroup(
self.region_name, log_group_name, tags, **kwargs
)
return self.groups[log_group_name]
def ensure_log_group(self, log_group_name, tags):
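
Behavioral sketch of the renamed field: retention only appears in describe output when set, matching AWS's "Never Expire" default. This assumes to_describe_dict is the serializer building the dict shown above, as in moto's logs model:

from moto.logs.models import LogGroup

with_retention = LogGroup("us-east-1", "my-group", {}, RetentionInDays=7)
print(with_retention.to_describe_dict().get("retentionInDays"))  # 7

default = LogGroup("us-east-1", "my-group", {})
print("retentionInDays" in default.to_describe_dict())  # False -- Never Expire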


@ -1232,9 +1232,8 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
)
new_key.set_tagging(tagging)
template = self.response_template(S3_OBJECT_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
response_headers = {}
@ -1552,8 +1551,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
self.backend.delete_key(bucket_name, key_name, version_id=version_id)
template = self.response_template(S3_DELETE_OBJECT_SUCCESS)
return 204, {}, template.render()
return 204, {}, ""
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName("Part")
@ -1868,20 +1866,6 @@ S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
{% endfor %}
</DeleteResult>"""
S3_DELETE_OBJECT_SUCCESS = """<DeleteObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteObjectResponse>
<Code>200</Code>
<Description>OK</Description>
</DeleteObjectResponse>
</DeleteObjectResponse>"""
S3_OBJECT_RESPONSE = """<PutObjectResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<PutObjectResponse>
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</PutObjectResponse>
</PutObjectResponse>"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
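
After this change a successful PUT returns an empty body; clients read the ETag from the response headers (populated from new_key.response_dict) rather than from an XML body. A sketch via the public mock:

import boto3
from moto import mock_s3

@mock_s3
def put_and_check_etag():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="demo-bucket")
    resp = s3.put_object(Bucket="demo-bucket", Key="hello.txt", Body=b"hi")
    print(resp["ETag"])  # parsed by boto3 from the ETag response header

put_and_check_etag()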


@ -1,4 +1,5 @@
#!/usr/bin/env python
import json
import os
import subprocess
@ -11,128 +12,142 @@ class Instance(object):
self.instance = instance
def _get_td(self, td):
return self.instance.find('td', attrs={'class': td})
return self.instance.find("td", attrs={"class": td})
def _get_sort(self, td):
return float(self.instance.find('td', attrs={'class': td}).find('span')['sort'])
return float(self.instance.find("td", attrs={"class": td}).find("span")["sort"])
@property
def name(self):
return self._get_td('name').text.strip()
return self._get_td("name").text.strip()
@property
def apiname(self):
return self._get_td('apiname').text.strip()
return self._get_td("apiname").text.strip()
@property
def memory(self):
return self._get_sort('memory')
return self._get_sort("memory")
@property
def computeunits(self):
return self._get_sort('computeunits')
return self._get_sort("computeunits")
@property
def vcpus(self):
return self._get_sort('vcpus')
return self._get_sort("vcpus")
@property
def gpus(self):
return int(self._get_td('gpus').text.strip())
return int(self._get_td("gpus").text.strip())
@property
def fpga(self):
return int(self._get_td('fpga').text.strip())
return int(self._get_td("fpga").text.strip())
@property
def ecu_per_vcpu(self):
return self._get_sort('ecu-per-vcpu')
return self._get_sort("ecu-per-vcpu")
@property
def physical_processor(self):
return self._get_td('physical_processor').text.strip()
return self._get_td("physical_processor").text.strip()
@property
def clock_speed_ghz(self):
return self._get_td('clock_speed_ghz').text.strip()
return self._get_td("clock_speed_ghz").text.strip()
@property
def intel_avx(self):
return self._get_td('intel_avx').text.strip()
return self._get_td("intel_avx").text.strip()
@property
def intel_avx2(self):
return self._get_td('intel_avx2').text.strip()
return self._get_td("intel_avx2").text.strip()
@property
def intel_turbo(self):
return self._get_td('intel_turbo').text.strip()
return self._get_td("intel_turbo").text.strip()
@property
def storage(self):
return self._get_sort('storage')
return self._get_sort("storage")
@property
def architecture(self):
return self._get_td('architecture').text.strip()
return self._get_td("architecture").text.strip()
@property
def network_perf(self): # 2 == low
return self._get_sort('networkperf')
return self._get_sort("networkperf")
@property
def ebs_max_bandwidth(self):
return self._get_sort('ebs-max-bandwidth')
return self._get_sort("ebs-max-bandwidth")
@property
def ebs_throughput(self):
return self._get_sort('ebs-throughput')
return self._get_sort("ebs-throughput")
@property
def ebs_iops(self):
return self._get_sort('ebs-iops')
return self._get_sort("ebs-iops")
@property
def max_ips(self):
return int(self._get_td('maxips').text.strip())
return int(self._get_td("maxips").text.strip())
@property
def enhanced_networking(self):
return self._get_td('enhanced-networking').text.strip() != 'No'
return self._get_td("enhanced-networking").text.strip() != "No"
@property
def vpc_only(self):
return self._get_td('vpc-only').text.strip() != 'No'
return self._get_td("vpc-only").text.strip() != "No"
@property
def ipv6_support(self):
return self._get_td('ipv6-support').text.strip() != 'No'
return self._get_td("ipv6-support").text.strip() != "No"
@property
def placement_group_support(self):
return self._get_td('placement-group-support').text.strip() != 'No'
return self._get_td("placement-group-support").text.strip() != "No"
@property
def linux_virtualization(self):
return self._get_td('linux-virtualization').text.strip()
return self._get_td("linux-virtualization").text.strip()
def to_dict(self):
result = {}
for attr in [x for x in self.__class__.__dict__.keys() if not x.startswith('_') and x != 'to_dict']:
result[attr] = getattr(self, attr)
for attr in [
x
for x in self.__class__.__dict__.keys()
if not x.startswith("_") and x != "to_dict"
]:
try:
result[attr] = getattr(self, attr)
except ValueError as ex:
if "'N/A'" in str(ex):
print(
"Skipping attribute '{0}' for instance type '{1}' (not found)".format(
attr, self.name
)
)
else:
raise
return self.apiname, result
def main():
print("Getting HTML from http://www.ec2instances.info")
page_request = requests.get('http://www.ec2instances.info')
soup = BeautifulSoup(page_request.text, 'html.parser')
data_table = soup.find(id='data')
page_request = requests.get("http://www.ec2instances.info")
soup = BeautifulSoup(page_request.text, "html.parser")
data_table = soup.find(id="data")
print("Finding data in table")
instances = data_table.find('tbody').find_all('tr')
instances = data_table.find("tbody").find_all("tr")
print("Parsing data")
result = {}
@ -140,11 +155,16 @@ def main():
instance_id, instance_data = Instance(instance).to_dict()
result[instance_id] = instance_data
root_dir = subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip()
dest = os.path.join(root_dir, 'moto/ec2/resources/instance_types.json')
root_dir = (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.decode()
.strip()
)
dest = os.path.join(root_dir, "moto/ec2/resources/instance_types.json")
print("Writing data to {0}".format(dest))
with open(dest, 'w') as open_file:
json.dump(result, open_file)
with open(dest, "w") as open_file:
json.dump(result, open_file, sort_keys=True)
if __name__ == '__main__':
if __name__ == "__main__":
main()


@ -1483,6 +1483,181 @@ def test_deployment():
stage["description"].should.equal("_new_description_")
@mock_apigateway
def test_create_domain_names():
client = boto3.client("apigateway", region_name="us-west-2")
domain_name = "testDomain"
test_certificate_name = "test.certificate"
test_certificate_private_key = "testPrivateKey"
# success case with valid params
response = client.create_domain_name(
domainName=domain_name,
certificateName=test_certificate_name,
certificatePrivateKey=test_certificate_private_key,
)
response["domainName"].should.equal(domain_name)
response["certificateName"].should.equal(test_certificate_name)
# without domain name it should throw BadRequestException
with assert_raises(ClientError) as ex:
client.create_domain_name(domainName="")
ex.exception.response["Error"]["Message"].should.equal("No Domain Name specified")
ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
@mock_apigateway
def test_get_domain_names():
client = boto3.client("apigateway", region_name="us-west-2")
# without any domain names already present
result = client.get_domain_names()
result["items"].should.equal([])
domain_name = "testDomain"
test_certificate_name = "test.certificate"
response = client.create_domain_name(
domainName=domain_name, certificateName=test_certificate_name
)
response["domainName"].should.equal(domain_name)
response["certificateName"].should.equal(test_certificate_name)
response["domainNameStatus"].should.equal("AVAILABLE")
# after adding a new domain name
result = client.get_domain_names()
result["items"][0]["domainName"].should.equal(domain_name)
result["items"][0]["certificateName"].should.equal(test_certificate_name)
result["items"][0]["domainNameStatus"].should.equal("AVAILABLE")
@mock_apigateway
def test_get_domain_name():
client = boto3.client("apigateway", region_name="us-west-2")
domain_name = "testDomain"
# querying an invalid domain name that is not present
with assert_raises(ClientError) as ex:
client.get_domain_name(domainName=domain_name)
ex.exception.response["Error"]["Message"].should.equal(
"Invalid Domain Name specified"
)
ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
# adding a domain name
client.create_domain_name(domainName=domain_name)
# retrieving the data of added domain name.
result = client.get_domain_name(domainName=domain_name)
result["domainName"].should.equal(domain_name)
result["domainNameStatus"].should.equal("AVAILABLE")
@mock_apigateway
def test_create_model():
client = boto3.client("apigateway", region_name="us-west-2")
response = client.create_rest_api(name="my_api", description="this is my api")
rest_api_id = response["id"]
dummy_rest_api_id = "a12b3c4d"
model_name = "testModel"
description = "test model"
content_type = "application/json"
# success case with valid params
response = client.create_model(
restApiId=rest_api_id,
name=model_name,
description=description,
contentType=content_type,
)
response["name"].should.equal(model_name)
response["description"].should.equal(description)
# with an invalid rest_api_id it should throw NotFoundException
with assert_raises(ClientError) as ex:
client.create_model(
restApiId=dummy_rest_api_id,
name=model_name,
description=description,
contentType=content_type,
)
ex.exception.response["Error"]["Message"].should.equal(
"Invalid Rest API Id specified"
)
ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
with assert_raises(ClientError) as ex:
client.create_model(
restApiId=rest_api_id,
name="",
description=description,
contentType=content_type,
)
ex.exception.response["Error"]["Message"].should.equal("No Model Name specified")
ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
@mock_apigateway
def test_get_api_models():
client = boto3.client("apigateway", region_name="us-west-2")
response = client.create_rest_api(name="my_api", description="this is my api")
rest_api_id = response["id"]
model_name = "testModel"
description = "test model"
content_type = "application/json"
# when no models are present
result = client.get_models(restApiId=rest_api_id)
result["items"].should.equal([])
# add a model
client.create_model(
restApiId=rest_api_id,
name=model_name,
description=description,
contentType=content_type,
)
# get models after adding
result = client.get_models(restApiId=rest_api_id)
result["items"][0]["name"] = model_name
result["items"][0]["description"] = description
@mock_apigateway
def test_get_model_by_name():
client = boto3.client("apigateway", region_name="us-west-2")
response = client.create_rest_api(name="my_api", description="this is my api")
rest_api_id = response["id"]
dummy_rest_api_id = "a12b3c4d"
model_name = "testModel"
description = "test model"
content_type = "application/json"
# add a model
client.create_model(
restApiId=rest_api_id,
name=model_name,
description=description,
contentType=content_type,
)
# get the model after adding
result = client.get_model(restApiId=rest_api_id, modelName=model_name)
result["name"] = model_name
result["description"] = description
with assert_raises(ClientError) as ex:
client.get_model(restApiId=dummy_rest_api_id, modelName=model_name)
ex.exception.response["Error"]["Message"].should.equal(
"Invalid Rest API Id specified"
)
ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
@mock_apigateway
def test_get_model_with_invalid_name():
client = boto3.client("apigateway", region_name="us-west-2")
response = client.create_rest_api(name="my_api", description="this is my api")
rest_api_id = response["id"]
# test with an invalid model name
with assert_raises(ClientError) as ex:
client.get_model(restApiId=rest_api_id, modelName="fake")
ex.exception.response["Error"]["Message"].should.equal(
"Invalid Model Name specified"
)
ex.exception.response["Error"]["Code"].should.equal("NotFoundException")
@mock_apigateway
def test_http_proxying_integration():
responses.add(

View File

@@ -843,13 +843,41 @@ def test_describe_autoscaling_instances_boto3():
NewInstancesProtectedFromScaleIn=True,
)
response = client.describe_auto_scaling_instances()
len(response["AutoScalingInstances"]).should.equal(5)
for instance in response["AutoScalingInstances"]:
instance["AutoScalingGroupName"].should.equal("test_asg")
instance["AvailabilityZone"].should.equal("us-east-1a")
instance["ProtectedFromScaleIn"].should.equal(True)
@mock_autoscaling
def test_describe_autoscaling_instances_instanceid_filter():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=20,
DesiredCapacity=5,
VPCZoneIdentifier=mocked_networking["subnet1"],
NewInstancesProtectedFromScaleIn=True,
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_ids = [
instance["InstanceId"]
for instance in response["AutoScalingGroups"][0]["Instances"]
]
response = client.describe_auto_scaling_instances(
InstanceIds=instance_ids[0:2]
)  # Filter by first 2 of 5
len(response["AutoScalingInstances"]).should.equal(2)
for instance in response["AutoScalingInstances"]:
instance["AutoScalingGroupName"].should.equal("test_asg")
instance["AvailabilityZone"].should.equal("us-east-1a")
@@ -1074,8 +1102,6 @@ def test_detach_one_instance_decrement():
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
response = client.detach_instances(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_detach],
@@ -1128,8 +1154,6 @@ def test_detach_one_instance():
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
response = client.detach_instances(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_detach],
@@ -1150,6 +1174,516 @@ def test_detach_one_instance():
tags.should.have.length_of(2)
@mock_autoscaling
@mock_ec2
def test_standby_one_instance_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=2,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby],
ShouldDecrementDesiredCapacity=True,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
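# With ShouldDecrementDesiredCapacity=True no replacement is launched: both
# original instances remain (one InService, one Standby) and desired capacity drops to 1.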
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(2)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)
response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby])
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
# test to ensure tag has been retained (standby instance is still part of the ASG)
response = ec2_client.describe_instances()
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
tags = instance["Tags"]
tags.should.have.length_of(2)
@mock_autoscaling
@mock_ec2
def test_standby_one_instance():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=2,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
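# Without the decrement, desired capacity stays at 2, so a replacement is
# launched and the group now tracks 3 instances (2 InService + 1 Standby).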
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby])
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
# test to ensure tag has been retained (standby instance is still part of the ASG)
response = ec2_client.describe_instances()
for reservation in response["Reservations"]:
for instance in reservation["Instances"]:
tags = instance["Tags"]
tags.should.have.length_of(2)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_elb_update():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=2,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(InstanceIds=[instance_to_standby])
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
@mock_autoscaling
@mock_ec2
def test_standby_terminate_instance_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=3,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_terminate],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(
InstanceIds=[instance_to_standby_terminate]
)
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
response = client.terminate_instance_in_auto_scaling_group(
InstanceId=instance_to_standby_terminate, ShouldDecrementDesiredCapacity=True
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# AWS still decrements the ASG's desired capacity if requested, even if the terminated instance is in standby
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1)
response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"].should_not.equal(
instance_to_standby_terminate
)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)
response = ec2_client.describe_instances(
InstanceIds=[instance_to_standby_terminate]
)
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal(
"terminated"
)
@mock_autoscaling
@mock_ec2
def test_standby_terminate_instance_no_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=3,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_terminate],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(
InstanceIds=[instance_to_standby_terminate]
)
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
response = client.terminate_instance_in_auto_scaling_group(
InstanceId=instance_to_standby_terminate, ShouldDecrementDesiredCapacity=False
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
group = response["AutoScalingGroups"][0]
group["Instances"].should.have.length_of(2)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in group["Instances"]]
)
group["DesiredCapacity"].should.equal(2)
response = ec2_client.describe_instances(
InstanceIds=[instance_to_standby_terminate]
)
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal(
"terminated"
)
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=3,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_detach],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(
InstanceIds=[instance_to_standby_detach]
)
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
response = client.detach_instances(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_detach],
ShouldDecrementDesiredCapacity=True,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
# AWS still decrements the ASG's desired capacity if requested, even if the detached instance was in standby
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1)
response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"].should_not.equal(
instance_to_standby_detach
)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_no_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=3,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_detach],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(
InstanceIds=[instance_to_standby_detach]
)
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
response = client.detach_instances(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_detach],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
group = response["AutoScalingGroups"][0]
group["Instances"].should.have.length_of(2)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in group["Instances"]]
)
group["DesiredCapacity"].should.equal(2)
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
@mock_autoscaling
@mock_ec2
def test_standby_exit_standby():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
MaxSize=3,
DesiredCapacity=2,
Tags=[
{
"ResourceId": "test_asg",
"ResourceType": "auto-scaling-group",
"Key": "propogated-tag-key",
"Value": "propagate-tag-value",
"PropagateAtLaunch": True,
}
],
VPCZoneIdentifier=mocked_networking["subnet1"],
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
]
ec2_client = boto3.client("ec2", region_name="us-east-1")
response = client.enter_standby(
AutoScalingGroupName="test_asg",
InstanceIds=[instance_to_standby_exit_standby],
ShouldDecrementDesiredCapacity=False,
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(3)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(2)
response = client.describe_auto_scaling_instances(
InstanceIds=[instance_to_standby_exit_standby]
)
response["AutoScalingInstances"][0]["LifecycleState"].should.equal("Standby")
response = client.exit_standby(
AutoScalingGroupName="test_asg", InstanceIds=[instance_to_standby_exit_standby],
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
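# Exiting standby puts the instance back InService and increments the group's
# desired capacity to cover it (2 -> 3 here).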
group = response["AutoScalingGroups"][0]
group["Instances"].should.have.length_of(3)
instance_to_standby_exit_standby.should.be.within(
[x["InstanceId"] for x in group["Instances"]]
)
group["DesiredCapacity"].should.equal(3)
response = ec2_client.describe_instances(
InstanceIds=[instance_to_standby_exit_standby]
)
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
@mock_autoscaling
@mock_ec2
def test_attach_one_instance():
@@ -1383,7 +1917,7 @@ def test_set_desired_capacity_down_boto3():
@mock_autoscaling
@mock_ec2
def test_terminate_instance_via_ec2_in_autoscaling_group():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
@@ -1412,3 +1946,71 @@ def test_terminate_instance_in_autoscaling_group():
for instance in response["AutoScalingGroups"][0]["Instances"]
)
replaced_instance_id.should_not.equal(original_instance_id)
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
DesiredCapacity=1,
MaxSize=2,
VPCZoneIdentifier=mocked_networking["subnet1"],
NewInstancesProtectedFromScaleIn=False,
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
for instance in response["AutoScalingGroups"][0]["Instances"]
)
client.terminate_instance_in_auto_scaling_group(
InstanceId=original_instance_id, ShouldDecrementDesiredCapacity=True
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
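# With the decrement, the group shrinks to zero instances rather than
# replacing the terminated one.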
response["AutoScalingGroups"][0]["Instances"].should.equal([])
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0)
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_no_decrement():
mocked_networking = setup_networking()
client = boto3.client("autoscaling", region_name="us-east-1")
_ = client.create_launch_configuration(
LaunchConfigurationName="test_launch_configuration"
)
_ = client.create_auto_scaling_group(
AutoScalingGroupName="test_asg",
LaunchConfigurationName="test_launch_configuration",
MinSize=0,
DesiredCapacity=1,
MaxSize=2,
VPCZoneIdentifier=mocked_networking["subnet1"],
NewInstancesProtectedFromScaleIn=False,
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
for instance in response["AutoScalingGroups"][0]["Instances"]
)
client.terminate_instance_in_auto_scaling_group(
InstanceId=original_instance_id, ShouldDecrementDesiredCapacity=False
)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
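# Without the decrement, the ASG launches a replacement to restore the desired capacity of 1.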
replaced_instance_id = next(
instance["InstanceId"]
for instance in response["AutoScalingGroups"][0]["Instances"]
)
replaced_instance_id.should_not.equal(original_instance_id)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)

View File

@@ -2373,13 +2373,12 @@ def test_create_log_group_using_fntransform():
}
cf_conn = boto3.client("cloudformation", "us-west-2")
cf_conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(template))
logs_conn = boto3.client("logs", region_name="us-west-2")
log_group = logs_conn.describe_log_groups()["logGroups"][0]
log_group["logGroupName"].should.equal("some-log-group")
log_group["retentionInDays"].should.be.equal(90)
@mock_cloudformation
@@ -2400,7 +2399,7 @@ def test_stack_events_create_rule_integration():
}
cf_conn = boto3.client("cloudformation", "us-west-2")
cf_conn.create_stack(
StackName="test_stack", TemplateBody=json.dumps(events_template),
StackName="test_stack", TemplateBody=json.dumps(events_template)
)
rules = boto3.client("events", "us-west-2").list_rules()
@@ -2428,7 +2427,7 @@ def test_stack_events_delete_rule_integration():
}
cf_conn = boto3.client("cloudformation", "us-west-2")
cf_conn.create_stack(
StackName="test_stack", TemplateBody=json.dumps(events_template),
StackName="test_stack", TemplateBody=json.dumps(events_template)
)
rules = boto3.client("events", "us-west-2").list_rules()
@@ -2457,8 +2456,45 @@ def test_stack_events_create_rule_without_name_integration():
}
cf_conn = boto3.client("cloudformation", "us-west-2")
cf_conn.create_stack(
StackName="test_stack", TemplateBody=json.dumps(events_template),
StackName="test_stack", TemplateBody=json.dumps(events_template)
)
rules = boto3.client("events", "us-west-2").list_rules()
rules["Rules"][0]["Name"].should.contain("test_stack-Event-")
@mock_cloudformation
@mock_events
@mock_logs
def test_stack_events_create_rule_as_target():
events_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"SecurityGroup": {
"Type": "AWS::Logs::LogGroup",
"Properties": {
"LogGroupName": {"Fn::GetAtt": ["Event", "Arn"]},
"RetentionInDays": 3,
},
},
"Event": {
"Type": "AWS::Events::Rule",
"Properties": {
"State": "ENABLED",
"ScheduleExpression": "rate(5 minutes)",
},
},
},
}
cf_conn = boto3.client("cloudformation", "us-west-2")
cf_conn.create_stack(
StackName="test_stack", TemplateBody=json.dumps(events_template)
)
rules = boto3.client("events", "us-west-2").list_rules()
log_groups = boto3.client("logs", "us-west-2").describe_log_groups()
rules["Rules"][0]["Name"].should.contain("test_stack-Event-")
log_groups["logGroups"][0]["logGroupName"].should.equal(rules["Rules"][0]["Arn"])
log_groups["logGroups"][0]["retentionInDays"].should.equal(3)

View File

@@ -134,6 +134,7 @@ class TestCore:
"id": {"S": "entry1"},
"first_col": {"S": "bar"},
"second_col": {"S": "baz"},
"a": {"L": [{"M": {"b": {"S": "bar1"}}}]},
},
)
conn.delete_item(TableName="test-streams", Key={"id": {"S": "entry1"}})

View File

@@ -52,3 +52,15 @@ def test_boto3_availability_zones():
resp = conn.describe_availability_zones()
for rec in resp["AvailabilityZones"]:
rec["ZoneName"].should.contain(region)
@mock_ec2
def test_boto3_zoneId_in_availability_zones():
conn = boto3.client("ec2", "us-east-1")
resp = conn.describe_availability_zones()
for rec in resp["AvailabilityZones"]:
rec.get("ZoneId").should.contain("use1")
conn = boto3.client("ec2", "us-west-1")
resp = conn.describe_availability_zones()
for rec in resp["AvailabilityZones"]:
rec.get("ZoneId").should.contain("usw1")

View File

@@ -53,6 +53,45 @@ def test_create_and_delete_volume():
cm.exception.request_id.should_not.be.none
@mock_ec2_deprecated
def test_delete_attached_volume():
conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances("ami-1234abcd")
# grab the instance created by run_instances
instance = reservation.instances[0]
# create a volume
volume = conn.create_volume(80, "us-east-1a")
# attach volume to instance
volume.attach(instance.id, "/dev/sdh")
volume.update()
volume.volume_state().should.equal("in-use")
volume.attachment_state().should.equal("attached")
volume.attach_data.instance_id.should.equal(instance.id)
# deleting a volume that is still attached should raise VolumeInUse
with assert_raises(EC2ResponseError) as ex:
volume.delete()
ex.exception.error_code.should.equal("VolumeInUse")
ex.exception.status.should.equal(400)
ex.exception.message.should.equal(
"Volume {0} is currently attached to {1}".format(volume.id, instance.id)
)
volume.detach()
volume.update()
volume.volume_state().should.equal("available")
volume.delete()
all_volumes = conn.get_all_volumes()
my_volume = [item for item in all_volumes if item.id == volume.id]
my_volume.should.have.length_of(0)
@mock_ec2_deprecated
def test_create_encrypted_volume_dryrun():
conn = boto.ec2.connect_to_region("us-east-1")

View File

@@ -1166,6 +1166,21 @@ def test_describe_instance_status_with_instance_filter_deprecated():
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_describe_instance_credit_specifications():
conn = boto3.client("ec2", region_name="us-west-1")
# create the instance whose credit specification we will filter on
reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=1, MaxCount=1)
result = conn.describe_instance_credit_specifications(
InstanceIds=[reservation["Instances"][0]["InstanceId"]]
)
assert (
result["InstanceCreditSpecifications"][0]["InstanceId"]
== reservation["Instances"][0]["InstanceId"]
)
@mock_ec2
def test_describe_instance_status_with_instance_filter():
conn = boto3.client("ec2", region_name="us-west-1")

View File

@@ -1122,6 +1122,71 @@ def test_run_task():
response["tasks"][0]["stoppedReason"].should.equal("")
@mock_ec2
@mock_ecs
def test_run_task_default_cluster():
client = boto3.client("ecs", region_name="us-east-1")
ec2 = boto3.resource("ec2", region_name="us-east-1")
test_cluster_name = "default"
_ = client.create_cluster(clusterName=test_cluster_name)
test_instance = ec2.create_instances(
ImageId="ami-1234abcd", MinCount=1, MaxCount=1
)[0]
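# Container instances register with ECS using the EC2 instance identity
# document; moto's ec2_utils generates one for the mocked instance.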
instance_id_document = json.dumps(
ec2_utils.generate_instance_identity_document(test_instance)
)
response = client.register_container_instance(
cluster=test_cluster_name, instanceIdentityDocument=instance_id_document
)
_ = client.register_task_definition(
family="test_ecs_task",
containerDefinitions=[
{
"name": "hello_world",
"image": "docker/hello-world:latest",
"cpu": 1024,
"memory": 400,
"essential": True,
"environment": [
{"name": "AWS_ACCESS_KEY_ID", "value": "SOME_ACCESS_KEY"}
],
"logConfiguration": {"logDriver": "json-file"},
}
],
)
response = client.run_task(
launchType="FARGATE",
overrides={},
taskDefinition="test_ecs_task",
count=2,
startedBy="moto",
)
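# run_task was called without a cluster argument, so the task should land on
# the cluster named "default".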
len(response["tasks"]).should.equal(2)
response["tasks"][0]["taskArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:task/"
)
response["tasks"][0]["clusterArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:cluster/default"
)
response["tasks"][0]["taskDefinitionArn"].should.equal(
"arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1"
)
response["tasks"][0]["containerInstanceArn"].should.contain(
"arn:aws:ecs:us-east-1:012345678910:container-instance/"
)
response["tasks"][0]["overrides"].should.equal({})
response["tasks"][0]["lastStatus"].should.equal("RUNNING")
response["tasks"][0]["desiredStatus"].should.equal("RUNNING")
response["tasks"][0]["startedBy"].should.equal("moto")
response["tasks"][0]["stoppedReason"].should.equal("")
@mock_ec2
@mock_ecs
def test_start_task():

View File

@@ -79,13 +79,23 @@ def generate_environment():
@mock_events
def test_put_rule():
client = boto3.client("events", "us-west-2")
client.list_rules()["Rules"].should.have.length_of(0)
rule_data = {
"Name": "my-event",
"ScheduleExpression": "rate(5 minutes)",
"EventPattern": '{"source": ["test-source"]}',
}
client.put_rule(**rule_data)
client.list_rules()["Rules"].should.have.length_of(1)
rules = client.list_rules()["Rules"]
rules.should.have.length_of(1)
rules[0]["Name"].should.equal(rule_data["Name"])
rules[0]["ScheduleExpression"].should.equal(rule_data["ScheduleExpression"])
rules[0]["EventPattern"].should.equal(rule_data["EventPattern"])
rules[0]["State"].should.equal("ENABLED")
@mock_events

View File

@@ -12,17 +12,14 @@ _logs_region = "us-east-1" if settings.TEST_SERVER_MODE else "us-west-2"
@mock_logs
def test_create_log_group():
conn = boto3.client("logs", "us-west-2")
response = conn.create_log_group(logGroupName="dummy")
response = conn.describe_log_groups()
# AWS defaults to Never Expire for log group retention
response["logGroups"].should.have.length_of(1)
response["logGroups"][0].should_not.have.key("retentionInDays")
@mock_logs