commit 1aa99bb405

@@ -981,7 +981,7 @@ class LambdaBackend(BaseBackend):
             ]
         }
         func = self._lambdas.get_arn(function_arn)
-        func.invoke(json.dumps(event), {}, {})
+        return func.invoke(json.dumps(event), {}, {})

     def list_tags(self, resource):
         return self.get_function_by_arn(resource).tags
@@ -366,3 +366,13 @@ class TooManyResourceKeys(JsonRESTError):
         message = str(message)

         super(TooManyResourceKeys, self).__init__("ValidationException", message)
+
+
+class InvalidResultTokenException(JsonRESTError):
+    code = 400
+
+    def __init__(self):
+        message = "The resultToken provided is invalid"
+        super(InvalidResultTokenException, self).__init__(
+            "InvalidResultTokenException", message
+        )
@@ -40,6 +40,7 @@ from moto.config.exceptions import (
     TooManyResourceIds,
     ResourceNotDiscoveredException,
     TooManyResourceKeys,
+    InvalidResultTokenException,
 )

 from moto.core import BaseBackend, BaseModel
@@ -1089,6 +1090,26 @@ class ConfigBackend(BaseBackend):
             "UnprocessedResourceIdentifiers": not_found,
         }

+    def put_evaluations(self, evaluations=None, result_token=None, test_mode=False):
+        if not evaluations:
+            raise InvalidParameterValueException(
+                "The Evaluations object in your request cannot be null."
+                "Add the required parameters and try again."
+            )
+
+        if not result_token:
+            raise InvalidResultTokenException()
+
+        # Moto only supports PutEvaluations with test mode currently (missing rule and token support)
+        if not test_mode:
+            raise NotImplementedError(
+                "PutEvaluations without TestMode is not yet implemented"
+            )
+
+        return {
+            "FailedEvaluations": [],
+        }  # At this time, moto is not adding failed evaluations.
+
+
 config_backends = {}
 for region in Session().get_available_regions("config"):
@@ -151,3 +151,11 @@ class ConfigResponse(BaseResponse):
             self._get_param("ResourceIdentifiers"),
         )
         return json.dumps(schema)
+
+    def put_evaluations(self):
+        evaluations = self.config_backend.put_evaluations(
+            self._get_param("Evaluations"),
+            self._get_param("ResultToken"),
+            self._get_param("TestMode"),
+        )
+        return json.dumps(evaluations)
@@ -7,6 +7,7 @@ import inspect
 import os
 import re
 import six
+import types
 from io import BytesIO
 from collections import defaultdict
 from botocore.handlers import BUILTIN_HANDLERS
@@ -217,12 +218,29 @@ botocore_mock = responses.RequestsMock(
     assert_all_requests_are_fired=False,
     target="botocore.vendored.requests.adapters.HTTPAdapter.send",
 )

 responses_mock = responses._default_mock
 # Add passthrough to allow any other requests to work
 # Since this uses .startswith, it applies to http and https requests.
 responses_mock.add_passthru("http")


+def _find_first_match(self, request):
+    for i, match in enumerate(self._matches):
+        if match.matches(request):
+            return match
+
+    return None
+
+
+# Modify behaviour of the matcher to only/always return the first match
+# Default behaviour is to return subsequent matches for subsequent requests, which leads to https://github.com/spulec/moto/issues/2567
+# - First request matches on the appropriate S3 URL
+# - Same request, executed again, will be matched on the subsequent match, which happens to be the catch-all, not-yet-implemented, callback
+# Fix: Always return the first match
+responses_mock._find_match = types.MethodType(_find_first_match, responses_mock)
+
+
 BOTOCORE_HTTP_METHODS = ["GET", "DELETE", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
@@ -822,6 +822,21 @@ class Instance(TaggedEC2Resource, BotoInstance):
             return self.public_ip
         raise UnformattedGetAttTemplateException()

+    def applies(self, filters):
+        if filters:
+            applicable = False
+            for f in filters:
+                acceptable_values = f["values"]
+                if f["name"] == "instance-state-name":
+                    if self._state.name in acceptable_values:
+                        applicable = True
+                if f["name"] == "instance-state-code":
+                    if str(self._state.code) in acceptable_values:
+                        applicable = True
+            return applicable
+        # If there are no filters, all instances are valid
+        return True
+

 class InstanceBackend(object):
     def __init__(self):
@@ -921,22 +936,23 @@ class InstanceBackend(object):
             value = getattr(instance, key)
         return instance, value

-    def all_instances(self):
+    def all_instances(self, filters=None):
         instances = []
         for reservation in self.all_reservations():
             for instance in reservation.instances:
-                instances.append(instance)
-        return instances
-
-    def all_running_instances(self):
-        instances = []
-        for reservation in self.all_reservations():
-            for instance in reservation.instances:
-                if instance.state_code == 16:
+                if instance.applies(filters):
                     instances.append(instance)
         return instances

-    def get_multi_instances_by_id(self, instance_ids):
+    def all_running_instances(self, filters=None):
+        instances = []
+        for reservation in self.all_reservations():
+            for instance in reservation.instances:
+                if instance.state_code == 16 and instance.applies(filters):
+                    instances.append(instance)
+        return instances
+
+    def get_multi_instances_by_id(self, instance_ids, filters=None):
         """
         :param instance_ids: A string list with instance ids
         :return: A list with instance objects
@@ -946,7 +962,8 @@ class InstanceBackend(object):
         for reservation in self.all_reservations():
             for instance in reservation.instances:
                 if instance.id in instance_ids:
-                    result.append(instance)
+                    if instance.applies(filters):
+                        result.append(instance)

         # TODO: Trim error message down to specific invalid id.
         if instance_ids and len(instance_ids) > len(result):
@@ -1722,6 +1739,12 @@ class SecurityGroup(TaggedEC2Resource):
         self.vpc_id = vpc_id
         self.owner_id = OWNER_ID

+        # Append default IPv6 egress rule for VPCs with IPv6 support
+        if vpc_id:
+            vpc = self.ec2_backend.vpcs.get(vpc_id)
+            if vpc and len(vpc.get_cidr_block_association_set(ipv6=True)) > 0:
+                self.egress_rules.append(SecurityRule("-1", None, None, [], []))
+
     @classmethod
     def create_from_cloudformation_json(
         cls, resource_name, cloudformation_json, region_name
@@ -113,16 +113,34 @@ class InstanceResponse(BaseResponse):
         template = self.response_template(EC2_START_INSTANCES)
         return template.render(instances=instances)

+    def _get_list_of_dict_params(self, param_prefix, _dct):
+        """
+        Simplified version of _get_dict_param
+        Allows you to pass in a custom dict instead of using self.querystring by default
+        """
+        params = []
+        for key, value in _dct.items():
+            if key.startswith(param_prefix):
+                params.append(value)
+        return params
+
     def describe_instance_status(self):
         instance_ids = self._get_multi_param("InstanceId")
         include_all_instances = self._get_param("IncludeAllInstances") == "true"
+        filters = self._get_list_prefix("Filter")
+        filters = [
+            {"name": f["name"], "values": self._get_list_of_dict_params("value.", f)}
+            for f in filters
+        ]
+
         if instance_ids:
-            instances = self.ec2_backend.get_multi_instances_by_id(instance_ids)
+            instances = self.ec2_backend.get_multi_instances_by_id(
+                instance_ids, filters
+            )
         elif include_all_instances:
-            instances = self.ec2_backend.all_instances()
+            instances = self.ec2_backend.all_instances(filters)
         else:
-            instances = self.ec2_backend.all_running_instances()
+            instances = self.ec2_backend.all_running_instances(filters)

         template = self.response_template(EC2_INSTANCE_STATUS)
         return template.render(instances=instances)
@@ -1,5 +1,5 @@
 from __future__ import unicode_literals
-from moto.core.exceptions import RESTError
+from moto.core.exceptions import RESTError, JsonRESTError


 class RepositoryNotFoundException(RESTError):
@@ -13,7 +13,7 @@ class RepositoryNotFoundException(RESTError):
         )


-class ImageNotFoundException(RESTError):
+class ImageNotFoundException(JsonRESTError):
     code = 400

     def __init__(self, image_id, repository_name, registry_id):
@@ -35,6 +35,7 @@ class FakeInstanceGroup(BaseModel):
         name=None,
         id=None,
         bid_price=None,
+        ebs_configuration=None,
     ):
         self.id = id or random_instance_group_id()

@@ -51,6 +52,7 @@ class FakeInstanceGroup(BaseModel):
         self.num_instances = instance_count
         self.role = instance_role
         self.type = instance_type
+        self.ebs_configuration = ebs_configuration

         self.creation_datetime = datetime.now(pytz.utc)
         self.start_datetime = datetime.now(pytz.utc)
@@ -73,6 +73,8 @@ class ElasticMapReduceResponse(BaseResponse):
         instance_groups = self._get_list_prefix("InstanceGroups.member")
         for item in instance_groups:
             item["instance_count"] = int(item["instance_count"])
+            # Add support for EbsConfiguration
+            self._parse_ebs_configuration(item)
         instance_groups = self.backend.add_instance_groups(jobflow_id, instance_groups)
         template = self.response_template(ADD_INSTANCE_GROUPS_TEMPLATE)
         return template.render(instance_groups=instance_groups)
@@ -324,6 +326,8 @@ class ElasticMapReduceResponse(BaseResponse):
         if instance_groups:
             for ig in instance_groups:
                 ig["instance_count"] = int(ig["instance_count"])
+                # Add support for EbsConfiguration
+                self._parse_ebs_configuration(ig)
             self.backend.add_instance_groups(cluster.id, instance_groups)

         tags = self._get_list_prefix("Tags.member")
@@ -335,6 +339,85 @@ class ElasticMapReduceResponse(BaseResponse):
         template = self.response_template(RUN_JOB_FLOW_TEMPLATE)
         return template.render(cluster=cluster)

+    def _has_key_prefix(self, key_prefix, value):
+        for key in value:  # iter on both keys and values
+            if key.startswith(key_prefix):
+                return True
+        return False
+
+    def _parse_ebs_configuration(self, instance_group):
+        key_ebs_config = "ebs_configuration"
+        ebs_configuration = dict()
+        # Filter only EBS config keys
+        for key in instance_group:
+            if key.startswith(key_ebs_config):
+                ebs_configuration[key] = instance_group[key]
+
+        if len(ebs_configuration) > 0:
+            # Key that should be extracted
+            ebs_optimized = "ebs_optimized"
+            ebs_block_device_configs = "ebs_block_device_configs"
+            volume_specification = "volume_specification"
+            size_in_gb = "size_in_gb"
+            volume_type = "volume_type"
+            iops = "iops"
+            volumes_per_instance = "volumes_per_instance"
+
+            key_ebs_optimized = "{0}._{1}".format(key_ebs_config, ebs_optimized)
+            # EbsOptimized config
+            if key_ebs_optimized in ebs_configuration:
+                instance_group.pop(key_ebs_optimized)
+                ebs_configuration[ebs_optimized] = ebs_configuration.pop(
+                    key_ebs_optimized
+                )
+
+            # Ebs Blocks
+            ebs_blocks = []
+            idx = 1
+            keyfmt = "{0}._{1}.member.{{}}".format(
+                key_ebs_config, ebs_block_device_configs
+            )
+            key = keyfmt.format(idx)
+            while self._has_key_prefix(key, ebs_configuration):
+                vlespc_keyfmt = "{0}._{1}._{{}}".format(key, volume_specification)
+                vol_size = vlespc_keyfmt.format(size_in_gb)
+                vol_iops = vlespc_keyfmt.format(iops)
+                vol_type = vlespc_keyfmt.format(volume_type)
+
+                ebs_block = dict()
+                ebs_block[volume_specification] = dict()
+                if vol_size in ebs_configuration:
+                    instance_group.pop(vol_size)
+                    ebs_block[volume_specification][size_in_gb] = int(
+                        ebs_configuration.pop(vol_size)
+                    )
+                if vol_iops in ebs_configuration:
+                    instance_group.pop(vol_iops)
+                    ebs_block[volume_specification][iops] = ebs_configuration.pop(
+                        vol_iops
+                    )
+                if vol_type in ebs_configuration:
+                    instance_group.pop(vol_type)
+                    ebs_block[volume_specification][
+                        volume_type
+                    ] = ebs_configuration.pop(vol_type)
+
+                per_instance = "{0}._{1}".format(key, volumes_per_instance)
+                if per_instance in ebs_configuration:
+                    instance_group.pop(per_instance)
+                    ebs_block[volumes_per_instance] = int(
+                        ebs_configuration.pop(per_instance)
+                    )
+
+                if len(ebs_block) > 0:
+                    ebs_blocks.append(ebs_block)
+                idx += 1
+                key = keyfmt.format(idx)
+
+            if len(ebs_blocks) > 0:
+                ebs_configuration[ebs_block_device_configs] = ebs_blocks
+            instance_group[key_ebs_config] = ebs_configuration
+
     @generate_boto3_response("SetTerminationProtection")
     def set_termination_protection(self):
         termination_protection = self._get_param("TerminationProtected")
@@ -754,7 +837,22 @@ LIST_INSTANCE_GROUPS_TEMPLATE = """<ListInstanceGroupsResponse xmlns="http://ela
         <BidPrice>{{ instance_group.bid_price }}</BidPrice>
         {% endif %}
         <Configurations/>
-        <EbsBlockDevices/>
+        {% if instance_group.ebs_configuration is not none %}
+        <EbsBlockDevices>
+          {% for ebs_block_device in instance_group.ebs_configuration.ebs_block_device_configs %}
+          {% for i in range(ebs_block_device.volumes_per_instance) %}
+          <member>
+            <VolumeSpecification>
+              <VolumeType>{{ebs_block_device.volume_specification.volume_type}}</VolumeType>
+              <Iops>{{ebs_block_device.volume_specification.iops}}</Iops>
+              <SizeInGB>{{ebs_block_device.volume_specification.size_in_gb}}</SizeInGB>
+            </VolumeSpecification>
+            <Device>/dev/sd{{i}}</Device>
+          </member>
+          {% endfor %}
+          {% endfor %}
+        </EbsBlockDevices>
+        {% endif %}
         {% if instance_group.ebs_optimized is not none %}
         <EbsOptimized>{{ instance_group.ebs_optimized }}</EbsOptimized>
         {% endif %}
@@ -70,6 +70,7 @@ def lambda_handler(event, context):
 def get_test_zip_file3():
     pfunc = """
 def lambda_handler(event, context):
+    print("Nr_of_records("+str(len(event['Records']))+")")
     print("get_test_zip_file3 success")
     return event
 """
@@ -1111,7 +1112,6 @@ def test_create_event_source_mapping():
 @mock_lambda
 @mock_sqs
 def test_invoke_function_from_sqs():
-    logs_conn = boto3.client("logs", region_name="us-east-1")
     sqs = boto3.resource("sqs", region_name="us-east-1")
     queue = sqs.create_queue(QueueName="test-sqs-queue1")

@@ -1137,32 +1137,22 @@ def test_invoke_function_from_sqs():

     sqs_client = boto3.client("sqs", region_name="us-east-1")
     sqs_client.send_message(QueueUrl=queue.url, MessageBody="test")
-    start = time.time()
-    while (time.time() - start) < 30:
-        result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
-        log_streams = result.get("logStreams")
-        if not log_streams:
-            time.sleep(1)
-            continue

-        assert len(log_streams) == 1
-        result = logs_conn.get_log_events(
-            logGroupName="/aws/lambda/testFunction",
-            logStreamName=log_streams[0]["logStreamName"],
-        )
-        for event in result.get("events"):
-            if event["message"] == "get_test_zip_file3 success":
-                return
-        time.sleep(1)
+    expected_msg = "get_test_zip_file3 success"
+    log_group = "/aws/lambda/testFunction"
+    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)

-    assert False, "Test Failed"
+    assert msg_showed_up, (
+        expected_msg
+        + " was not found after sending an SQS message. All logs: "
+        + all_logs
+    )


 @mock_logs
 @mock_lambda
 @mock_dynamodb2
 def test_invoke_function_from_dynamodb_put():
-    logs_conn = boto3.client("logs", region_name="us-east-1")
     dynamodb = boto3.client("dynamodb", region_name="us-east-1")
     table_name = "table_with_stream"
     table = dynamodb.create_table(
@@ -1197,32 +1187,20 @@ def test_invoke_function_from_dynamodb_put():
     assert response["State"] == "Enabled"

     dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
-    start = time.time()
-    while (time.time() - start) < 30:
-        result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
-        log_streams = result.get("logStreams")
-        if not log_streams:
-            time.sleep(1)
-            continue

-        assert len(log_streams) == 1
-        result = logs_conn.get_log_events(
-            logGroupName="/aws/lambda/testFunction",
-            logStreamName=log_streams[0]["logStreamName"],
-        )
-        for event in result.get("events"):
-            if event["message"] == "get_test_zip_file3 success":
-                return
-        time.sleep(1)
+    expected_msg = "get_test_zip_file3 success"
+    log_group = "/aws/lambda/testFunction"
+    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)

-    assert False, "Test Failed"
+    assert msg_showed_up, (
+        expected_msg + " was not found after a DDB insert. All logs: " + all_logs
+    )


 @mock_logs
 @mock_lambda
 @mock_dynamodb2
 def test_invoke_function_from_dynamodb_update():
-    logs_conn = boto3.client("logs", region_name="us-east-1")
     dynamodb = boto3.client("dynamodb", region_name="us-east-1")
     table_name = "table_with_stream"
     table = dynamodb.create_table(
@@ -1234,7 +1212,6 @@ def test_invoke_function_from_dynamodb_update():
             "StreamViewType": "NEW_AND_OLD_IMAGES",
         },
     )
-    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})

     conn = boto3.client("lambda", region_name="us-east-1")
     func = conn.create_function(
@@ -1249,13 +1226,17 @@ def test_invoke_function_from_dynamodb_update():
         Publish=True,
     )

-    response = conn.create_event_source_mapping(
+    conn.create_event_source_mapping(
         EventSourceArn=table["TableDescription"]["LatestStreamArn"],
         FunctionName=func["FunctionArn"],
     )

-    assert response["EventSourceArn"] == table["TableDescription"]["LatestStreamArn"]
-    assert response["State"] == "Enabled"
+    dynamodb.put_item(TableName=table_name, Item={"id": {"S": "item 1"}})
+    log_group = "/aws/lambda/testFunction"
+    expected_msg = "get_test_zip_file3 success"
+    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
+    assert "Nr_of_records(1)" in all_logs, "Only one item should be inserted"

     dynamodb.update_item(
         TableName=table_name,
         Key={"id": {"S": "item 1"}},
@@ -1263,25 +1244,39 @@ def test_invoke_function_from_dynamodb_update():
         ExpressionAttributeNames={"#attr": "new_attr"},
         ExpressionAttributeValues={":val": {"S": "new_val"}},
     )
+    msg_showed_up, all_logs = wait_for_log_msg(expected_msg, log_group)
+
+    assert msg_showed_up, (
+        expected_msg + " was not found after updating DDB. All logs: " + str(all_logs)
+    )
+    assert "Nr_of_records(1)" in all_logs, "Only one item should be updated"
+    assert (
+        "Nr_of_records(2)" not in all_logs
+    ), "The inserted item should not show up again"
+
+
+def wait_for_log_msg(expected_msg, log_group):
+    logs_conn = boto3.client("logs", region_name="us-east-1")
+    received_messages = []
     start = time.time()
-    while (time.time() - start) < 30:
-        result = logs_conn.describe_log_streams(logGroupName="/aws/lambda/testFunction")
+    while (time.time() - start) < 10:
+        result = logs_conn.describe_log_streams(logGroupName=log_group)
         log_streams = result.get("logStreams")
         if not log_streams:
             time.sleep(1)
             continue

-        assert len(log_streams) == 1
-        result = logs_conn.get_log_events(
-            logGroupName="/aws/lambda/testFunction",
-            logStreamName=log_streams[0]["logStreamName"],
-        )
-        for event in result.get("events"):
-            if event["message"] == "get_test_zip_file3 success":
-                return
+        for log_stream in log_streams:
+            result = logs_conn.get_log_events(
+                logGroupName=log_group, logStreamName=log_stream["logStreamName"],
+            )
+            received_messages.extend(
+                [event["message"] for event in result.get("events")]
+            )
+        if expected_msg in received_messages:
+            return True, received_messages
         time.sleep(1)
-
-    assert False, "Test Failed"
+    return False, received_messages


 @mock_logs
@@ -1,8 +1,10 @@
 import json
+import os
 from datetime import datetime, timedelta

 import boto3
 from botocore.exceptions import ClientError
+from nose import SkipTest
 from nose.tools import assert_raises

 from moto import mock_s3
@@ -1802,3 +1804,71 @@ def test_batch_get_aggregate_resource_config():
         len(result["UnprocessedResourceIdentifiers"]) == 1
         and result["UnprocessedResourceIdentifiers"][0]["SourceRegion"] == "eu-west-1"
     )
+
+
+@mock_config
+def test_put_evaluations():
+    client = boto3.client("config", region_name="us-west-2")
+
+    # Try without Evaluations supplied:
+    with assert_raises(ClientError) as ce:
+        client.put_evaluations(Evaluations=[], ResultToken="test", TestMode=True)
+    assert ce.exception.response["Error"]["Code"] == "InvalidParameterValueException"
+    assert (
+        "The Evaluations object in your request cannot be null"
+        in ce.exception.response["Error"]["Message"]
+    )
+
+    # Try without a ResultToken supplied:
+    with assert_raises(ClientError) as ce:
+        client.put_evaluations(
+            Evaluations=[
+                {
+                    "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                    "ComplianceResourceId": "test-api",
+                    "ComplianceType": "INSUFFICIENT_DATA",
+                    "OrderingTimestamp": datetime(2015, 1, 1),
+                }
+            ],
+            ResultToken="",
+            TestMode=True,
+        )
+    assert ce.exception.response["Error"]["Code"] == "InvalidResultTokenException"
+
+    if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
+        raise SkipTest("Does not work in server mode due to an error in Werkzeug")
+    else:
+        # Try without TestMode supplied:
+        with assert_raises(NotImplementedError):
+            client.put_evaluations(
+                Evaluations=[
+                    {
+                        "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                        "ComplianceResourceId": "test-api",
+                        "ComplianceType": "INSUFFICIENT_DATA",
+                        "OrderingTimestamp": datetime(2015, 1, 1),
+                    }
+                ],
+                ResultToken="test",
+            )
+
+    # Now with proper params:
+    response = client.put_evaluations(
+        Evaluations=[
+            {
+                "ComplianceResourceType": "AWS::ApiGateway::RestApi",
+                "ComplianceResourceId": "test-api",
+                "ComplianceType": "INSUFFICIENT_DATA",
+                "OrderingTimestamp": datetime(2015, 1, 1),
+            }
+        ],
+        TestMode=True,
+        ResultToken="test",
+    )
+
+    # This is hard to match against, so remove it:
+    response["ResponseMetadata"].pop("HTTPHeaders", None)
+    response["ResponseMetadata"].pop("RetryAttempts", None)
+    response.should.equal(
+        {"FailedEvaluations": [], "ResponseMetadata": {"HTTPStatusCode": 200,},}
+    )
@@ -1144,7 +1144,7 @@ def test_describe_instance_status_with_instances():


 @mock_ec2_deprecated
-def test_describe_instance_status_with_instance_filter():
+def test_describe_instance_status_with_instance_filter_deprecated():
     conn = boto.connect_ec2("the_key", "the_secret")

     # We want to filter based on this one
@@ -1166,6 +1166,75 @@ def test_describe_instance_status_with_instance_filter():
     cm.exception.request_id.should_not.be.none


+@mock_ec2
+def test_describe_instance_status_with_instance_filter():
+    conn = boto3.client("ec2", region_name="us-west-1")
+
+    # We want to filter based on this one
+    reservation = conn.run_instances(ImageId="ami-1234abcd", MinCount=3, MaxCount=3)
+    instance1 = reservation["Instances"][0]
+    instance2 = reservation["Instances"][1]
+    instance3 = reservation["Instances"][2]
+    conn.stop_instances(InstanceIds=[instance1["InstanceId"]])
+    stopped_instance_ids = [instance1["InstanceId"]]
+    running_instance_ids = sorted([instance2["InstanceId"], instance3["InstanceId"]])
+    all_instance_ids = sorted(stopped_instance_ids + running_instance_ids)
+
+    # Filter instances using the state name
+    state_name_filter = {
+        "running_and_stopped": [
+            {"Name": "instance-state-name", "Values": ["running", "stopped"]}
+        ],
+        "running": [{"Name": "instance-state-name", "Values": ["running"]}],
+        "stopped": [{"Name": "instance-state-name", "Values": ["stopped"]}],
+    }
+
+    found_statuses = conn.describe_instance_status(
+        IncludeAllInstances=True, Filters=state_name_filter["running_and_stopped"]
+    )["InstanceStatuses"]
+    found_instance_ids = [status["InstanceId"] for status in found_statuses]
+    sorted(found_instance_ids).should.equal(all_instance_ids)
+
+    found_statuses = conn.describe_instance_status(
+        IncludeAllInstances=True, Filters=state_name_filter["running"]
+    )["InstanceStatuses"]
+    found_instance_ids = [status["InstanceId"] for status in found_statuses]
+    sorted(found_instance_ids).should.equal(running_instance_ids)
+
+    found_statuses = conn.describe_instance_status(
+        IncludeAllInstances=True, Filters=state_name_filter["stopped"]
+    )["InstanceStatuses"]
+    found_instance_ids = [status["InstanceId"] for status in found_statuses]
+    sorted(found_instance_ids).should.equal(stopped_instance_ids)
+
+    # Filter instances using the state code
+    state_code_filter = {
+        "running_and_stopped": [
+            {"Name": "instance-state-code", "Values": ["16", "80"]}
+        ],
+        "running": [{"Name": "instance-state-code", "Values": ["16"]}],
+        "stopped": [{"Name": "instance-state-code", "Values": ["80"]}],
+    }
+
+    found_statuses = conn.describe_instance_status(
+        IncludeAllInstances=True, Filters=state_code_filter["running_and_stopped"]
+    )["InstanceStatuses"]
+    found_instance_ids = [status["InstanceId"] for status in found_statuses]
+    sorted(found_instance_ids).should.equal(all_instance_ids)
+
+    found_statuses = conn.describe_instance_status(
+        IncludeAllInstances=True, Filters=state_code_filter["running"]
+    )["InstanceStatuses"]
+    found_instance_ids = [status["InstanceId"] for status in found_statuses]
+    sorted(found_instance_ids).should.equal(running_instance_ids)
+
+    found_statuses = conn.describe_instance_status(
+        IncludeAllInstances=True, Filters=state_code_filter["stopped"]
+    )["InstanceStatuses"]
+    found_instance_ids = [status["InstanceId"] for status in found_statuses]
+    sorted(found_instance_ids).should.equal(stopped_instance_ids)
+
+
 @requires_boto_gte("2.32.0")
 @mock_ec2_deprecated
 def test_describe_instance_status_with_non_running_instances():
@@ -123,6 +123,19 @@ def test_create_two_security_groups_with_same_name_in_different_vpc():
     set(group_names).should.equal(set(["default", "test security group"]))


+@mock_ec2
+def test_create_two_security_groups_in_vpc_with_ipv6_enabled():
+    ec2 = boto3.resource("ec2", region_name="us-west-1")
+    vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16", AmazonProvidedIpv6CidrBlock=True)
+
+    security_group = ec2.create_security_group(
+        GroupName="sg01", Description="Test security group sg01", VpcId=vpc.id
+    )
+
+    # The security group must have two default egress rules (one for ipv4 and another for ipv6)
+    security_group.ip_permissions_egress.should.have.length_of(2)
+
+
 @mock_ec2_deprecated
 def test_deleting_security_groups():
     conn = boto.connect_ec2("the_key", "the_secret")
@@ -538,7 +538,7 @@ def test_describe_image_that_doesnt_exist():
         repositoryName="test_repository",
         imageIds=[{"imageTag": "testtag"}],
         registryId="123",
-    ).should.throw(ClientError, error_msg1)
+    ).should.throw(client.exceptions.ImageNotFoundException, error_msg1)

     error_msg2 = re.compile(
         r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*",
@@ -60,6 +60,15 @@ input_instance_groups = [
         "Market": "SPOT",
         "Name": "task-2",
         "BidPrice": "0.05",
+        "EbsConfiguration": {
+            "EbsBlockDeviceConfigs": [
+                {
+                    "VolumeSpecification": {"VolumeType": "gp2", "SizeInGB": 800},
+                    "VolumesPerInstance": 6,
+                },
+            ],
+            "EbsOptimized": True,
+        },
     },
 ]

@@ -430,6 +439,21 @@ def test_run_job_flow_with_visible_to_all_users():
     resp["Cluster"]["VisibleToAllUsers"].should.equal(expected)


+def _do_assertion_ebs_configuration(x, y):
+    total_volumes = 0
+    total_size = 0
+    for ebs_block in y["EbsConfiguration"]["EbsBlockDeviceConfigs"]:
+        total_volumes += ebs_block["VolumesPerInstance"]
+        total_size += ebs_block["VolumeSpecification"]["SizeInGB"]
+    # Multiply by total volumes
+    total_size = total_size * total_volumes
+    comp_total_size = 0
+    for ebs_block in x["EbsBlockDevices"]:
+        comp_total_size += ebs_block["VolumeSpecification"]["SizeInGB"]
+    len(x["EbsBlockDevices"]).should.equal(total_volumes)
+    comp_total_size.should.equal(total_size)
+
+
 @mock_emr
 def test_run_job_flow_with_instance_groups():
     input_groups = dict((g["Name"], g) for g in input_instance_groups)
@@ -448,6 +472,9 @@ def test_run_job_flow_with_instance_groups():
         if "BidPrice" in y:
             x["BidPrice"].should.equal(y["BidPrice"])

+        if "EbsConfiguration" in y:
+            _do_assertion_ebs_configuration(x, y)
+

 @mock_emr
 def test_run_job_flow_with_custom_ami():
@@ -623,6 +650,8 @@ def test_instance_groups():
         y = input_groups[x["Name"]]
         if hasattr(y, "BidPrice"):
             x["BidPrice"].should.equal("BidPrice")
+        if "EbsConfiguration" in y:
+            _do_assertion_ebs_configuration(x, y)
         # Configurations
         # EbsBlockDevices
         # EbsOptimized
@@ -39,6 +39,7 @@ import moto.s3.models as s3model
 from moto.core.exceptions import InvalidNextTokenException
 from moto.core.utils import py2_strip_unicode_keys
+

 if settings.TEST_SERVER_MODE:
     REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE
     EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"'
@@ -1018,12 +1019,23 @@ def test_s3_object_in_public_bucket():
         s3_anonymous.Object(key="file.txt", bucket_name="test-bucket").get()
     exc.exception.response["Error"]["Code"].should.equal("403")


+@mock_s3
+def test_s3_object_in_public_bucket_using_multiple_presigned_urls():
+    s3 = boto3.resource("s3")
+    bucket = s3.Bucket("test-bucket")
+    bucket.create(
+        ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"}
+    )
+    bucket.put_object(Body=b"ABCD", Key="file.txt")
+
     params = {"Bucket": "test-bucket", "Key": "file.txt"}
     presigned_url = boto3.client("s3").generate_presigned_url(
         "get_object", params, ExpiresIn=900
     )
-    response = requests.get(presigned_url)
-    assert response.status_code == 200
+    for i in range(1, 10):
+        response = requests.get(presigned_url)
+        assert response.status_code == 200, "Failed on req number {}".format(i)


 @mock_s3