Merge branch 'master' into batch

Terry Cain 2017-10-19 21:16:40 +01:00
commit d67ef8d128
22 changed files with 961 additions and 88 deletions

View File

@ -38,7 +38,7 @@ from .sts import mock_sts, mock_sts_deprecated # flake8: noqa
from .ssm import mock_ssm # flake8: noqa
from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa
from .swf import mock_swf, mock_swf_deprecated # flake8: noqa
from .xray import mock_xray # flake8: noqa
from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa
from .logs import mock_logs, mock_logs_deprecated # flake8: noqa
from .batch import mock_batch # flake8: noqa

View File

@ -0,0 +1,14 @@
from __future__ import unicode_literals
from moto.core.exceptions import RESTError
class AutoscalingClientError(RESTError):
code = 500
class ResourceContentionError(AutoscalingClientError):
def __init__(self):
super(ResourceContentionError, self).__init__(
"ResourceContentionError",
"You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).")

View File

@ -5,6 +5,9 @@ from moto.core import BaseBackend, BaseModel
from moto.ec2 import ec2_backends
from moto.elb import elb_backends
from moto.elb.exceptions import LoadBalancerNotFoundError
from .exceptions import (
ResourceContentionError,
)
# http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown
DEFAULT_COOLDOWN = 300
@ -259,27 +262,8 @@ class FakeAutoScalingGroup(BaseModel):
# Need more instances
count_needed = int(self.desired_capacity) - int(curr_instance_count)
propagated_tags = {}
for tag in self.tags:
# boto uses 'propagate_at_launch'
# boto3 and cloudformation use PropagateAtLaunch
if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true':
propagated_tags[tag['key']] = tag['value']
if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']:
propagated_tags[tag['Key']] = tag['Value']
propagated_tags[ASG_NAME_TAG] = self.name
reservation = self.autoscaling_backend.ec2_backend.add_instances(
self.launch_config.image_id,
count_needed,
self.launch_config.user_data,
self.launch_config.security_groups,
instance_type=self.launch_config.instance_type,
tags={'instance': propagated_tags}
)
for instance in reservation.instances:
instance.autoscaling_group = self
self.instance_states.append(InstanceState(instance))
propagated_tags = self.get_propagated_tags()
self.replace_autoscaling_group_instances(count_needed, propagated_tags)
else:
# Need to remove some instances
count_to_remove = curr_instance_count - self.desired_capacity
@ -290,6 +274,31 @@ class FakeAutoScalingGroup(BaseModel):
instance_ids_to_remove)
self.instance_states = self.instance_states[count_to_remove:]
def get_propagated_tags(self):
propagated_tags = {}
for tag in self.tags:
# boto uses 'propagate_at_launch'
# boto3 and cloudformation use PropagateAtLaunch
if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true':
propagated_tags[tag['key']] = tag['value']
if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']:
propagated_tags[tag['Key']] = tag['Value']
return propagated_tags
def replace_autoscaling_group_instances(self, count_needed, propagated_tags):
propagated_tags[ASG_NAME_TAG] = self.name
reservation = self.autoscaling_backend.ec2_backend.add_instances(
self.launch_config.image_id,
count_needed,
self.launch_config.user_data,
self.launch_config.security_groups,
instance_type=self.launch_config.instance_type,
tags={'instance': propagated_tags}
)
for instance in reservation.instances:
instance.autoscaling_group = self
self.instance_states.append(InstanceState(instance))
class AutoScalingBackend(BaseBackend):
def __init__(self, ec2_backend, elb_backend):
@ -409,6 +418,40 @@ class AutoScalingBackend(BaseBackend):
instance_states.extend(group.instance_states)
return instance_states
def attach_instances(self, group_name, instance_ids):
group = self.autoscaling_groups[group_name]
original_size = len(group.instance_states)
if (original_size + len(instance_ids)) > group.max_size:
raise ResourceContentionError
else:
group.desired_capacity = original_size + len(instance_ids)
new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids]
for instance in new_instances:
self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name})
group.instance_states.extend(new_instances)
self.update_attached_elbs(group.name)
def detach_instances(self, group_name, instance_ids, should_decrement):
group = self.autoscaling_groups[group_name]
original_size = len(group.instance_states)
detached_instances = [x for x in group.instance_states if x.instance.id in instance_ids]
for instance in detached_instances:
self.ec2_backend.delete_tags([instance.instance.id], {ASG_NAME_TAG: group.name})
new_instance_state = [x for x in group.instance_states if x.instance.id not in instance_ids]
group.instance_states = new_instance_state
if should_decrement:
group.desired_capacity = original_size - len(instance_ids)
else:
count_needed = len(instance_ids)
group.replace_autoscaling_group_instances(count_needed, group.get_propagated_tags())
self.update_attached_elbs(group_name)
return detached_instances
def set_desired_capacity(self, group_name, desired_capacity):
group = self.autoscaling_groups[group_name]
group.set_desired_capacity(desired_capacity)
@ -461,6 +504,10 @@ class AutoScalingBackend(BaseBackend):
group_instance_ids = set(
state.instance.id for state in group.instance_states)
# skip this if group.load_balancers is empty
# otherwise elb_backend.describe_load_balancers returns all available load balancers
if not group.load_balancers:
return
try:
elbs = self.elb_backend.describe_load_balancers(
names=group.load_balancers)
@ -496,6 +543,25 @@ class AutoScalingBackend(BaseBackend):
group.tags = new_tags
def attach_load_balancers(self, group_name, load_balancer_names):
group = self.autoscaling_groups[group_name]
group.load_balancers.extend(
[x for x in load_balancer_names if x not in group.load_balancers])
self.update_attached_elbs(group_name)
def describe_load_balancers(self, group_name):
return self.autoscaling_groups[group_name].load_balancers
def detach_load_balancers(self, group_name, load_balancer_names):
group = self.autoscaling_groups[group_name]
group_instance_ids = set(
state.instance.id for state in group.instance_states)
elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers)
for elb in elbs:
self.elb_backend.deregister_instances(
elb.name, group_instance_ids)
group.load_balancers = [x for x in group.load_balancers if x not in load_balancer_names]
autoscaling_backends = {}
for region, ec2_backend in ec2_backends.items():
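
Not part of the diff: the capacity check added to attach_instances above raises ResourceContentionError once a group is already at MaxSize, and none of the tests added in this commit exercise that branch. A minimal sketch of how it might look from boto3 follows; the assumption that the backend's 500 surfaces as a botocore ClientError carrying that error code (after the client's retries) is mine, and every resource name is made up.

import boto3
from botocore.exceptions import ClientError
from moto import mock_autoscaling, mock_ec2

@mock_autoscaling
@mock_ec2
def attach_beyond_max_size_sketch():
    client = boto3.client('autoscaling', region_name='us-east-1')
    client.create_launch_configuration(LaunchConfigurationName='test_launch_configuration')
    client.create_auto_scaling_group(
        AutoScalingGroupName='test_asg',
        LaunchConfigurationName='test_launch_configuration',
        MinSize=0, MaxSize=2, DesiredCapacity=2)  # group starts at MaxSize

    ec2 = boto3.resource('ec2', 'us-east-1')
    extra = [i.id for i in ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)]
    try:
        # 2 existing instances + 1 attached > MaxSize, so the backend should refuse this
        client.attach_instances(AutoScalingGroupName='test_asg', InstanceIds=extra)
    except ClientError as err:
        # Expected error code, per the new exceptions module in this commit
        assert err.response['Error']['Code'] == 'ResourceContentionError'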

View File

@ -1,6 +1,7 @@
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.core.utils import amz_crc32, amzn_request_id
from .models import autoscaling_backends
@ -87,6 +88,31 @@ class AutoScalingResponse(BaseResponse):
template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def attach_instances(self):
group_name = self._get_param('AutoScalingGroupName')
instance_ids = self._get_multi_param("InstanceIds.member")
self.autoscaling_backend.attach_instances(
group_name, instance_ids)
template = self.response_template(ATTACH_INSTANCES_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def detach_instances(self):
group_name = self._get_param('AutoScalingGroupName')
instance_ids = self._get_multi_param("InstanceIds.member")
should_decrement_string = self._get_param('ShouldDecrementDesiredCapacity')
if should_decrement_string == 'true':
should_decrement = True
else:
should_decrement = False
detached_instances = self.autoscaling_backend.detach_instances(
group_name, instance_ids, should_decrement)
template = self.response_template(DETACH_INSTANCES_TEMPLATE)
return template.render(detached_instances=detached_instances)
def describe_auto_scaling_groups(self):
names = self._get_multi_param("AutoScalingGroupNames.member")
token = self._get_param("NextToken")
@ -186,6 +212,34 @@ class AutoScalingResponse(BaseResponse):
template = self.response_template(EXECUTE_POLICY_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def attach_load_balancers(self):
group_name = self._get_param('AutoScalingGroupName')
load_balancer_names = self._get_multi_param("LoadBalancerNames.member")
self.autoscaling_backend.attach_load_balancers(
group_name, load_balancer_names)
template = self.response_template(ATTACH_LOAD_BALANCERS_TEMPLATE)
return template.render()
@amz_crc32
@amzn_request_id
def describe_load_balancers(self):
group_name = self._get_param('AutoScalingGroupName')
load_balancers = self.autoscaling_backend.describe_load_balancers(group_name)
template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE)
return template.render(load_balancers=load_balancers)
@amz_crc32
@amzn_request_id
def detach_load_balancers(self):
group_name = self._get_param('AutoScalingGroupName')
load_balancer_names = self._get_multi_param("LoadBalancerNames.member")
self.autoscaling_backend.detach_load_balancers(
group_name, load_balancer_names)
template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE)
return template.render()
CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """<CreateLaunchConfigurationResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<ResponseMetadata>
@ -284,6 +338,40 @@ CREATE_AUTOSCALING_GROUP_TEMPLATE = """<CreateAutoScalingGroupResponse xmlns="ht
</ResponseMetadata>
</CreateAutoScalingGroupResponse>"""
ATTACH_INSTANCES_TEMPLATE = """<AttachInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachInstancesResult>
</AttachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
</ResponseMetadata>
</AttachInstancesResponse>"""
DETACH_INSTANCES_TEMPLATE = """<DetachInstancesResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachInstancesResult>
<Activities>
{% for instance in detached_instances %}
<member>
<ActivityId>5091cb52-547a-47ce-a236-c9ccbc2cb2c9EXAMPLE</ActivityId>
<AutoScalingGroupName>{{ group_name }}</AutoScalingGroupName>
<Cause>
At 2017-10-15T15:55:21Z instance {{ instance.instance.id }} was detached in response to a user request.
</Cause>
<Description>Detaching EC2 instance: {{ instance.instance.id }}</Description>
<StartTime>2017-10-15T15:55:21Z</StartTime>
<EndTime>2017-10-15T15:55:21Z</EndTime>
<StatusCode>InProgress</StatusCode>
<StatusMessage>InProgress</StatusMessage>
<Progress>50</Progress>
<Details>details</Details>
</member>
{% endfor %}
</Activities>
</DetachInstancesResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
</ResponseMetadata>
</DetachInstancesResponse>"""
DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """<DescribeAutoScalingGroupsResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeAutoScalingGroupsResult>
<AutoScalingGroups>
@ -450,3 +538,33 @@ DELETE_POLICY_TEMPLATE = """<DeleteScalingPolicyResponse xmlns="http://autoscali
<RequestId>70a76d42-9665-11e2-9fdf-211deEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteScalingPolicyResponse>"""
ATTACH_LOAD_BALANCERS_TEMPLATE = """<AttachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<AttachLoadBalancersResult></AttachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
</ResponseMetadata>
</AttachLoadBalancersResponse>"""
DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DescribeLoadBalancersResult>
<LoadBalancers>
{% for load_balancer in load_balancers %}
<member>
<LoadBalancerName>{{ load_balancer }}</LoadBalancerName>
<State>Added</State>
</member>
{% endfor %}
</LoadBalancers>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>"""
DETACH_LOAD_BALANCERS_TEMPLATE = """<DetachLoadBalancersResponse xmlns="http://autoscaling.amazonaws.com/doc/2011-01-01/">
<DetachLoadBalancersResult></DetachLoadBalancersResult>
<ResponseMetadata>
<RequestId>{{ requestid }}</RequestId>
</ResponseMetadata>
</DetachLoadBalancersResponse>"""

View File

@ -9,6 +9,7 @@ try:
except:
from urllib.parse import unquote, urlparse, parse_qs
from moto.core.utils import amz_crc32, amzn_request_id
from moto.core.responses import BaseResponse
@ -32,6 +33,8 @@ class LambdaResponse(BaseResponse):
else:
raise ValueError("Cannot handle request")
@amz_crc32
@amzn_request_id
def invoke(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'POST':
@ -39,6 +42,8 @@ class LambdaResponse(BaseResponse):
else:
raise ValueError("Cannot handle request")
@amz_crc32
@amzn_request_id
def invoke_async(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'POST':

View File

@ -199,10 +199,14 @@ class BaseResponse(_TemplateEnvironmentMixin):
response = method()
except HTTPException as http_error:
response = http_error.description, dict(status=http_error.code)
if isinstance(response, six.string_types):
return 200, headers, response
else:
body, new_headers = response
if len(response) == 2:
body, new_headers = response
else:
status, new_headers, body = response
status = new_headers.get('status', 200)
headers.update(new_headers)
# Cast status to string

View File

@ -1,10 +1,16 @@
from __future__ import unicode_literals
from functools import wraps
import binascii
import datetime
import inspect
import random
import re
import six
import string
REQUEST_ID_LONG = string.digits + string.ascii_uppercase
def camelcase_to_underscores(argument):
@ -194,3 +200,90 @@ def unix_time(dt=None):
def unix_time_millis(dt=None):
return unix_time(dt) * 1000.0
def gen_amz_crc32(response, headerdict=None):
if not isinstance(response, bytes):
response = response.encode()
crc = str(binascii.crc32(response))
if headerdict is not None and isinstance(headerdict, dict):
headerdict.update({'x-amz-crc32': crc})
return crc
def gen_amzn_requestid_long(headerdict=None):
req_id = ''.join([random.choice(REQUEST_ID_LONG) for _ in range(0, 52)])
if headerdict is not None and isinstance(headerdict, dict):
headerdict.update({'x-amzn-requestid': req_id})
return req_id
def amz_crc32(f):
@wraps(f)
def _wrapper(*args, **kwargs):
response = f(*args, **kwargs)
headers = {}
status = 200
if isinstance(response, six.string_types):
body = response
else:
if len(response) == 2:
body, new_headers = response
status = new_headers.get('status', 200)
else:
status, new_headers, body = response
headers.update(new_headers)
# Cast status to string
if "status" in headers:
headers['status'] = str(headers['status'])
try:
# Doesn't work on Python 2 for some odd unicode strings
gen_amz_crc32(body, headers)
except Exception:
pass
return status, headers, body
return _wrapper
def amzn_request_id(f):
@wraps(f)
def _wrapper(*args, **kwargs):
response = f(*args, **kwargs)
headers = {}
status = 200
if isinstance(response, six.string_types):
body = response
else:
if len(response) == 2:
body, new_headers = response
status = new_headers.get('status', 200)
else:
status, new_headers, body = response
headers.update(new_headers)
# Cast status to string
if "status" in headers:
headers['status'] = str(headers['status'])
request_id = gen_amzn_requestid_long(headers)
# Update request ID in XML
try:
body = body.replace('{{ requestid }}', request_id)
except Exception: # Will just ignore if it can't work on bytes (which are str on Python 2)
pass
return status, headers, body
return _wrapper
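
Not part of the diff: a quick sketch of how the two new decorators are meant to compose on a response method. The handler below is made up; the decorator order mirrors the SQS usage later in this commit, so amzn_request_id runs first (generating the id and filling the {{ requestid }} placeholder) and amz_crc32 then checksums the final body.

from moto.core.utils import amz_crc32, amzn_request_id

@amz_crc32          # outermost: adds x-amz-crc32 computed over the substituted body
@amzn_request_id    # innermost: adds x-amzn-requestid and fills the placeholder
def example_action():
    # Returning a plain string means "200 OK" with default headers
    return "<ExampleResponse><RequestId>{{ requestid }}</RequestId></ExampleResponse>"

status, headers, body = example_action()
assert status == 200
assert 'x-amzn-requestid' in headers and 'x-amz-crc32' in headers
assert headers['x-amzn-requestid'] in body  # the placeholder was replaced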

View File

@ -61,15 +61,27 @@ def get_filter_expression(expr, names, values):
# Do substitutions
for key, value in names.items():
expr = expr.replace(key, value)
# Store correct types of values for use later
values_map = {}
for key, value in values.items():
if 'N' in value:
expr.replace(key, float(value['N']))
values_map[key] = float(value['N'])
elif 'BOOL' in value:
values_map[key] = value['BOOL']
elif 'S' in value:
values_map[key] = value['S']
elif 'NS' in value:
values_map[key] = tuple(value['NS'])
elif 'SS' in value:
values_map[key] = tuple(value['SS'])
elif 'L' in value:
values_map[key] = tuple(value['L'])
else:
expr = expr.replace(key, value['S'])
raise NotImplementedError()
# Remove all spaces, tbf we could just skip them in the next step.
# The number of known options is really small so we can do a fair bit of cheating
#expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6'
expr = list(expr)
# DodgyTokenisation stage 1
@ -130,13 +142,9 @@ def get_filter_expression(expr, names, values):
next_token = six.next(token_iterator)
while next_token != ')':
try:
next_token = int(next_token)
except ValueError:
try:
next_token = float(next_token)
except ValueError:
pass
if next_token in values_map:
next_token = values_map[next_token]
tuple_list.append(next_token)
next_token = six.next(token_iterator)
@ -149,10 +157,14 @@ def get_filter_expression(expr, names, values):
tokens2.append(tuple(tuple_list))
elif token == 'BETWEEN':
field = tokens2.pop()
op1 = int(six.next(token_iterator))
# if values map contains a number, it would be a float
# so we need to int() it anyway
op1 = six.next(token_iterator)
op1 = int(values_map.get(op1, op1))
and_op = six.next(token_iterator)
assert and_op == 'AND'
op2 = int(six.next(token_iterator))
op2 = six.next(token_iterator)
op2 = int(values_map.get(op2, op2))
tokens2.append(['between', field, op1, op2])
elif is_function(token):
@ -169,14 +181,15 @@ def get_filter_expression(expr, names, values):
tokens2.append(function_list)
else:
try:
token = int(token)
except ValueError:
try:
token = float(token)
except ValueError:
pass
tokens2.append(token)
# Convert tokens back to real types
if token in values_map:
token = values_map[token]
# Need to join >= <= <>
if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')):
tokens2.append(tokens2.pop() + token)
else:
tokens2.append(token)
# Start of the Shunting-Yard algorithm. <-- Proper beast algorithm!
def is_number(val):

View File

@ -4,7 +4,7 @@ import six
import re
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .models import dynamodb_backend2, dynamo_json_dump
@ -24,6 +24,7 @@ class DynamoHandler(BaseResponse):
def error(self, type_, message, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message})
@amzn_request_id
def call_action(self):
self.body = json.loads(self.body or '{}')
endpoint = self.get_endpoint_name(self.headers)
@ -56,6 +57,7 @@ class DynamoHandler(BaseResponse):
response = {"TableNames": tables}
if limit and len(all_tables) > start + limit:
response["LastEvaluatedTableName"] = tables[-1]
return dynamo_json_dump(response)
def create_table(self):

View File

@ -30,7 +30,7 @@ class InstanceResponse(BaseResponse):
if max_results and len(reservations) > (start + max_results):
next_token = reservations_resp[-1].id
template = self.response_template(EC2_DESCRIBE_INSTANCES)
return template.render(reservations=reservations_resp, next_token=next_token)
return template.render(reservations=reservations_resp, next_token=next_token).replace('True', 'true').replace('False', 'false')
def run_instances(self):
min_count = int(self._get_param('MinCount', if_none='1'))
@ -144,7 +144,12 @@ class InstanceResponse(BaseResponse):
"""
Handles requests which are generated by code similar to:
instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
instance.modify_attribute(
BlockDeviceMappings=[{
'DeviceName': '/dev/sda1',
'Ebs': {'DeleteOnTermination': True}
}]
)
The querystring contains information similar to:

View File

@ -81,6 +81,9 @@ class FakeKey(BaseModel):
def restore(self, days):
self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)
def increment_version(self):
self._version_id += 1
@property
def etag(self):
if self._etag is None:
@ -323,19 +326,10 @@ class CorsRule(BaseModel):
def __init__(self, allowed_methods, allowed_origins, allowed_headers=None, expose_headers=None,
max_age_seconds=None):
# Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`,
# whereas Python 3 is OK with str. This causes issues with the XML parser, which returns
# unicode strings in Python 2. So, need to do this to make it work in both Python 2 and 3:
import sys
if sys.version_info >= (3, 0):
str_type = str
else:
str_type = basestring # noqa
self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, str_type) else allowed_methods
self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, str_type) else allowed_origins
self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, str_type) else allowed_headers
self.exposed_headers = [expose_headers] if isinstance(expose_headers, str_type) else expose_headers
self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, six.string_types) else allowed_methods
self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, six.string_types) else allowed_origins
self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, six.string_types) else allowed_headers
self.exposed_headers = [expose_headers] if isinstance(expose_headers, six.string_types) else expose_headers
self.max_age_seconds = max_age_seconds
@ -389,25 +383,16 @@ class FakeBucket(BaseModel):
if len(rules) > 100:
raise MalformedXML()
# Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`,
# whereas Python 3 is OK with str. This causes issues with the XML parser, which returns
# unicode strings in Python 2. So, need to do this to make it work in both Python 2 and 3:
import sys
if sys.version_info >= (3, 0):
str_type = str
else:
str_type = basestring # noqa
for rule in rules:
assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], str_type)
assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], str_type)
assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], six.string_types)
assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], six.string_types)
assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(rule.get("AllowedHeader", ""),
str_type)
six.string_types)
assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(rule.get("ExposedHeader", ""),
str_type)
assert isinstance(rule.get("MaxAgeSeconds", "0"), str_type)
six.string_types)
assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types)
if isinstance(rule["AllowedMethod"], str_type):
if isinstance(rule["AllowedMethod"], six.string_types):
methods = [rule["AllowedMethod"]]
else:
methods = rule["AllowedMethod"]
@ -745,6 +730,10 @@ class S3Backend(BaseBackend):
if dest_key_name != src_key_name:
key = key.copy(dest_key_name)
dest_bucket.keys[dest_key_name] = key
# By this point, the destination key must exist, or KeyError
if dest_bucket.is_versioned:
dest_bucket.keys[dest_key_name].increment_version()
if storage is not None:
key.set_storage_class(storage)
if acl is not None:

View File

@ -146,7 +146,7 @@ class PlatformEndpoint(BaseModel):
if 'Token' not in self.attributes:
self.attributes['Token'] = self.token
if 'Enabled' not in self.attributes:
self.attributes['Enabled'] = True
self.attributes['Enabled'] = 'True'
@property
def enabled(self):

View File

@ -2,7 +2,7 @@ from __future__ import unicode_literals
from six.moves.urllib.parse import urlparse
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id
from .utils import parse_message_attributes
from .models import sqs_backends
from .exceptions import (
@ -52,6 +52,8 @@ class SQSResponse(BaseResponse):
return visibility_timeout
@amz_crc32 # crc last as request_id can edit XML
@amzn_request_id
def call_action(self):
status_code, headers, body = super(SQSResponse, self).call_action()
if status_code == 404:
@ -296,7 +298,7 @@ CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
<VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
</CreateQueueResult>
<ResponseMetadata>
<RequestId>7a62c49f-347e-4fc4-9331-6e8e7a96aa73</RequestId>
<RequestId>{{ requestid }}</RequestId>
</ResponseMetadata>
</CreateQueueResponse>"""

View File

@ -1,6 +1,7 @@
from __future__ import unicode_literals
from .models import xray_backends
from ..core.models import base_decorator
from .mock_client import mock_xray_client, XRaySegment # noqa
xray_backend = xray_backends['us-east-1']
mock_xray = base_decorator(xray_backends)

moto/xray/mock_client.py Normal file
View File

@ -0,0 +1,83 @@
from functools import wraps
import os
from moto.xray import xray_backends
import aws_xray_sdk.core
from aws_xray_sdk.core.context import Context as AWSContext
from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter
class MockEmitter(UDPEmitter):
"""
Replaces the code that sends UDP to local X-Ray daemon
"""
def __init__(self, daemon_address='127.0.0.1:2000'):
address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)
self._ip, self._port = self._parse_address(address)
def _xray_backend(self, region):
return xray_backends[region]
def send_entity(self, entity):
# Hack to get region
# region = entity.subsegments[0].aws['region']
# xray = self._xray_backend(region)
# TODO store X-Ray data, pretty sure X-Ray needs refactor for this
pass
def _send_data(self, data):
raise RuntimeError('Should not be running this')
def mock_xray_client(f):
"""
Mocks the X-Ray sdk by pwning its evil singleton with our methods
The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.
This means the Context() will be very unhappy if an env var isn't present, so we set that, save
the old context, then supply our new context.
We also patch the Emitter by subclassing the UDPEmitter class, replacing its methods, and pushing
that into the recorder instance.
"""
@wraps(f)
def _wrapped(*args, **kwargs):
print("Starting X-Ray Patch")
old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')
os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'
old_xray_context = aws_xray_sdk.core.xray_recorder._context
old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter
aws_xray_sdk.core.xray_recorder._context = AWSContext()
aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
try:
f(*args, **kwargs)
finally:
if old_xray_context_var is None:
del os.environ['AWS_XRAY_CONTEXT_MISSING']
else:
os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var
aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter
aws_xray_sdk.core.xray_recorder._context = old_xray_context
return _wrapped
class XRaySegment(object):
"""
X-Ray is request oriented: when a request comes in, middleware such as Django (or Lambda automatically) marks
the start of a segment, which stays open for the lifetime of the request. During that time subsegments may be
generated by calling other SDK-aware services or using some boto functions. Once the request is finished, the
middleware also stops the segment, causing it to be emitted via UDP.
During testing we're going to have to control the start and end of a segment via context managers.
"""
def __enter__(self):
aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
aws_xray_sdk.core.xray_recorder.end_segment()
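
Not part of the diff: the intended test-side usage is just the new decorator plus the context manager. A bare-bones sketch under that reading follows; the fuller versions, including the SDK patcher bookkeeping, are in the new tests at the end of this commit.

import boto3
from moto import mock_xray_client, mock_dynamodb2, XRaySegment

@mock_xray_client   # swaps the SDK's emitter and context for the mocks above
@mock_dynamodb2
def xray_usage_sketch():
    client = boto3.client('dynamodb', region_name='us-east-1')
    # Open and close a segment manually, as request middleware normally would
    with XRaySegment():
        client.list_tables()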

View File

@ -9,6 +9,7 @@ install_requires = [
"Jinja2>=2.8",
"boto>=2.36.0",
"boto3>=1.2.1",
"botocore>=1.7.12",
"cookies",
"cryptography>=2.0.0",
"requests>=2.5",
@ -19,7 +20,8 @@ install_requires = [
"pytz",
"python-dateutil<3.0.0,>=2.1",
"mock",
"docker>=2.5.1"
"docker>=2.5.1",
"aws-xray-sdk==0.92.2"
]
extras_require = {
@ -36,7 +38,7 @@ else:
setup(
name='moto',
version='1.1.22',
version='1.1.23',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',

View File

@ -8,7 +8,7 @@ from boto.ec2.autoscale import Tag
import boto.ec2.elb
import sure # noqa
from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated, mock_ec2
from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2
from tests.helpers import requires_boto_gte
@ -484,6 +484,168 @@ Boto3
'''
@mock_autoscaling
@mock_elb
def test_describe_load_balancers():
INSTANCE_COUNT = 2
elb_client = boto3.client('elb', region_name='us-east-1')
elb_client.create_load_balancer(
LoadBalancerName='my-lb',
Listeners=[
{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
AvailabilityZones=['us-east-1a', 'us-east-1b']
)
client = boto3.client('autoscaling', region_name='us-east-1')
client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
LoadBalancerNames=['my-lb'],
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
Tags=[{
"ResourceId": 'test_asg',
"Key": 'test_key',
"Value": 'test_value',
"PropagateAtLaunch": True
}]
)
response = client.describe_load_balancers(AutoScalingGroupName='test_asg')
list(response['LoadBalancers']).should.have.length_of(1)
response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb')
@mock_autoscaling
@mock_elb
def test_create_elb_and_autoscaling_group_no_relationship():
INSTANCE_COUNT = 2
ELB_NAME = 'my-elb'
elb_client = boto3.client('elb', region_name='us-east-1')
elb_client.create_load_balancer(
LoadBalancerName=ELB_NAME,
Listeners=[
{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
AvailabilityZones=['us-east-1a', 'us-east-1b']
)
client = boto3.client('autoscaling', region_name='us-east-1')
client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
)
# autoscaling group and elb should have no relationship
response = client.describe_load_balancers(
AutoScalingGroupName='test_asg'
)
list(response['LoadBalancers']).should.have.length_of(0)
response = elb_client.describe_load_balancers(
LoadBalancerNames=[ELB_NAME]
)
list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0)
@mock_autoscaling
@mock_elb
def test_attach_load_balancer():
INSTANCE_COUNT = 2
elb_client = boto3.client('elb', region_name='us-east-1')
elb_client.create_load_balancer(
LoadBalancerName='my-lb',
Listeners=[
{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
AvailabilityZones=['us-east-1a', 'us-east-1b']
)
client = boto3.client('autoscaling', region_name='us-east-1')
client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
Tags=[{
"ResourceId": 'test_asg',
"Key": 'test_key',
"Value": 'test_value',
"PropagateAtLaunch": True
}]
)
response = client.attach_load_balancers(
AutoScalingGroupName='test_asg',
LoadBalancerNames=['my-lb'])
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response = elb_client.describe_load_balancers(
LoadBalancerNames=['my-lb']
)
list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(INSTANCE_COUNT)
@mock_autoscaling
@mock_elb
def test_detach_load_balancer():
INSTANCE_COUNT = 2
elb_client = boto3.client('elb', region_name='us-east-1')
elb_client.create_load_balancer(
LoadBalancerName='my-lb',
Listeners=[
{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
AvailabilityZones=['us-east-1a', 'us-east-1b']
)
client = boto3.client('autoscaling', region_name='us-east-1')
client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
LoadBalancerNames=['my-lb'],
MinSize=0,
MaxSize=INSTANCE_COUNT,
DesiredCapacity=INSTANCE_COUNT,
Tags=[{
"ResourceId": 'test_asg',
"Key": 'test_key',
"Value": 'test_value',
"PropagateAtLaunch": True
}]
)
response = client.detach_load_balancers(
AutoScalingGroupName='test_asg',
LoadBalancerNames=['my-lb'])
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response = elb_client.describe_load_balancers(
LoadBalancerNames=['my-lb']
)
list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0)
response = client.describe_load_balancers(AutoScalingGroupName='test_asg')
list(response['LoadBalancers']).should.have.length_of(0)
@mock_autoscaling
def test_create_autoscaling_group_boto3():
client = boto3.client('autoscaling', region_name='us-east-1')
@ -653,3 +815,147 @@ def test_autoscaling_describe_policies_boto3():
response['ScalingPolicies'].should.have.length_of(1)
response['ScalingPolicies'][0][
'PolicyName'].should.equal('test_policy_down')
@mock_autoscaling
@mock_ec2
def test_detach_one_instance_decrement():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=2,
DesiredCapacity=2,
Tags=[
{'ResourceId': 'test_asg',
'ResourceType': 'auto-scaling-group',
'Key': 'propogated-tag-key',
'Value': 'propogate-tag-value',
'PropagateAtLaunch': True
}]
)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=['test_asg']
)
instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId']
instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId']
ec2_client = boto3.client('ec2', region_name='us-east-1')
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
response = client.detach_instances(
AutoScalingGroupName='test_asg',
InstanceIds=[instance_to_detach],
ShouldDecrementDesiredCapacity=True
)
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=['test_asg']
)
response['AutoScalingGroups'][0]['Instances'].should.have.length_of(1)
# test to ensure tag has been removed
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
tags = response['Reservations'][0]['Instances'][0]['Tags']
tags.should.have.length_of(1)
# test to ensure tag is present on other instance
response = ec2_client.describe_instances(InstanceIds=[instance_to_keep])
tags = response['Reservations'][0]['Instances'][0]['Tags']
tags.should.have.length_of(2)
@mock_autoscaling
@mock_ec2
def test_detach_one_instance():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=2,
DesiredCapacity=2,
Tags=[
{'ResourceId': 'test_asg',
'ResourceType': 'auto-scaling-group',
'Key': 'propogated-tag-key',
'Value': 'propogate-tag-value',
'PropagateAtLaunch': True
}]
)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=['test_asg']
)
instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId']
instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId']
ec2_client = boto3.client('ec2', region_name='us-east-1')
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
response = client.detach_instances(
AutoScalingGroupName='test_asg',
InstanceIds=[instance_to_detach],
ShouldDecrementDesiredCapacity=False
)
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=['test_asg']
)
# test to ensure instance was replaced
response['AutoScalingGroups'][0]['Instances'].should.have.length_of(2)
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
tags = response['Reservations'][0]['Instances'][0]['Tags']
tags.should.have.length_of(1)
response = ec2_client.describe_instances(InstanceIds=[instance_to_keep])
tags = response['Reservations'][0]['Instances'][0]['Tags']
tags.should.have.length_of(2)
@mock_autoscaling
@mock_ec2
def test_attach_one_instance():
client = boto3.client('autoscaling', region_name='us-east-1')
_ = client.create_launch_configuration(
LaunchConfigurationName='test_launch_configuration'
)
client.create_auto_scaling_group(
AutoScalingGroupName='test_asg',
LaunchConfigurationName='test_launch_configuration',
MinSize=0,
MaxSize=4,
DesiredCapacity=2,
Tags=[
{'ResourceId': 'test_asg',
'ResourceType': 'auto-scaling-group',
'Key': 'propogated-tag-key',
'Value': 'propogate-tag-value',
'PropagateAtLaunch': True
}]
)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=['test_asg']
)
ec2 = boto3.resource('ec2', 'us-east-1')
instances_to_add = [x.id for x in ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)]
response = client.attach_instances(
AutoScalingGroupName='test_asg',
InstanceIds=instances_to_add
)
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
response = client.describe_auto_scaling_groups(
AutoScalingGroupNames=['test_asg']
)
response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3)

View File

@ -581,24 +581,24 @@ def test_filter_expression():
row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}})
# AND test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {})
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}})
filter_expr.expr(row1).should.be(True)
filter_expr.expr(row2).should.be(False)
# OR test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 5 OR Id=8', {}, {})
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}})
filter_expr.expr(row1).should.be(True)
# BETWEEN test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN 5 AND 10', {}, {})
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}})
filter_expr.expr(row1).should.be(True)
# PAREN test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 8 AND (Subs = 8 OR Subs = 5)', {}, {})
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}})
filter_expr.expr(row1).should.be(True)
# IN test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN (7,8, 9)', {}, {})
filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}})
filter_expr.expr(row1).should.be(True)
# attribute function tests
@ -655,6 +655,63 @@ def test_scan_filter():
assert response['Count'] == 1
@mock_dynamodb2
def test_scan_filter2():
client = boto3.client('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'N': '1'}
}
)
response = client.scan(
TableName='test1',
Select='ALL_ATTRIBUTES',
FilterExpression='#tb >= :dt',
ExpressionAttributeNames={"#tb": "app"},
ExpressionAttributeValues={":dt": {"N": str(1)}}
)
assert response['Count'] == 1
@mock_dynamodb2
def test_scan_filter3():
client = boto3.client('dynamodb', region_name='us-east-1')
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
client.create_table(
TableName='test1',
AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
)
client.put_item(
TableName='test1',
Item={
'client': {'S': 'client1'},
'app': {'N': '1'},
'active': {'BOOL': True}
}
)
table = dynamodb.Table('test1')
response = table.scan(
FilterExpression=Attr('active').eq(True)
)
assert response['Count'] == 1
@mock_dynamodb2
def test_bad_scan_filter():
client = boto3.client('dynamodb', region_name='us-east-1')
@ -680,7 +737,6 @@ def test_bad_scan_filter():
raise RuntimeError('Should have raised ResourceInUseException')
@mock_dynamodb2
def test_duplicate_create():
client = boto3.client('dynamodb', region_name='us-east-1')

View File

@ -1113,3 +1113,20 @@ def test_get_instance_by_security_group():
assert len(security_group_instances) == 1
assert security_group_instances[0].id == instance.id
@mock_ec2
def test_modify_delete_on_termination():
ec2_client = boto3.resource('ec2', region_name='us-west-1')
result = ec2_client.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)
instance = result[0]
instance.load()
instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False)
instance.modify_attribute(
BlockDeviceMappings=[{
'DeviceName': '/dev/sda1',
'Ebs': {'DeleteOnTermination': True}
}]
)
instance.load()
instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True)

View File

@ -414,7 +414,8 @@ def test_get_authorization_token_assume_region():
client = boto3.client('ecr', region_name='us-east-1')
auth_token_response = client.get_authorization_token()
list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata'])
auth_token_response.should.contain('authorizationData')
auth_token_response.should.contain('ResponseMetadata')
auth_token_response['authorizationData'].should.equal([
{
'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu',
@ -429,7 +430,8 @@ def test_get_authorization_token_explicit_regions():
client = boto3.client('ecr', region_name='us-east-1')
auth_token_response = client.get_authorization_token(registryIds=['us-east-1', 'us-west-1'])
list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata'])
auth_token_response.should.contain('authorizationData')
auth_token_response.should.contain('ResponseMetadata')
auth_token_response['authorizationData'].should.equal([
{
'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu',

View File

@ -1364,6 +1364,29 @@ def test_boto3_head_object_with_versioning():
old_head_object['ContentLength'].should.equal(len(old_content))
@mock_s3
def test_boto3_copy_object_with_versioning():
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'})
client.put_object(Bucket='blah', Key='test1', Body=b'test1')
client.put_object(Bucket='blah', Key='test2', Body=b'test2')
obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId']
obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId']
# Versions should be the same
obj1_version.should.equal(obj2_version)
client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2')
obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId']
# Version should be different to previous version
obj2_version_new.should_not.equal(obj2_version)
@mock_s3
def test_boto3_head_object_if_modified_since():
s3 = boto3.client('s3', region_name='us-east-1')

View File

@ -0,0 +1,72 @@
from __future__ import unicode_literals
from moto import mock_xray_client, XRaySegment, mock_dynamodb2
import sure # noqa
import boto3
from moto.xray.mock_client import MockEmitter
import aws_xray_sdk.core as xray_core
import aws_xray_sdk.core.patcher as xray_core_patcher
import botocore.client
import botocore.endpoint
original_make_api_call = botocore.client.BaseClient._make_api_call
original_encode_headers = botocore.endpoint.Endpoint._encode_headers
import requests
original_session_request = requests.Session.request
original_session_prep_request = requests.Session.prepare_request
@mock_xray_client
@mock_dynamodb2
def test_xray_dynamo_request_id():
# Could be run in any order, so we need to tell the SDK that it's been unpatched
xray_core_patcher._PATCHED_MODULES = set()
xray_core.patch_all()
client = boto3.client('dynamodb', region_name='us-east-1')
with XRaySegment():
resp = client.list_tables()
resp['ResponseMetadata'].should.contain('RequestId')
id1 = resp['ResponseMetadata']['RequestId']
with XRaySegment():
client.list_tables()
resp = client.list_tables()
id2 = resp['ResponseMetadata']['RequestId']
id1.should_not.equal(id2)
setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
setattr(requests.Session, 'request', original_session_request)
setattr(requests.Session, 'prepare_request', original_session_prep_request)
@mock_xray_client
def test_xray_udp_emitter_patched():
# Could be run in any order, so we need to tell the SDK that it's been unpatched
xray_core_patcher._PATCHED_MODULES = set()
xray_core.patch_all()
assert isinstance(xray_core.xray_recorder._emitter, MockEmitter)
setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
setattr(requests.Session, 'request', original_session_request)
setattr(requests.Session, 'prepare_request', original_session_prep_request)
@mock_xray_client
def test_xray_context_patched():
# Could be run in any order, so we need to tell the SDK that it's been unpatched
xray_core_patcher._PATCHED_MODULES = set()
xray_core.patch_all()
xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR')
setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call)
setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers)
setattr(requests.Session, 'request', original_session_request)
setattr(requests.Session, 'prepare_request', original_session_prep_request)