Merge pull request #40 from spulec/master

Merge upstream
Bert Blommers 2020-04-26 10:20:18 +01:00 committed by GitHub
commit 991a740b47
50 changed files with 2694 additions and 249 deletions


@ -4,7 +4,10 @@ Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_
## Running the tests locally
Moto has a Makefile which has some helpful commands for getting set up. You should be able to run `make init` to install the dependencies and then `make test` to run the tests.
## Linting
Run `make lint` or `black --check moto tests` to verify whether your code conforms to the guidelines.
## Is there a missing feature?


@ -2878,15 +2878,15 @@
- [ ] test_failover
## elasticbeanstalk
0% implemented
13% implemented
- [ ] abort_environment_update
- [ ] apply_environment_managed_action
- [ ] check_dns_availability
- [ ] compose_environments
- [ ] create_application
- [X] create_application
- [ ] create_application_version
- [ ] create_configuration_template
- [ ] create_environment
- [X] create_environment
- [ ] create_platform_version
- [ ] create_storage_location
- [ ] delete_application
@ -2903,13 +2903,13 @@
- [ ] describe_environment_managed_action_history
- [ ] describe_environment_managed_actions
- [ ] describe_environment_resources
- [ ] describe_environments
- [X] describe_environments
- [ ] describe_events
- [ ] describe_instances_health
- [ ] describe_platform_version
- [ ] list_available_solution_stacks
- [X] list_available_solution_stacks
- [ ] list_platform_versions
- [ ] list_tags_for_resource
- [X] list_tags_for_resource
- [ ] rebuild_environment
- [ ] request_environment_info
- [ ] restart_app_server
@ -2921,7 +2921,7 @@
- [ ] update_application_version
- [ ] update_configuration_template
- [ ] update_environment
- [ ] update_tags_for_resource
- [X] update_tags_for_resource
- [ ] validate_configuration_settings
## elastictranscoder
@ -3351,11 +3351,11 @@
- [ ] update_listener
## glue
4% implemented
- [ ] batch_create_partition
11% implemented
- [X] batch_create_partition
- [ ] batch_delete_connection
- [ ] batch_delete_partition
- [ ] batch_delete_table
- [X] batch_delete_partition
- [X] batch_delete_table
- [ ] batch_delete_table_version
- [ ] batch_get_crawlers
- [ ] batch_get_dev_endpoints
@ -3372,7 +3372,7 @@
- [ ] create_dev_endpoint
- [ ] create_job
- [ ] create_ml_transform
- [ ] create_partition
- [X] create_partition
- [ ] create_script
- [ ] create_security_configuration
- [X] create_table
@ -3404,7 +3404,7 @@
- [ ] get_crawlers
- [ ] get_data_catalog_encryption_settings
- [X] get_database
- [ ] get_databases
- [X] get_databases
- [ ] get_dataflow_graph
- [ ] get_dev_endpoint
- [ ] get_dev_endpoints
@ -3418,7 +3418,7 @@
- [ ] get_ml_task_runs
- [ ] get_ml_transform
- [ ] get_ml_transforms
- [ ] get_partition
- [X] get_partition
- [ ] get_partitions
- [ ] get_plan
- [ ] get_resource_policy
@ -3470,8 +3470,8 @@
- [ ] update_dev_endpoint
- [ ] update_job
- [ ] update_ml_transform
- [ ] update_partition
- [ ] update_table
- [X] update_partition
- [X] update_table
- [ ] update_trigger
- [ ] update_user_defined_function
- [ ] update_workflow


@ -21,6 +21,7 @@ from .datasync import mock_datasync # noqa
from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # noqa
from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # noqa
from .dynamodbstreams import mock_dynamodbstreams # noqa
from .elasticbeanstalk import mock_elasticbeanstalk # noqa
from .ec2 import mock_ec2, mock_ec2_deprecated # noqa
from .ec2_instance_connect import mock_ec2_instance_connect # noqa
from .ecr import mock_ecr, mock_ecr_deprecated # noqa


@ -461,6 +461,7 @@ class RestAPI(BaseModel):
self.description = description
self.create_date = int(time.time())
self.api_key_source = kwargs.get("api_key_source") or "HEADER"
self.policy = kwargs.get("policy") or None
self.endpoint_configuration = kwargs.get("endpoint_configuration") or {
"types": ["EDGE"]
}
@ -485,6 +486,7 @@ class RestAPI(BaseModel):
"apiKeySource": self.api_key_source,
"endpointConfiguration": self.endpoint_configuration,
"tags": self.tags,
"policy": self.policy,
}
def add_child(self, path, parent_id=None):
@ -713,6 +715,7 @@ class APIGatewayBackend(BaseBackend):
api_key_source=None,
endpoint_configuration=None,
tags=None,
policy=None,
):
api_id = create_id()
rest_api = RestAPI(
@ -723,6 +726,7 @@ class APIGatewayBackend(BaseBackend):
api_key_source=api_key_source,
endpoint_configuration=endpoint_configuration,
tags=tags,
policy=policy,
)
self.apis[api_id] = rest_api
return rest_api


@ -59,6 +59,7 @@ class APIGatewayResponse(BaseResponse):
api_key_source = self._get_param("apiKeySource")
endpoint_configuration = self._get_param("endpointConfiguration")
tags = self._get_param("tags")
policy = self._get_param("policy")
# Param validation
if api_key_source and api_key_source not in API_KEY_SOURCES:
@ -94,6 +95,7 @@ class APIGatewayResponse(BaseResponse):
api_key_source=api_key_source,
endpoint_configuration=endpoint_configuration,
tags=tags,
policy=policy,
)
return 200, {}, json.dumps(rest_api.to_dict())


@ -1006,11 +1006,11 @@ class LambdaBackend(BaseBackend):
return True
return False
def add_policy_statement(self, function_name, raw):
def add_permission(self, function_name, raw):
fn = self.get_function(function_name)
fn.policy.add_statement(raw)
def del_policy_statement(self, function_name, sid, revision=""):
def remove_permission(self, function_name, sid, revision=""):
fn = self.get_function(function_name)
fn.policy.del_statement(sid, revision)


@ -146,7 +146,7 @@ class LambdaResponse(BaseResponse):
function_name = path.split("/")[-2]
if self.lambda_backend.get_function(function_name):
statement = self.body
self.lambda_backend.add_policy_statement(function_name, statement)
self.lambda_backend.add_permission(function_name, statement)
return 200, {}, json.dumps({"Statement": statement})
else:
return 404, {}, "{}"
@ -166,9 +166,7 @@ class LambdaResponse(BaseResponse):
statement_id = path.split("/")[-1].split("?")[0]
revision = querystring.get("RevisionId", "")
if self.lambda_backend.get_function(function_name):
self.lambda_backend.del_policy_statement(
function_name, statement_id, revision
)
self.lambda_backend.remove_permission(function_name, statement_id, revision)
return 204, {}, "{}"
else:
return 404, {}, "{}"


@ -23,6 +23,7 @@ from moto.ec2 import ec2_backends
from moto.ec2_instance_connect import ec2_instance_connect_backends
from moto.ecr import ecr_backends
from moto.ecs import ecs_backends
from moto.elasticbeanstalk import eb_backends
from moto.elb import elb_backends
from moto.elbv2 import elbv2_backends
from moto.emr import emr_backends
@ -77,6 +78,7 @@ BACKENDS = {
"ec2_instance_connect": ec2_instance_connect_backends,
"ecr": ecr_backends,
"ecs": ecs_backends,
"elasticbeanstalk": eb_backends,
"elb": elb_backends,
"elbv2": elbv2_backends,
"events": events_backends,


@ -239,8 +239,11 @@ class FakeStack(BaseModel):
self.cross_stack_resources = cross_stack_resources or {}
self.resource_map = self._create_resource_map()
self.output_map = self._create_output_map()
self._add_stack_event("CREATE_COMPLETE")
self.status = "CREATE_COMPLETE"
if create_change_set:
self.status = "REVIEW_IN_PROGRESS"
else:
self.create_resources()
self._add_stack_event("CREATE_COMPLETE")
self.creation_time = datetime.utcnow()
def _create_resource_map(self):
@ -253,7 +256,7 @@ class FakeStack(BaseModel):
self.template_dict,
self.cross_stack_resources,
)
resource_map.create()
resource_map.load()
return resource_map
def _create_output_map(self):
@ -326,6 +329,10 @@ class FakeStack(BaseModel):
def exports(self):
return self.output_map.exports
def create_resources(self):
self.resource_map.create()
self.status = "CREATE_COMPLETE"
def update(self, template, role_arn=None, parameters=None, tags=None):
self._add_stack_event(
"UPDATE_IN_PROGRESS", resource_status_reason="User Initiated"
@ -640,6 +647,7 @@ class CloudFormationBackend(BaseBackend):
else:
stack._add_stack_event("UPDATE_IN_PROGRESS")
stack._add_stack_event("UPDATE_COMPLETE")
stack.create_resources()
return True
def describe_stacks(self, name_or_stack_id):


@ -531,14 +531,16 @@ class ResourceMap(collections_abc.Mapping):
for condition_name in self.lazy_condition_map:
self.lazy_condition_map[condition_name]
def create(self):
def load(self):
self.load_mapping()
self.transform_mapping()
self.load_parameters()
self.load_conditions()
def create(self):
# Since this is a lazy map, to create every object we just need to
# iterate through self.
# Assumes that self.load() has been called before
self.tags.update(
{
"aws:cloudformation:stack-name": self.get("AWS::StackName"),


@ -22,6 +22,14 @@ class Dimension(object):
self.name = name
self.value = value
def __eq__(self, item):
if isinstance(item, Dimension):
return self.name == item.name and self.value == item.value
return False
def __ne__(self, item):  # Only needed on Py2; Py3 defines it implicitly
return not self == item
def daterange(start, stop, step=timedelta(days=1), inclusive=False):
"""
@ -124,6 +132,17 @@ class MetricDatum(BaseModel):
Dimension(dimension["Name"], dimension["Value"]) for dimension in dimensions
]
def filter(self, namespace, name, dimensions):
if namespace and namespace != self.namespace:
return False
if name and name != self.name:
return False
if dimensions and any(
Dimension(d["Name"], d["Value"]) not in self.dimensions for d in dimensions
):
return False
return True
class Dashboard(BaseModel):
def __init__(self, name, body):
@ -202,6 +221,15 @@ class CloudWatchBackend(BaseBackend):
self.metric_data = []
self.paged_metric_data = {}
# Retrieve a list of all OOTB metrics that are provided by metric providers
# Computed on the fly
@property
def aws_metric_data(self):
md = []
for name, service in metric_providers.items():
md.extend(service.get_cloudwatch_metrics())
return md
def put_metric_alarm(
self,
name,
@ -295,6 +323,43 @@ class CloudWatchBackend(BaseBackend):
)
)
def get_metric_data(self, queries, start_time, end_time):
period_data = [
md for md in self.metric_data if start_time <= md.timestamp <= end_time
]
results = []
for query in queries:
query_ns = query["metric_stat._metric._namespace"]
query_name = query["metric_stat._metric._metric_name"]
query_data = [
md
for md in period_data
if md.namespace == query_ns and md.name == query_name
]
metric_values = [m.value for m in query_data]
result_vals = []
stat = query["metric_stat._stat"]
if len(metric_values) > 0:
if stat == "Average":
result_vals.append(sum(metric_values) / len(metric_values))
elif stat == "Minimum":
result_vals.append(min(metric_values))
elif stat == "Maximum":
result_vals.append(max(metric_values))
elif stat == "Sum":
result_vals.append(sum(metric_values))
label = query["metric_stat._metric._metric_name"] + " " + stat
results.append(
{
"id": query["id"],
"label": label,
"vals": result_vals,
"timestamps": [datetime.now() for _ in result_vals],
}
)
return results
def get_metric_statistics(
self, namespace, metric_name, start_time, end_time, period, stats
):
@ -334,7 +399,7 @@ class CloudWatchBackend(BaseBackend):
return data
def get_all_metrics(self):
return self.metric_data
return self.metric_data + self.aws_metric_data
def put_dashboard(self, name, body):
self.dashboards[name] = Dashboard(name, body)
@ -386,7 +451,7 @@ class CloudWatchBackend(BaseBackend):
self.alarms[alarm_name].update_state(reason, reason_data, state_value)
def list_metrics(self, next_token, namespace, metric_name):
def list_metrics(self, next_token, namespace, metric_name, dimensions):
if next_token:
if next_token not in self.paged_metric_data:
raise RESTError(
@ -397,15 +462,16 @@ class CloudWatchBackend(BaseBackend):
del self.paged_metric_data[next_token]  # Can't reuse the same token twice
return self._get_paginated(metrics)
else:
metrics = self.get_filtered_metrics(metric_name, namespace)
metrics = self.get_filtered_metrics(metric_name, namespace, dimensions)
return self._get_paginated(metrics)
def get_filtered_metrics(self, metric_name, namespace):
def get_filtered_metrics(self, metric_name, namespace, dimensions):
metrics = self.get_all_metrics()
if namespace:
metrics = [md for md in metrics if md.namespace == namespace]
if metric_name:
metrics = [md for md in metrics if md.name == metric_name]
metrics = [
md
for md in metrics
if md.filter(namespace=namespace, name=metric_name, dimensions=dimensions)
]
return metrics
def _get_paginated(self, metrics):
@ -445,3 +511,8 @@ for region in Session().get_available_regions(
cloudwatch_backends[region] = CloudWatchBackend()
for region in Session().get_available_regions("cloudwatch", partition_name="aws-cn"):
cloudwatch_backends[region] = CloudWatchBackend()
# List of services that provide OOTB CW metrics
# See the S3Backend constructor for an example
# TODO: We might have to separate this out per region for non-global services
metric_providers = {}
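`metric_providers` establishes an informal protocol: anything registered here must expose a `get_cloudwatch_metrics()` method returning a list of `MetricDatum` objects, which `get_all_metrics` then merges with user-submitted data. A minimal sketch of a hypothetical provider — `DummyBackend` and its `ItemCount` metric are illustrative, not part of moto:

```python
from datetime import datetime

from moto.cloudwatch.models import MetricDatum, metric_providers

class DummyBackend:
    """Hypothetical provider; anything with get_cloudwatch_metrics() qualifies."""

    def get_cloudwatch_metrics(self):
        # Return MetricDatum objects; dimensions use the same
        # [{"Name": ..., "Value": ...}] shape as put_metric_data.
        return [
            MetricDatum(
                namespace="AWS/Dummy",
                name="ItemCount",
                value=42,
                dimensions=[{"Name": "ResourceName", "Value": "dummy-1"}],
                timestamp=datetime.now(),
            )
        ]

# Registration mirrors what S3Backend.__init__ does for "S3".
metric_providers["Dummy"] = DummyBackend()
```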


@ -92,6 +92,18 @@ class CloudWatchResponse(BaseResponse):
template = self.response_template(PUT_METRIC_DATA_TEMPLATE)
return template.render()
@amzn_request_id
def get_metric_data(self):
start = dtparse(self._get_param("StartTime"))
end = dtparse(self._get_param("EndTime"))
queries = self._get_list_prefix("MetricDataQueries.member")
results = self.cloudwatch_backend.get_metric_data(
start_time=start, end_time=end, queries=queries
)
template = self.response_template(GET_METRIC_DATA_TEMPLATE)
return template.render(results=results)
@amzn_request_id
def get_metric_statistics(self):
namespace = self._get_param("Namespace")
@ -124,9 +136,10 @@ class CloudWatchResponse(BaseResponse):
def list_metrics(self):
namespace = self._get_param("Namespace")
metric_name = self._get_param("MetricName")
dimensions = self._get_multi_param("Dimensions.member")
next_token = self._get_param("NextToken")
next_token, metrics = self.cloudwatch_backend.list_metrics(
next_token, namespace, metric_name
next_token, namespace, metric_name, dimensions
)
template = self.response_template(LIST_METRICS_TEMPLATE)
return template.render(metrics=metrics, next_token=next_token)
@ -285,6 +298,35 @@ PUT_METRIC_DATA_TEMPLATE = """<PutMetricDataResponse xmlns="http://monitoring.am
</ResponseMetadata>
</PutMetricDataResponse>"""
GET_METRIC_DATA_TEMPLATE = """<GetMetricDataResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
{{ request_id }}
</RequestId>
</ResponseMetadata>
<GetMetricDataResult>
<MetricDataResults>
{% for result in results %}
<member>
<Id>{{ result.id }}</Id>
<Label>{{ result.label }}</Label>
<StatusCode>Complete</StatusCode>
<Timestamps>
{% for val in result.timestamps %}
<member>{{ val }}</member>
{% endfor %}
</Timestamps>
<Values>
{% for val in result.vals %}
<member>{{ val }}</member>
{% endfor %}
</Values>
</member>
{% endfor %}
</MetricDataResults>
</GetMetricDataResult>
</GetMetricDataResponse>"""
GET_METRIC_STATISTICS_TEMPLATE = """<GetMetricStatisticsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<ResponseMetadata>
<RequestId>
@ -342,7 +384,7 @@ LIST_METRICS_TEMPLATE = """<ListMetricsResponse xmlns="http://monitoring.amazona
</member>
{% endfor %}
</Dimensions>
<MetricName>{{ metric.name }}</MetricName>
<MetricName>Metric:{{ metric.name }}</MetricName>
<Namespace>{{ metric.namespace }}</Namespace>
</member>
{% endfor %}
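As a reading aid for the `get_metric_data` handler above: `self._get_list_prefix("MetricDataQueries.member")` flattens each query's nested form parameters into underscore-joined, lower-cased keys, which is why the backend indexes queries the way it does. One parsed query looks roughly like this (key names inferred from the backend lookups):

```python
# One element of the `queries` list passed to CloudWatchBackend.get_metric_data().
query = {
    "id": "result",
    "metric_stat._metric._namespace": "my_namespace/",
    "metric_stat._metric._metric_name": "metric1",
    "metric_stat._period": "60",  # present in the request but currently unused
    "metric_stat._stat": "Sum",
}
```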


@ -1,5 +1,5 @@
from moto.core.utils import get_random_hex
from uuid import uuid4
def get_random_identity_id(region):
return "{0}:{1}".format(region, get_random_hex(length=19))
return "{0}:{1}".format(region, uuid4())


@ -12,6 +12,8 @@ from io import BytesIO
from collections import defaultdict
from botocore.handlers import BUILTIN_HANDLERS
from botocore.awsrequest import AWSResponse
from six.moves.urllib.parse import urlparse
from werkzeug.wrappers import Request
import mock
from moto import settings
@ -175,6 +177,26 @@ class CallbackResponse(responses.CallbackResponse):
"""
Need to override this so we can pass decode_content=False
"""
if not isinstance(request, Request):
url = urlparse(request.url)
if request.body is None:
body = None
elif isinstance(request.body, six.text_type):
body = six.BytesIO(six.b(request.body))
else:
body = six.BytesIO(request.body)
req = Request.from_values(
path="?".join([url.path, url.query]),
input_stream=body,
content_length=request.headers.get("Content-Length"),
content_type=request.headers.get("Content-Type"),
method=request.method,
base_url="{scheme}://{netloc}".format(
scheme=url.scheme, netloc=url.netloc
),
headers=[(k, v) for k, v in six.iteritems(request.headers)],
)
request = req
headers = self.get_headers()
result = self.callback(request)


@ -328,3 +328,25 @@ def py2_strip_unicode_keys(blob):
blob = new_set
return blob
def tags_from_query_string(
querystring_dict, prefix="Tag", key_suffix="Key", value_suffix="Value"
):
response_values = {}
for key, value in querystring_dict.items():
if key.startswith(prefix) and key.endswith(key_suffix):
tag_index = key.replace(prefix + ".", "").replace("." + key_suffix, "")
tag_key = querystring_dict.get(
"{prefix}.{index}.{key_suffix}".format(
prefix=prefix, index=tag_index, key_suffix=key_suffix,
)
)[0]
tag_value_key = "{prefix}.{index}.{value_suffix}".format(
prefix=prefix, index=tag_index, value_suffix=value_suffix,
)
if tag_value_key in querystring_dict:
response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
else:
response_values[tag_key] = None
return response_values
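The helper turns the flat `Tag.N.Key`/`Tag.N.Value` pairs of an AWS query string into a plain dict; a key sent without a matching value maps to `None`, and EMR-style `Tags.N.*` parameters are handled by overriding `prefix`. For example (note that querystring values arrive as lists):

```python
from moto.core.utils import tags_from_query_string

querystring = {
    "Tag.1.Key": ["Environment"],
    "Tag.1.Value": ["test"],
    "Tag.2.Key": ["Orphan"],  # no matching Tag.2.Value
}
assert tags_from_query_string(querystring) == {"Environment": "test", "Orphan": None}

# EMR sends Tags.N.Key / Tags.N.Value instead:
assert tags_from_query_string(
    {"Tags.1.Key": ["team"], "Tags.1.Value": ["data"]}, prefix="Tags"
) == {"team": "data"}
```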


@ -74,6 +74,9 @@ class Item(BaseModel):
def __repr__(self):
return "Item: {0}".format(self.to_json())
def size(self):
return sum(bytesize(key) + value.size() for key, value in self.attrs.items())
def to_json(self):
attributes = {}
for attribute_key, attribute in self.attrs.items():
@ -921,6 +924,14 @@ class Table(BaseModel):
break
last_evaluated_key = None
size_limit = 1000000 # DynamoDB has a 1MB size limit
item_size = sum(res.size() for res in results)
if item_size > size_limit:
item_size = idx = 0
while item_size + results[idx].size() < size_limit:
item_size += results[idx].size()
idx += 1
limit = min(limit, idx) if limit else idx
if limit and len(results) > limit:
results = results[:limit]
last_evaluated_key = {self.hash_key_attr: results[-1].hash_key}
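The scan results are now capped at DynamoDB's 1MB page size before the caller's `limit` is applied: item sizes are accumulated until the next item would cross the threshold, and the effective limit becomes the smaller of that index and the requested limit. Stripped of the model classes, the truncation logic reduces to:

```python
size_limit = 1000000  # DynamoDB's 1MB page cap
sizes = [400000, 400000, 400000]  # hypothetical per-item sizes

if sum(sizes) > size_limit:
    total = idx = 0
    while total + sizes[idx] < size_limit:
        total += sizes[idx]
        idx += 1
    # Only the first two items fit, so the page is cut at index 2
    # and LastEvaluatedKey points at the last returned item.
    assert idx == 2
```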


@ -92,16 +92,24 @@ class DynamoHandler(BaseResponse):
def list_tables(self):
body = self.body
limit = body.get("Limit", 100)
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
start = list(self.dynamodb_backend.tables.keys()).index(last) + 1
all_tables = list(self.dynamodb_backend.tables.keys())
exclusive_start_table_name = body.get("ExclusiveStartTableName")
if exclusive_start_table_name:
try:
last_table_index = all_tables.index(exclusive_start_table_name)
except ValueError:
start = len(all_tables)
else:
start = last_table_index + 1
else:
start = 0
all_tables = list(self.dynamodb_backend.tables.keys())
if limit:
tables = all_tables[start : start + limit]
else:
tables = all_tables[start:]
response = {"TableNames": tables}
if limit and len(all_tables) > start + limit:
response["LastEvaluatedTableName"] = tables[-1]
@ -462,8 +470,10 @@ class DynamoHandler(BaseResponse):
for k, v in six.iteritems(self.body.get("ExpressionAttributeNames", {}))
)
if " AND " in key_condition_expression:
expressions = key_condition_expression.split(" AND ", 1)
if " and " in key_condition_expression.lower():
expressions = re.split(
" AND ", key_condition_expression, maxsplit=1, flags=re.IGNORECASE
)
index_hash_key = [key for key in index if key["KeyType"] == "HASH"][0]
hash_key_var = reverse_attribute_lookup.get(
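The switch to `re.split(..., flags=re.IGNORECASE)` means lower- and mixed-case `and` in a `KeyConditionExpression` is now split the same way as `AND`, while `maxsplit=1` keeps any further connectives inside the range condition:

```python
import re

expr = "id = :id and begins_with(sk, :prefix) AND extra"
parts = re.split(" AND ", expr, maxsplit=1, flags=re.IGNORECASE)
assert parts == ["id = :id", "begins_with(sk, :prefix) AND extra"]
```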


@ -557,6 +557,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
# worst case we'll get IP address exhaustion... rarely
pass
def add_block_device(self, size, device_path):
volume = self.ec2_backend.create_volume(size, self.region_name)
self.ec2_backend.attach_volume(volume.id, self.id, device_path)
def setup_defaults(self):
# Default have an instance with root volume should you not wish to
# override with attach volume cmd.
@ -564,9 +568,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
self.ec2_backend.attach_volume(volume.id, self.id, "/dev/sda1")
def teardown_defaults(self):
volume_id = self.block_device_mapping["/dev/sda1"].volume_id
self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1")
self.ec2_backend.delete_volume(volume_id)
if "/dev/sda1" in self.block_device_mapping:
volume_id = self.block_device_mapping["/dev/sda1"].volume_id
self.ec2_backend.detach_volume(volume_id, self.id, "/dev/sda1")
self.ec2_backend.delete_volume(volume_id)
@property
def get_block_device_mapping(self):
@ -621,6 +626,7 @@ class Instance(TaggedEC2Resource, BotoInstance):
subnet_id=properties.get("SubnetId"),
key_name=properties.get("KeyName"),
private_ip=properties.get("PrivateIpAddress"),
block_device_mappings=properties.get("BlockDeviceMappings", {}),
)
instance = reservation.instances[0]
for tag in properties.get("Tags", []):
@ -880,7 +886,14 @@ class InstanceBackend(object):
)
new_reservation.instances.append(new_instance)
new_instance.add_tags(instance_tags)
new_instance.setup_defaults()
if "block_device_mappings" in kwargs:
for block_device in kwargs["block_device_mappings"]:
new_instance.add_block_device(
block_device["Ebs"]["VolumeSize"], block_device["DeviceName"]
)
else:
new_instance.setup_defaults()
return new_reservation
def start_instances(self, instance_ids):
@ -1512,6 +1525,11 @@ class RegionsAndZonesBackend(object):
regions.append(Region(region, "ec2.{}.amazonaws.com.cn".format(region)))
zones = {
"af-south-1": [
Zone(region_name="af-south-1", name="af-south-1a", zone_id="afs1-az1"),
Zone(region_name="af-south-1", name="af-south-1b", zone_id="afs1-az2"),
Zone(region_name="af-south-1", name="af-south-1c", zone_id="afs1-az3"),
],
"ap-south-1": [
Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"),
Zone(region_name="ap-south-1", name="ap-south-1b", zone_id="aps1-az3"),


@ -52,7 +52,7 @@ class InstanceResponse(BaseResponse):
private_ip = self._get_param("PrivateIpAddress")
associate_public_ip = self._get_param("AssociatePublicIpAddress")
key_name = self._get_param("KeyName")
ebs_optimized = self._get_param("EbsOptimized")
ebs_optimized = self._get_param("EbsOptimized") or False
instance_initiated_shutdown_behavior = self._get_param(
"InstanceInitiatedShutdownBehavior"
)


@ -2,7 +2,8 @@ from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.models import validate_resource_ids
from moto.ec2.utils import tags_from_query_string, filters_from_querystring
from moto.ec2.utils import filters_from_querystring
from moto.core.utils import tags_from_query_string
class TagResponse(BaseResponse):


@ -196,22 +196,6 @@ def split_route_id(route_id):
return values[0], values[1]
def tags_from_query_string(querystring_dict):
prefix = "Tag"
suffix = "Key"
response_values = {}
for key, value in querystring_dict.items():
if key.startswith(prefix) and key.endswith(suffix):
tag_index = key.replace(prefix + ".", "").replace("." + suffix, "")
tag_key = querystring_dict.get("Tag.{0}.Key".format(tag_index))[0]
tag_value_key = "Tag.{0}.Value".format(tag_index)
if tag_value_key in querystring_dict:
response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
else:
response_values[tag_key] = None
return response_values
def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration"):
"""
turn:


@ -0,0 +1,4 @@
from .models import eb_backends
from moto.core.models import base_decorator
mock_elasticbeanstalk = base_decorator(eb_backends)


@ -0,0 +1,15 @@
from moto.core.exceptions import RESTError
class InvalidParameterValueError(RESTError):
def __init__(self, message):
super(InvalidParameterValueError, self).__init__(
"InvalidParameterValue", message
)
class ResourceNotFoundException(RESTError):
def __init__(self, message):
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException", message
)


@ -0,0 +1,152 @@
import weakref
from boto3 import Session
from moto.core import BaseBackend, BaseModel
from .exceptions import InvalidParameterValueError, ResourceNotFoundException
class FakeEnvironment(BaseModel):
def __init__(
self, application, environment_name, solution_stack_name, tags,
):
self.application = weakref.proxy(
application
) # weakref to break circular dependencies
self.environment_name = environment_name
self.solution_stack_name = solution_stack_name
self.tags = tags
@property
def application_name(self):
return self.application.application_name
@property
def environment_arn(self):
return (
"arn:aws:elasticbeanstalk:{region}:{account_id}:"
"environment/{application_name}/{environment_name}".format(
region=self.region,
account_id="123456789012",
application_name=self.application_name,
environment_name=self.environment_name,
)
)
@property
def platform_arn(self):
return "TODO" # TODO
@property
def region(self):
return self.application.region
class FakeApplication(BaseModel):
def __init__(self, backend, application_name):
self.backend = weakref.proxy(backend) # weakref to break cycles
self.application_name = application_name
self.environments = dict()
def create_environment(
self, environment_name, solution_stack_name, tags,
):
if environment_name in self.environments:
raise InvalidParameterValueError
env = FakeEnvironment(
application=self,
environment_name=environment_name,
solution_stack_name=solution_stack_name,
tags=tags,
)
self.environments[environment_name] = env
return env
@property
def region(self):
return self.backend.region
class EBBackend(BaseBackend):
def __init__(self, region):
self.region = region
self.applications = dict()
def reset(self):
# preserve region
region = self.region
self._reset_model_refs()
self.__dict__ = {}
self.__init__(region)
def create_application(self, application_name):
if application_name in self.applications:
raise InvalidParameterValueError(
"Application {} already exists.".format(application_name)
)
new_app = FakeApplication(backend=self, application_name=application_name,)
self.applications[application_name] = new_app
return new_app
def create_environment(self, app, environment_name, stack_name, tags):
return app.create_environment(
environment_name=environment_name,
solution_stack_name=stack_name,
tags=tags,
)
def describe_environments(self):
envs = []
for app in self.applications.values():
for env in app.environments.values():
envs.append(env)
return envs
def list_available_solution_stacks(self):
# Implemented in response.py
pass
def update_tags_for_resource(self, resource_arn, tags_to_add, tags_to_remove):
try:
res = self._find_environment_by_arn(resource_arn)
except KeyError:
raise ResourceNotFoundException(
"Resource not found for ARN '{}'.".format(resource_arn)
)
for key, value in tags_to_add.items():
res.tags[key] = value
for key in tags_to_remove:
del res.tags[key]
def list_tags_for_resource(self, resource_arn):
try:
res = self._find_environment_by_arn(resource_arn)
except KeyError:
raise ResourceNotFoundException(
"Resource not found for ARN '{}'.".format(resource_arn)
)
return res.tags
def _find_environment_by_arn(self, arn):
for app in self.applications.keys():
for env in self.applications[app].environments.values():
if env.environment_arn == arn:
return env
raise KeyError()
eb_backends = {}
for region in Session().get_available_regions("elasticbeanstalk"):
eb_backends[region] = EBBackend(region)
for region in Session().get_available_regions(
"elasticbeanstalk", partition_name="aws-us-gov"
):
eb_backends[region] = EBBackend(region)
for region in Session().get_available_regions(
"elasticbeanstalk", partition_name="aws-cn"
):
eb_backends[region] = EBBackend(region)
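A minimal usage sketch of the new backend through boto3, assuming the (suppressed) `responses.py` wires the usual parameter names; the application and environment names are arbitrary:

```python
import boto3
from moto import mock_elasticbeanstalk

@mock_elasticbeanstalk
def eb_roundtrip():
    eb = boto3.client("elasticbeanstalk", region_name="us-east-1")
    eb.create_application(ApplicationName="myapp")
    eb.create_environment(ApplicationName="myapp", EnvironmentName="test-env")
    env_names = [
        e["EnvironmentName"]
        for e in eb.describe_environments()["Environments"]
    ]
    assert env_names == ["test-env"]

eb_roundtrip()
```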

File diff suppressed because it is too large


@ -0,0 +1,11 @@
from __future__ import unicode_literals
from .responses import EBResponse
url_bases = [
r"https?://elasticbeanstalk.(?P<region>[a-zA-Z0-9\-_]+).amazonaws.com",
]
url_paths = {
"{0}/$": EBResponse.dispatch,
}


@ -10,9 +10,10 @@ from six.moves.urllib.parse import urlparse
from moto.core.responses import AWSServiceSpec
from moto.core.responses import BaseResponse
from moto.core.responses import xml_to_json_response
from moto.core.utils import tags_from_query_string
from .exceptions import EmrError
from .models import emr_backends
from .utils import steps_from_query_string, tags_from_query_string
from .utils import steps_from_query_string
def generate_boto3_response(operation):
@ -91,7 +92,7 @@ class ElasticMapReduceResponse(BaseResponse):
@generate_boto3_response("AddTags")
def add_tags(self):
cluster_id = self._get_param("ResourceId")
tags = tags_from_query_string(self.querystring)
tags = tags_from_query_string(self.querystring, prefix="Tags")
self.backend.add_tags(cluster_id, tags)
template = self.response_template(ADD_TAGS_TEMPLATE)
return template.render()


@ -22,22 +22,6 @@ def random_instance_group_id(size=13):
return "i-{0}".format(random_id())
def tags_from_query_string(querystring_dict):
prefix = "Tags"
suffix = "Key"
response_values = {}
for key, value in querystring_dict.items():
if key.startswith(prefix) and key.endswith(suffix):
tag_index = key.replace(prefix + ".", "").replace("." + suffix, "")
tag_key = querystring_dict.get("Tags.{0}.Key".format(tag_index))[0]
tag_value_key = "Tags.{0}.Value".format(tag_index)
if tag_value_key in querystring_dict:
response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
else:
response_values[tag_key] = None
return response_values
def steps_from_query_string(querystring_dict):
steps = []
for step in querystring_dict:


@ -34,6 +34,9 @@ class GlueBackend(BaseBackend):
except KeyError:
raise DatabaseNotFoundException(database_name)
def get_databases(self):
return list(self.databases.values())
def create_table(self, database_name, table_name, table_input):
database = self.get_database(database_name)


@ -30,6 +30,12 @@ class GlueResponse(BaseResponse):
database = self.glue_backend.get_database(database_name)
return json.dumps({"Database": {"Name": database.name}})
def get_databases(self):
database_list = self.glue_backend.get_databases()
return json.dumps(
{"DatabaseList": [{"Name": database.name} for database in database_list]}
)
def create_table(self):
database_name = self.parameters.get("DatabaseName")
table_input = self.parameters.get("TableInput")


@ -145,10 +145,7 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
# Do S3, resource type s3
if not resource_type_filters or "s3" in resource_type_filters:
for bucket in self.s3_backend.buckets.values():
tags = []
for tag in bucket.tags.tag_set.tags:
tags.append({"Key": tag.key, "Value": tag.value})
tags = self.s3_backend.tagger.list_tags_for_resource(bucket.arn)["Tags"]
if not tags or not tag_filter(
tags
): # Skip if no tags, or invalid filter
@ -362,8 +359,9 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
# Do S3, resource type s3
for bucket in self.s3_backend.buckets.values():
for tag in bucket.tags.tag_set.tags:
yield tag.key
tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn)
for key, _ in tags.items():
yield key
# EC2 tags
def get_ec2_keys(res_id):
@ -414,9 +412,10 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend):
# Do S3, resource type s3
for bucket in self.s3_backend.buckets.values():
for tag in bucket.tags.tag_set.tags:
if tag.key == tag_key:
yield tag.value
tags = self.s3_backend.tagger.get_tag_dict_for_resource(bucket.arn)
for key, value in tags.items():
if key == tag_key:
yield value
# EC2 tags
def get_ec2_values(res_id):


@ -22,6 +22,8 @@ import six
from bisect import insort
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime
from moto.cloudwatch.models import metric_providers, MetricDatum
from moto.utilities.tagging_service import TaggingService
from .exceptions import (
BucketAlreadyExists,
MissingBucket,
@ -34,7 +36,6 @@ from .exceptions import (
MalformedXML,
InvalidStorageClass,
InvalidTargetBucketForLogging,
DuplicateTagKeys,
CrossLocationLoggingProhibitted,
NoSuchPublicAccessBlockConfiguration,
InvalidPublicAccessBlockConfiguration,
@ -94,6 +95,7 @@ class FakeKey(BaseModel):
version_id=0,
max_buffer_size=DEFAULT_KEY_BUFFER_SIZE,
multipart=None,
bucket_name=None,
):
self.name = name
self.last_modified = datetime.datetime.utcnow()
@ -105,8 +107,8 @@ class FakeKey(BaseModel):
self._etag = etag
self._version_id = version_id
self._is_versioned = is_versioned
self._tagging = FakeTagging()
self.multipart = multipart
self.bucket_name = bucket_name
self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size)
self._max_buffer_size = max_buffer_size
@ -126,6 +128,13 @@ class FakeKey(BaseModel):
self.lock.release()
return r
@property
def arn(self):
# S3 Objects don't have an ARN, but we do need something unique when creating tags against this resource
return "arn:aws:s3:::{}/{}/{}".format(
self.bucket_name, self.name, self.version_id
)
@value.setter
def value(self, new_value):
self._value_buffer.seek(0)
@ -152,9 +161,6 @@ class FakeKey(BaseModel):
self._metadata = {}
self._metadata.update(metadata)
def set_tagging(self, tagging):
self._tagging = tagging
def set_storage_class(self, storage):
if storage is not None and storage not in STORAGE_CLASS:
raise InvalidStorageClass(storage=storage)
@ -210,10 +216,6 @@ class FakeKey(BaseModel):
def metadata(self):
return self._metadata
@property
def tagging(self):
return self._tagging
@property
def response_dict(self):
res = {
@ -471,26 +473,10 @@ def get_canned_acl(acl):
return FakeAcl(grants=grants)
class FakeTagging(BaseModel):
def __init__(self, tag_set=None):
self.tag_set = tag_set or FakeTagSet()
class FakeTagSet(BaseModel):
def __init__(self, tags=None):
self.tags = tags or []
class FakeTag(BaseModel):
def __init__(self, key, value=None):
self.key = key
self.value = value
class LifecycleFilter(BaseModel):
def __init__(self, prefix=None, tag=None, and_filter=None):
self.prefix = prefix
self.tag = tag
(self.tag_key, self.tag_value) = tag if tag else (None, None)
self.and_filter = and_filter
def to_config_dict(self):
@ -499,11 +485,11 @@ class LifecycleFilter(BaseModel):
"predicate": {"type": "LifecyclePrefixPredicate", "prefix": self.prefix}
}
elif self.tag:
elif self.tag_key:
return {
"predicate": {
"type": "LifecycleTagPredicate",
"tag": {"key": self.tag.key, "value": self.tag.value},
"tag": {"key": self.tag_key, "value": self.tag_value},
}
}
@ -527,12 +513,9 @@ class LifecycleAndFilter(BaseModel):
if self.prefix is not None:
data.append({"type": "LifecyclePrefixPredicate", "prefix": self.prefix})
for tag in self.tags:
for key, value in self.tags.items():
data.append(
{
"type": "LifecycleTagPredicate",
"tag": {"key": tag.key, "value": tag.value},
}
{"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},}
)
return data
@ -787,7 +770,6 @@ class FakeBucket(BaseModel):
self.policy = None
self.website_configuration = None
self.acl = get_canned_acl("private")
self.tags = FakeTagging()
self.cors = []
self.logging = {}
self.notification_configuration = None
@ -879,7 +861,7 @@ class FakeBucket(BaseModel):
and_filter = None
if rule["Filter"].get("And"):
filters += 1
and_tags = []
and_tags = {}
if rule["Filter"]["And"].get("Tag"):
if not isinstance(rule["Filter"]["And"]["Tag"], list):
rule["Filter"]["And"]["Tag"] = [
@ -887,7 +869,7 @@ class FakeBucket(BaseModel):
]
for t in rule["Filter"]["And"]["Tag"]:
and_tags.append(FakeTag(t["Key"], t.get("Value", "")))
and_tags[t["Key"]] = t.get("Value", "")
try:
and_prefix = (
@ -901,7 +883,7 @@ class FakeBucket(BaseModel):
filter_tag = None
if rule["Filter"].get("Tag"):
filters += 1
filter_tag = FakeTag(
filter_tag = (
rule["Filter"]["Tag"]["Key"],
rule["Filter"]["Tag"].get("Value", ""),
)
@ -988,16 +970,6 @@ class FakeBucket(BaseModel):
def delete_cors(self):
self.cors = []
def set_tags(self, tagging):
self.tags = tagging
def delete_tags(self):
self.tags = FakeTagging()
@property
def tagging(self):
return self.tags
def set_logging(self, logging_config, bucket_backend):
if not logging_config:
self.logging = {}
@ -1085,6 +1057,10 @@ class FakeBucket(BaseModel):
def set_acl(self, acl):
self.acl = acl
@property
def arn(self):
return "arn:aws:s3:::{}".format(self.name)
@property
def physical_resource_id(self):
return self.name
@ -1110,7 +1086,7 @@ class FakeBucket(BaseModel):
int(time.mktime(self.creation_date.timetuple()))
), # PY2 and 3 compatible
"configurationItemMD5Hash": "",
"arn": "arn:aws:s3:::{}".format(self.name),
"arn": self.arn,
"resourceType": "AWS::S3::Bucket",
"resourceId": self.name,
"resourceName": self.name,
@ -1119,7 +1095,7 @@ class FakeBucket(BaseModel):
"resourceCreationTime": str(self.creation_date),
"relatedEvents": [],
"relationships": [],
"tags": {tag.key: tag.value for tag in self.tagging.tag_set.tags},
"tags": s3_backend.tagger.get_tag_dict_for_resource(self.arn),
"configuration": {
"name": self.name,
"owner": {"id": OWNER},
@ -1181,6 +1157,40 @@ class S3Backend(BaseBackend):
def __init__(self):
self.buckets = {}
self.account_public_access_block = None
self.tagger = TaggingService()
# Register this class as a CloudWatch Metric Provider
# Must provide a method 'get_cloudwatch_metrics' that will return a list of metrics, based on the data available
metric_providers["S3"] = self
def get_cloudwatch_metrics(self):
metrics = []
for name, bucket in self.buckets.items():
metrics.append(
MetricDatum(
namespace="AWS/S3",
name="BucketSizeBytes",
value=bucket.keys.item_size(),
dimensions=[
{"Name": "StorageType", "Value": "StandardStorage"},
{"Name": "BucketName", "Value": name},
],
timestamp=datetime.datetime.now(),
)
)
metrics.append(
MetricDatum(
namespace="AWS/S3",
name="NumberOfObjects",
value=len(bucket.keys),
dimensions=[
{"Name": "StorageType", "Value": "AllStorageTypes"},
{"Name": "BucketName", "Value": name},
],
timestamp=datetime.datetime.now(),
)
)
return metrics
def create_bucket(self, bucket_name, region_name):
if bucket_name in self.buckets:
@ -1350,23 +1360,32 @@ class S3Backend(BaseBackend):
else:
return None
def set_key_tagging(self, bucket_name, key_name, tagging, version_id=None):
key = self.get_key(bucket_name, key_name, version_id)
def get_key_tags(self, key):
return self.tagger.list_tags_for_resource(key.arn)
def set_key_tags(self, key, tags, key_name=None):
if key is None:
raise MissingKey(key_name)
key.set_tagging(tagging)
self.tagger.delete_all_tags_for_resource(key.arn)
self.tagger.tag_resource(
key.arn, [{"Key": key, "Value": value} for key, value in tags.items()],
)
return key
def put_bucket_tagging(self, bucket_name, tagging):
tag_keys = [tag.key for tag in tagging.tag_set.tags]
if len(tag_keys) != len(set(tag_keys)):
raise DuplicateTagKeys()
def get_bucket_tags(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.set_tags(tagging)
return self.tagger.list_tags_for_resource(bucket.arn)
def put_bucket_tags(self, bucket_name, tags):
bucket = self.get_bucket(bucket_name)
self.tagger.delete_all_tags_for_resource(bucket.arn)
self.tagger.tag_resource(
bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()],
)
def delete_bucket_tagging(self, bucket_name):
bucket = self.get_bucket(bucket_name)
bucket.delete_tags()
self.tagger.delete_all_tags_for_resource(bucket.arn)
def put_bucket_cors(self, bucket_name, cors_rules):
bucket = self.get_bucket(bucket_name)
@ -1574,6 +1593,7 @@ class S3Backend(BaseBackend):
key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id)
new_key = key.copy(dest_key_name, dest_bucket.is_versioned)
self.tagger.copy_tags(key.arn, new_key.arn)
if storage is not None:
new_key.set_storage_class(storage)
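Bucket and key tags now live in the shared `TaggingService`, keyed by ARN, instead of on the model objects; externally the API behaves as before, and copied keys carry their tags across via `copy_tags`. A quick boto3 round trip:

```python
import boto3
from moto import mock_s3

@mock_s3
def tag_roundtrip():
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="mybucket")
    s3.put_bucket_tagging(
        Bucket="mybucket",
        Tagging={"TagSet": [{"Key": "stage", "Value": "test"}]},
    )
    assert s3.get_bucket_tagging(Bucket="mybucket")["TagSet"] == [
        {"Key": "stage", "Value": "test"}
    ]

tag_roundtrip()
```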


@ -5,7 +5,6 @@ import sys
import six
from botocore.awsrequest import AWSPreparedRequest
from werkzeug.wrappers import Request
from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys
from six.moves.urllib.parse import parse_qs, urlparse, unquote, parse_qsl
@ -25,6 +24,7 @@ from moto.s3bucket_path.utils import (
from .exceptions import (
BucketAlreadyExists,
DuplicateTagKeys,
S3ClientError,
MissingBucket,
MissingKey,
@ -44,9 +44,6 @@ from .models import (
FakeGrant,
FakeAcl,
FakeKey,
FakeTagging,
FakeTagSet,
FakeTag,
)
from .utils import (
bucket_name_from_url,
@ -135,7 +132,8 @@ ACTION_MAP = {
def parse_key_name(pth):
return pth.lstrip("/")
# strip the first '/' left by urlparse
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
@ -379,13 +377,13 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
elif "tagging" in querystring:
bucket = self.backend.get_bucket(bucket_name)
tags = self.backend.get_bucket_tags(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(bucket.tagging.tag_set.tags) == 0:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_TAGGING_RESPONSE)
return template.render(bucket=bucket)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.logging:
@ -653,7 +651,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
self.backend.put_bucket_tags(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
@ -797,14 +795,6 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
elif request.headers.get("Content-Type").startswith("multipart/form-data"):
request = Request.from_values(
input_stream=six.BytesIO(request.body),
content_length=request.headers["Content-Length"],
content_type=request.headers["Content-Type"],
method="POST",
)
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
@ -1107,8 +1097,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(obj=key)
if "tagging" in query:
tags = self.backend.get_key_tags(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(obj=key)
return 200, response_headers, template.render(tags=tags)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
@ -1180,8 +1171,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_key(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tagging(bucket_name, key_name, tagging, version_id)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
@ -1222,7 +1214,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
new_key.set_tagging(tagging)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
@ -1246,7 +1238,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
new_key.set_tagging(tagging)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
@ -1374,55 +1366,45 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
return None
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
tags = []
for tag in parsed_header.items():
tags.append(FakeTag(tag[0], tag[1][0]))
tag_set = FakeTagSet(tags)
tagging = FakeTagging(tag_set)
return tagging
else:
return FakeTagging()
tags[tag[0]] = tag[1][0]
return tags
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = []
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags.append(FakeTag(tag["Key"], tag["Value"]))
tags[tag["Key"]] = tag["Value"]
tag_set = FakeTagSet(tags)
tagging = FakeTagging(tag_set)
return tagging
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = []
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags.append(
FakeTag(
parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"],
parsed_xml["Tagging"]["TagSet"]["Tag"]["Value"],
)
)
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags.append(FakeTag(tag["Key"], tag["Value"]))
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for tag in tags:
if tag.key.startswith("aws:"):
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
tag_set = FakeTagSet(tags)
tagging = FakeTagging(tag_set)
return tagging
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
@ -1742,10 +1724,10 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag.key }}</Key>
<Value>{{ rule.filter.tag.value }}</Value>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
@ -1753,10 +1735,10 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for tag in rule.filter.and_filter.tags %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
@ -1917,22 +1899,10 @@ S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in obj.tagging.tag_set.tags %}
{% for tag in tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<Tagging>
<TagSet>
{% for tag in bucket.tagging.tag_set.tags %}
<Tag>
<Key>{{ tag.key }}</Key>
<Value>{{ tag.value }}</Value>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>


@ -15,5 +15,5 @@ url_paths = {
# path-based bucket + key
"{0}/(?P<bucket_name_path>[^/]+)/(?P<key_name>.+)": S3ResponseInstance.key_or_control_response,
# subdomain bucket + key with empty first part of path
"{0}//(?P<key_name>.*)$": S3ResponseInstance.key_or_control_response,
"{0}/(?P<key_name>/.*)$": S3ResponseInstance.key_or_control_response,
}


@ -146,6 +146,12 @@ class _VersionedKeyStore(dict):
for key in self:
yield key, self.getlist(key)
def item_size(self):
size = 0
for val in self.values():
size += sys.getsizeof(val)
return size
items = iteritems = _iteritems
lists = iterlists = _iterlists
values = itervalues = _itervalues


@ -5,15 +5,23 @@ class TaggingService:
self.valueName = valueName
self.tags = {}
def get_tag_dict_for_resource(self, arn):
result = {}
if self.has_tags(arn):
for k, v in self.tags[arn].items():
result[k] = v
return result
def list_tags_for_resource(self, arn):
result = []
if arn in self.tags:
if self.has_tags(arn):
for k, v in self.tags[arn].items():
result.append({self.keyName: k, self.valueName: v})
return {self.tagName: result}
def delete_all_tags_for_resource(self, arn):
del self.tags[arn]
if self.has_tags(arn):
del self.tags[arn]
def has_tags(self, arn):
return arn in self.tags
@ -27,6 +35,12 @@ class TaggingService:
else:
self.tags[arn][t[self.keyName]] = None
def copy_tags(self, from_arn, to_arn):
if self.has_tags(from_arn):
self.tag_resource(
to_arn, self.list_tags_for_resource(from_arn)[self.tagName]
)
def untag_resource_using_names(self, arn, tag_names):
for name in tag_names:
if name in self.tags.get(arn, {}):
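The service itself is plain ARN-keyed storage: `list_tags_for_resource` returns the boto3-style list shape, `get_tag_dict_for_resource` a flat dict, and `copy_tags` is what the S3 copy path above relies on. For example:

```python
from moto.utilities.tagging_service import TaggingService

svc = TaggingService()
svc.tag_resource("arn:aws:s3:::mybucket", [{"Key": "stage", "Value": "test"}])

assert svc.list_tags_for_resource("arn:aws:s3:::mybucket") == {
    "Tags": [{"Key": "stage", "Value": "test"}]
}
assert svc.get_tag_dict_for_resource("arn:aws:s3:::mybucket") == {"stage": "test"}

svc.copy_tags("arn:aws:s3:::mybucket", "arn:aws:s3:::otherbucket")
assert svc.has_tags("arn:aws:s3:::otherbucket")
```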


@ -69,6 +69,22 @@ def test_create_rest_api_with_tags():
response["tags"].should.equal({"MY_TAG1": "MY_VALUE1"})
@mock_apigateway
def test_create_rest_api_with_policy():
client = boto3.client("apigateway", region_name="us-west-2")
policy = '{"Version": "2012-10-17","Statement": []}'
response = client.create_rest_api(
name="my_api", description="this is my api", policy=policy
)
api_id = response["id"]
response = client.get_rest_api(restApiId=api_id)
assert "policy" in response
response["policy"].should.equal(policy)
@mock_apigateway
def test_create_rest_api_invalid_apikeysource():
client = boto3.client("apigateway", region_name="us-west-2")


@ -1677,6 +1677,42 @@ def test_create_function_with_unknown_arn():
)
@mock_lambda
def test_remove_function_permission():
conn = boto3.client("lambda", _lambda_region)
zip_content = get_test_zip_file1()
conn.create_function(
FunctionName="testFunction",
Runtime="python2.7",
Role=(get_role_name()),
Handler="lambda_function.handler",
Code={"ZipFile": zip_content},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)
conn.add_permission(
FunctionName="testFunction",
StatementId="1",
Action="lambda:InvokeFunction",
Principal="432143214321",
SourceArn="arn:aws:lambda:us-west-2:account-id:function:helloworld",
SourceAccount="123412341234",
EventSourceToken="blah",
Qualifier="2",
)
remove = conn.remove_permission(
FunctionName="testFunction", StatementId="1", Qualifier="2",
)
remove["ResponseMetadata"]["HTTPStatusCode"].should.equal(204)
policy = conn.get_policy(FunctionName="testFunction", Qualifier="2")["Policy"]
policy = json.loads(policy)
policy["Statement"].should.equal([])
def create_invalid_lambda(role):
conn = boto3.client("lambda", _lambda_region)
zip_content = get_test_zip_file1()


@ -835,8 +835,10 @@ def test_describe_change_set():
)
stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
stack["ChangeSetName"].should.equal("NewChangeSet")
stack["StackName"].should.equal("NewStack")
stack["Status"].should.equal("REVIEW_IN_PROGRESS")
cf_conn.create_change_set(
StackName="NewStack",
@ -851,15 +853,30 @@ def test_describe_change_set():
@mock_cloudformation
@mock_ec2
def test_execute_change_set_w_arn():
cf_conn = boto3.client("cloudformation", region_name="us-east-1")
ec2 = boto3.client("ec2", region_name="us-east-1")
# Verify no instances exist at the moment
ec2.describe_instances()["Reservations"].should.have.length_of(0)
# Create a Change set, and verify no resources have been created yet
change_set = cf_conn.create_change_set(
StackName="NewStack",
TemplateBody=dummy_template_json,
ChangeSetName="NewChangeSet",
ChangeSetType="CREATE",
)
ec2.describe_instances()["Reservations"].should.have.length_of(0)
cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal(
"REVIEW_IN_PROGRESS"
)
# Execute change set
cf_conn.execute_change_set(ChangeSetName=change_set["Id"])
# Verify that the status has changed, and the appropriate resources have been created
cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal(
"CREATE_COMPLETE"
)
ec2.describe_instances()["Reservations"].should.have.length_of(1)
@mock_cloudformation


@ -1,9 +1,10 @@
import boto
from boto.ec2.cloudwatch.alarm import MetricAlarm
from boto.s3.key import Key
from datetime import datetime
import sure # noqa
from moto import mock_cloudwatch_deprecated
from moto import mock_cloudwatch_deprecated, mock_s3_deprecated
def alarm_fixture(name="tester", action=None):
@ -83,10 +84,11 @@ def test_put_metric_data():
)
metrics = conn.list_metrics()
metrics.should.have.length_of(1)
metric_names = [m for m in metrics if m.name == "Metric:metric"]
metric_names.should.have.length_of(1)
metric = metrics[0]
metric.namespace.should.equal("tester")
metric.name.should.equal("metric")
metric.name.should.equal("Metric:metric")
dict(metric.dimensions).should.equal({"InstanceId": ["i-0123456,i-0123457"]})
@ -153,3 +155,35 @@ def test_get_metric_statistics():
datapoint = datapoints[0]
datapoint.should.have.key("Minimum").which.should.equal(1.5)
datapoint.should.have.key("Timestamp").which.should.equal(metric_timestamp)
@mock_s3_deprecated
@mock_cloudwatch_deprecated
def test_cloudwatch_return_s3_metrics():
region = "us-east-1"
cw = boto.ec2.cloudwatch.connect_to_region(region)
s3 = boto.s3.connect_to_region(region)
bucket_name_1 = "test-bucket-1"
bucket_name_2 = "test-bucket-2"
bucket1 = s3.create_bucket(bucket_name=bucket_name_1)
key = Key(bucket1)
key.key = "the-key"
key.set_contents_from_string("foobar" * 4)
s3.create_bucket(bucket_name=bucket_name_2)
metrics_s3_bucket_1 = cw.list_metrics(dimensions={"BucketName": bucket_name_1})
# Verify that the OOTB S3 metrics are available for the created buckets
len(metrics_s3_bucket_1).should.equal(2)
metric_names = [m.name for m in metrics_s3_bucket_1]
sorted(metric_names).should.equal(
["Metric:BucketSizeBytes", "Metric:NumberOfObjects"]
)
# Explicit clean up - the metrics for these buckets are messing with subsequent tests
key.delete()
s3.delete_bucket(bucket_name_1)
s3.delete_bucket(bucket_name_2)


@ -3,6 +3,7 @@
import boto3
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from freezegun import freeze_time
from nose.tools import assert_raises
from uuid import uuid4
import pytz
@ -154,7 +155,7 @@ def test_put_metric_data_no_dimensions():
metrics.should.have.length_of(1)
metric = metrics[0]
metric["Namespace"].should.equal("tester")
metric["MetricName"].should.equal("metric")
metric["MetricName"].should.equal("Metric:metric")
@mock_cloudwatch
@ -182,7 +183,7 @@ def test_put_metric_data_with_statistics():
metrics.should.have.length_of(1)
metric = metrics[0]
metric["Namespace"].should.equal("tester")
metric["MetricName"].should.equal("statmetric")
metric["MetricName"].should.equal("Metric:statmetric")
# TODO: test statistics - https://github.com/spulec/moto/issues/1615
@ -211,6 +212,35 @@ def test_get_metric_statistics():
datapoint["Sum"].should.equal(1.5)
@mock_cloudwatch
@freeze_time("2020-02-10 18:44:05")
def test_custom_timestamp():
utc_now = datetime.now(tz=pytz.utc)
time = "2020-02-10T18:44:09Z"
cw = boto3.client("cloudwatch", "eu-west-1")
cw.put_metric_data(
Namespace="tester",
MetricData=[dict(MetricName="metric1", Value=1.5, Timestamp=time)],
)
cw.put_metric_data(
Namespace="tester",
MetricData=[
dict(MetricName="metric2", Value=1.5, Timestamp=datetime(2020, 2, 10))
],
)
stats = cw.get_metric_statistics(
Namespace="tester",
MetricName="metric",
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
Period=60,
Statistics=["SampleCount", "Sum"],
)
@mock_cloudwatch
def test_list_metrics():
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
@ -233,8 +263,16 @@ def test_list_metrics():
# Verify format
res.should.equal(
[
{u"Namespace": "list_test_1/", u"Dimensions": [], u"MetricName": "metric1"},
{u"Namespace": "list_test_1/", u"Dimensions": [], u"MetricName": "metric1"},
{
u"Namespace": "list_test_1/",
u"Dimensions": [],
u"MetricName": "Metric:metric1",
},
{
u"Namespace": "list_test_1/",
u"Dimensions": [],
u"MetricName": "Metric:metric1",
},
]
)
# Verify unknown namespace still has no results
@ -292,3 +330,232 @@ def create_metrics(cloudwatch, namespace, metrics=5, data_points=5):
Namespace=namespace,
MetricData=[{"MetricName": metric_name, "Value": j, "Unit": "Seconds"}],
)
@mock_cloudwatch
def test_get_metric_data_within_timeframe():
utc_now = datetime.now(tz=pytz.utc)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace1 = "my_namespace/"
# put metric data
values = [0, 2, 4, 3.5, 7, 100]
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{"MetricName": "metric1", "Value": val, "Unit": "Seconds"} for val in values
],
)
# get_metric_data
stats = ["Average", "Sum", "Minimum", "Maximum"]
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result_" + stat,
"MetricStat": {
"Metric": {"Namespace": namespace1, "MetricName": "metric1"},
"Period": 60,
"Stat": stat,
},
}
for stat in stats
],
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
# Assert Average/Min/Max/Sum is returned as expected
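# values sum to 116.5, so the average is 116.5 / 6 = 19.41.. (compared as int 19 below)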
avg = [
res for res in response["MetricDataResults"] if res["Id"] == "result_Average"
][0]
avg["Label"].should.equal("metric1 Average")
avg["StatusCode"].should.equal("Complete")
[int(val) for val in avg["Values"]].should.equal([19])
sum_ = [res for res in response["MetricDataResults"] if res["Id"] == "result_Sum"][
0
]
sum_["Label"].should.equal("metric1 Sum")
sum_["StatusCode"].should.equal("Complete")
[val for val in sum_["Values"]].should.equal([sum(values)])
min_ = [
res for res in response["MetricDataResults"] if res["Id"] == "result_Minimum"
][0]
min_["Label"].should.equal("metric1 Minimum")
min_["StatusCode"].should.equal("Complete")
[int(val) for val in min_["Values"]].should.equal([0])
max_ = [
res for res in response["MetricDataResults"] if res["Id"] == "result_Maximum"
][0]
max_["Label"].should.equal("metric1 Maximum")
max_["StatusCode"].should.equal("Complete")
[int(val) for val in max_["Values"]].should.equal([100])
@mock_cloudwatch
def test_get_metric_data_partially_within_timeframe():
utc_now = datetime.now(tz=pytz.utc)
yesterday = utc_now - timedelta(days=1)
last_week = utc_now - timedelta(days=7)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace1 = "my_namespace/"
# put metric data
values = [0, 2, 4, 3.5, 7, 100]
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 10,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 20,
"Unit": "Seconds",
"Timestamp": yesterday,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": last_week,
}
],
)
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result",
"MetricStat": {
"Metric": {"Namespace": namespace1, "MetricName": "metric1"},
"Period": 60,
"Stat": "Sum",
},
}
],
StartTime=yesterday - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
# Assert Last week's data is not returned
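# Only 10 (now) + 20 (yesterday) fall inside the StartTime/EndTime window; the 50 from last week does not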
len(response["MetricDataResults"]).should.equal(1)
sum_ = response["MetricDataResults"][0]
sum_["Label"].should.equal("metric1 Sum")
sum_["StatusCode"].should.equal("Complete")
sum_["Values"].should.equal([30.0])
@mock_cloudwatch
def test_get_metric_data_outside_timeframe():
utc_now = datetime.now(tz=pytz.utc)
last_week = utc_now - timedelta(days=7)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace1 = "my_namespace/"
# put metric data
cloudwatch.put_metric_data(
Namespace=namespace1,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": last_week,
}
],
)
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result",
"MetricStat": {
"Metric": {"Namespace": namespace1, "MetricName": "metric1"},
"Period": 60,
"Stat": "Sum",
},
}
],
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
# Assert Last week's data is not returned
len(response["MetricDataResults"]).should.equal(1)
response["MetricDataResults"][0]["Id"].should.equal("result")
response["MetricDataResults"][0]["StatusCode"].should.equal("Complete")
response["MetricDataResults"][0]["Values"].should.equal([])
@mock_cloudwatch
def test_get_metric_data_for_multiple_metrics():
utc_now = datetime.now(tz=pytz.utc)
cloudwatch = boto3.client("cloudwatch", "eu-west-1")
namespace = "my_namespace/"
# put metric data
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "metric1",
"Value": 50,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
cloudwatch.put_metric_data(
Namespace=namespace,
MetricData=[
{
"MetricName": "metric2",
"Value": 25,
"Unit": "Seconds",
"Timestamp": utc_now,
}
],
)
# get_metric_data
response = cloudwatch.get_metric_data(
MetricDataQueries=[
{
"Id": "result1",
"MetricStat": {
"Metric": {"Namespace": namespace, "MetricName": "metric1"},
"Period": 60,
"Stat": "Sum",
},
},
{
"Id": "result2",
"MetricStat": {
"Metric": {"Namespace": namespace, "MetricName": "metric2"},
"Period": 60,
"Stat": "Sum",
},
},
],
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
)
#
len(response["MetricDataResults"]).should.equal(2)
res1 = [res for res in response["MetricDataResults"] if res["Id"] == "result1"][0]
res1["Values"].should.equal([50.0])
res2 = [res for res in response["MetricDataResults"] if res["Id"] == "result2"][0]
res2["Values"].should.equal([25.0])

View File

@ -7,6 +7,7 @@ from nose.tools import assert_raises
from moto import mock_cognitoidentity
from moto.cognitoidentity.utils import get_random_identity_id
from moto.core import ACCOUNT_ID
from uuid import UUID
@mock_cognitoidentity
@ -83,8 +84,10 @@ def test_describe_identity_pool_with_invalid_id_raises_error():
# testing a helper function
def test_get_random_identity_id():
assert len(get_random_identity_id("us-west-2")) > 0
assert len(get_random_identity_id("us-west-2").split(":")[1]) == 19
identity_id = get_random_identity_id("us-west-2")
region, id = identity_id.split(":")
region.should.equal("us-west-2")
UUID(id, version=4) # Will throw an error if it's not a valid UUID
@mock_cognitoidentity
@ -96,7 +99,6 @@ def test_get_id():
IdentityPoolId="us-west-2:12345",
Logins={"someurl": "12345"},
)
print(result)
assert (
result.get("IdentityId", "").startswith("us-west-2")
or result.get("ResponseMetadata").get("HTTPStatusCode") == 200

View File

@ -48,6 +48,5 @@ def test_get_id():
},
)
print(res.data)
json_data = json.loads(res.data.decode("utf-8"))
assert ":" in json_data["IdentityId"]

View File

@ -11,6 +11,8 @@ from moto import mock_s3
from moto.config import mock_config
from moto.core import ACCOUNT_ID
import sure # noqa
@mock_config
def test_put_configuration_recorder():

View File

@ -1454,6 +1454,13 @@ def test_filter_expression():
filter_expr.expr(row1).should.be(True)
filter_expr.expr(row2).should.be(False)
# lowercase AND test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression(
"Id > :v0 and Subs < :v1", {}, {":v0": {"N": "5"}, ":v1": {"N": "7"}}
)
filter_expr.expr(row1).should.be(True)
filter_expr.expr(row2).should.be(False)
# OR test
filter_expr = moto.dynamodb2.comparisons.get_filter_expression(
"Id = :v0 OR Id=:v1", {}, {":v0": {"N": "5"}, ":v1": {"N": "8"}}
@ -2785,7 +2792,7 @@ def test_query_gsi_with_range_key():
res = dynamodb.query(
TableName="test",
IndexName="test_gsi",
KeyConditionExpression="gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key",
KeyConditionExpression="gsi_hash_key = :gsi_hash_key and gsi_range_key = :gsi_range_key",
ExpressionAttributeValues={
":gsi_hash_key": {"S": "key1"},
":gsi_range_key": {"S": "range1"},
@ -4212,6 +4219,44 @@ def test_gsi_verify_negative_number_order():
)
@mock_dynamodb2
def test_dynamodb_max_1mb_limit():
ddb = boto3.resource("dynamodb", region_name="eu-west-1")
table_name = "populated-mock-table"
table = ddb.create_table(
TableName=table_name,
KeySchema=[
{"AttributeName": "partition_key", "KeyType": "HASH"},
{"AttributeName": "sort_key", "KeyType": "RANGE"},
],
AttributeDefinitions=[
{"AttributeName": "partition_key", "AttributeType": "S"},
{"AttributeName": "sort_key", "AttributeType": "S"},
],
BillingMode="PAY_PER_REQUEST",
)
# Populate the table
items = [
{
"partition_key": "partition_key_val", # size=30
"sort_key": "sort_key_value____" + str(i), # size=30
}
for i in range(10000, 29999)
]
with table.batch_writer() as batch:
for item in items:
batch.put_item(Item=item)
response = table.query(
KeyConditionExpression=Key("partition_key").eq("partition_key_val")
)
# We shouldn't get everything back - the total result set is well over 1MB
len(items).should.be.greater_than(response["Count"])
response["LastEvaluatedKey"].shouldnt.be(None)
def assert_raise_syntax_error(client_error, token, near):
"""
Assert that a client_error is the expected syntax error. Syntax errors follow the `syntax_error_template` format.
@ -4277,3 +4322,12 @@ def test_update_expression_with_multiple_set_clauses_must_be_comma_separated():
assert False, "Validation exception not thrown"
except dynamodb.exceptions.ClientError as e:
assert_raise_syntax_error(e, "Mystr2", "myNum Mystr2 myNum2")
@mock_dynamodb2
def test_list_tables_exclusive_start_table_name_empty():
client = boto3.client("dynamodb", region_name="us-east-1")
resp = client.list_tables(Limit=1, ExclusiveStartTableName="whatever")
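# No tables were created, so paginating past any start name yields an empty page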
len(resp["TableNames"]).should.equal(0)

tests/test_eb/test_eb.py Normal file
View File

@ -0,0 +1,130 @@
import boto3
import sure # noqa
from botocore.exceptions import ClientError
from moto import mock_elasticbeanstalk
@mock_elasticbeanstalk
def test_create_application():
# Create Elastic Beanstalk Application
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
app = conn.create_application(ApplicationName="myapp",)
app["Application"]["ApplicationName"].should.equal("myapp")
@mock_elasticbeanstalk
def test_create_application_dup():
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
conn.create_application(ApplicationName="myapp",)
conn.create_application.when.called_with(ApplicationName="myapp",).should.throw(
ClientError
)
@mock_elasticbeanstalk
def test_describe_applications():
# Create Elastic Beanstalk Application
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
conn.create_application(ApplicationName="myapp",)
apps = conn.describe_applications()
len(apps["Applications"]).should.equal(1)
apps["Applications"][0]["ApplicationName"].should.equal("myapp")
@mock_elasticbeanstalk
def test_create_environment():
# Create Elastic Beanstalk Environment
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
app = conn.create_application(ApplicationName="myapp",)
env = conn.create_environment(ApplicationName="myapp", EnvironmentName="myenv",)
env["EnvironmentName"].should.equal("myenv")
@mock_elasticbeanstalk
def test_describe_environments():
# List Elastic Beanstalk Envs
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
conn.create_application(ApplicationName="myapp",)
conn.create_environment(
ApplicationName="myapp", EnvironmentName="myenv",
)
envs = conn.describe_environments()
envs = envs["Environments"]
len(envs).should.equal(1)
envs[0]["ApplicationName"].should.equal("myapp")
envs[0]["EnvironmentName"].should.equal("myenv")
def tags_dict_to_list(tag_dict):
tag_list = []
for key, value in tag_dict.items():
tag_list.append({"Key": key, "Value": value})
return tag_list
def tags_list_to_dict(tag_list):
tag_dict = {}
for tag in tag_list:
tag_dict[tag["Key"]] = tag["Value"]
return tag_dict
@mock_elasticbeanstalk
def test_create_environment_tags():
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
conn.create_application(ApplicationName="myapp",)
env_tags = {"initial key": "initial value"}
env = conn.create_environment(
ApplicationName="myapp",
EnvironmentName="myenv",
Tags=tags_dict_to_list(env_tags),
)
tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],)
tags["ResourceArn"].should.equal(env["EnvironmentArn"])
tags_list_to_dict(tags["ResourceTags"]).should.equal(env_tags)
@mock_elasticbeanstalk
def test_update_tags():
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
conn.create_application(ApplicationName="myapp",)
env_tags = {
"initial key": "initial value",
"to remove": "delete me",
"to update": "original",
}
env = conn.create_environment(
ApplicationName="myapp",
EnvironmentName="myenv",
Tags=tags_dict_to_list(env_tags),
)
extra_env_tags = {
"to update": "new",
"extra key": "extra value",
}
conn.update_tags_for_resource(
ResourceArn=env["EnvironmentArn"],
TagsToAdd=tags_dict_to_list(extra_env_tags),
TagsToRemove=["to remove"],
)
total_env_tags = env_tags.copy()
total_env_tags.update(extra_env_tags)
del total_env_tags["to remove"]
tags = conn.list_tags_for_resource(ResourceArn=env["EnvironmentArn"],)
tags["ResourceArn"].should.equal(env["EnvironmentArn"])
tags_list_to_dict(tags["ResourceTags"]).should.equal(total_env_tags)
@mock_elasticbeanstalk
def test_list_available_solution_stacks():
conn = boto3.client("elasticbeanstalk", region_name="us-east-1")
stacks = conn.list_available_solution_stacks()
len(stacks["SolutionStacks"]).should.be.greater_than(0)
len(stacks["SolutionStacks"]).should.be.equal(len(stacks["SolutionStackDetails"]))

View File

@ -9,6 +9,7 @@ from nose.tools import assert_raises
import base64
import datetime
import ipaddress
import json
import six
import boto
@ -18,7 +19,7 @@ from boto.exception import EC2ResponseError, EC2ResponseError
from freezegun import freeze_time
import sure # noqa
from moto import mock_ec2_deprecated, mock_ec2
from moto import mock_ec2_deprecated, mock_ec2, mock_cloudformation
from tests.helpers import requires_boto_gte
@ -1334,6 +1335,12 @@ def test_create_instance_ebs_optimized():
instance.load()
instance.ebs_optimized.should.be(False)
instance = ec2_resource.create_instances(
ImageId="ami-12345678", MaxCount=1, MinCount=1,
)[0]
instance.load()
instance.ebs_optimized.should.be(False)
@mock_ec2
def test_run_multiple_instances_in_same_command():
@ -1414,3 +1421,40 @@ def test_describe_instance_attribute():
invalid_instance_attribute=invalid_instance_attribute
)
ex.exception.response["Error"]["Message"].should.equal(message)
@mock_ec2
@mock_cloudformation
def test_volume_size_through_cloudformation():
ec2 = boto3.client("ec2", region_name="us-east-1")
cf = boto3.client("cloudformation", region_name="us-east-1")
volume_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"testInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-d3adb33f",
"KeyName": "dummy",
"InstanceType": "t2.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": "50"}}
],
"Tags": [
{"Key": "foo", "Value": "bar"},
{"Key": "blah", "Value": "baz"},
],
},
}
},
}
template_json = json.dumps(volume_template)
cf.create_stack(StackName="test_stack", TemplateBody=template_json)
instances = ec2.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)

View File

@ -52,6 +52,29 @@ def test_get_database_not_exits():
)
@mock_glue
def test_get_databases_empty():
client = boto3.client("glue", region_name="us-east-1")
response = client.get_databases()
response["DatabaseList"].should.have.length_of(0)
@mock_glue
def test_get_databases_several_items():
client = boto3.client("glue", region_name="us-east-1")
database_name_1, database_name_2 = "firstdatabase", "seconddatabase"
helpers.create_database(client, database_name_1)
helpers.create_database(client, database_name_2)
database_list = sorted(
client.get_databases()["DatabaseList"], key=lambda x: x["Name"]
)
database_list.should.have.length_of(2)
database_list[0].should.equal({"Name": database_name_1})
database_list[1].should.equal({"Name": database_name_2})
@mock_glue
def test_create_table():
client = boto3.client("glue", region_name="us-east-1")

View File

@ -3256,7 +3256,8 @@ def test_boto3_put_object_tagging_on_earliest_version():
# Older version has tags while the most recent does not
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp["TagSet"].should.equal(
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}]
)
@ -3334,7 +3335,8 @@ def test_boto3_put_object_tagging_on_both_version():
resp = s3.get_object_tagging(Bucket=bucket_name, Key=key, VersionId=first_object.id)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp["TagSet"].should.equal(
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "foo"}, {"Key": "item2", "Value": "bar"}]
)
@ -3342,7 +3344,8 @@ def test_boto3_put_object_tagging_on_both_version():
Bucket=bucket_name, Key=key, VersionId=second_object.id
)
resp["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
resp["TagSet"].should.equal(
sorted_tagset = sorted(resp["TagSet"], key=lambda t: t["Key"])
sorted_tagset.should.equal(
[{"Key": "item1", "Value": "baz"}, {"Key": "item2", "Value": "bin"}]
)
@ -3744,6 +3747,28 @@ def test_root_dir_with_empty_name_works():
store_and_read_back_a_key("/")
@parameterized(["mybucket", "my.bucket"])
@mock_s3
def test_leading_slashes_not_removed(bucket_name):
"""Make sure that leading slashes are not removed internally."""
s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
s3.create_bucket(Bucket=bucket_name)
uploaded_key = "/key"
invalid_key_1 = "key"
invalid_key_2 = "//key"
s3.put_object(Bucket=bucket_name, Key=uploaded_key, Body=b"Some body")
with assert_raises(ClientError) as e:
s3.get_object(Bucket=bucket_name, Key=invalid_key_1)
e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
with assert_raises(ClientError) as e:
s3.get_object(Bucket=bucket_name, Key=invalid_key_2)
e.exception.response["Error"]["Code"].should.equal("NoSuchKey")
@parameterized(
[("foo/bar/baz",), ("foo",), ("foo/run_dt%3D2019-01-01%252012%253A30%253A00",)]
)
@ -4293,24 +4318,17 @@ def test_s3_config_dict():
FakeAcl,
FakeGrant,
FakeGrantee,
FakeTag,
FakeTagging,
FakeTagSet,
OWNER,
)
# Without any buckets:
assert not s3_config_query.get_config_resource("some_bucket")
tags = FakeTagging(
FakeTagSet(
[FakeTag("someTag", "someValue"), FakeTag("someOtherTag", "someOtherValue")]
)
)
tags = {"someTag": "someValue", "someOtherTag": "someOtherValue"}
# With 1 bucket in us-west-2:
s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2")
s3_config_query.backends["global"].put_bucket_tagging("bucket1", tags)
s3_config_query.backends["global"].put_bucket_tags("bucket1", tags)
# With a log bucket:
s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2")

View File

@ -77,3 +77,34 @@ def test_extract_tag_names():
expected = ["key1", "key2"]
expected.should.be.equal(actual)
def test_copy_non_existing_arn():
svc = TaggingService()
tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}]
svc.tag_resource("new_arn", tags)
#
svc.copy_tags("non_existing_arn", "new_arn")
# Copying from a non-existing ARN should be a NOOP
# Assert the old tags still exist
actual = sorted(
svc.list_tags_for_resource("new_arn")["Tags"], key=lambda t: t["Key"]
)
actual.should.equal(tags)
def test_copy_existing_arn():
svc = TaggingService()
tags_old_arn = [{"Key": "key1", "Value": "value1"}]
tags_new_arn = [{"Key": "key2", "Value": "value2"}]
svc.tag_resource("old_arn", tags_old_arn)
svc.tag_resource("new_arn", tags_new_arn)
#
svc.copy_tags("old_arn", "new_arn")
# Assert the old tags still exist
actual = sorted(
svc.list_tags_for_resource("new_arn")["Tags"], key=lambda t: t["Key"]
)
actual.should.equal(
[{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}]
)
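Together, these two tests pin down the copy semantics: tags from the source ARN are merged into the target's existing tags, and an unknown source ARN is silently ignored. A minimal sketch of a backend method satisfying them, assuming tags are stored per ARN as a plain key/value dict (the actual TaggingService internals may differ):
def copy_tags(self, copy_from_arn, copy_to_arn):
    # NOOP when the source ARN has no tags recorded (assumed storage: self.tags maps arn -> {key: value})
    source_tags = self.tags.get(copy_from_arn)
    if source_tags:
        # Merge into, rather than replace, the target's existing tags
        self.tags.setdefault(copy_to_arn, {}).update(source_tags)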