Merge pull request #7 from spulec/master

Merge upstream
This commit is contained in:
Bert Blommers 2019-10-14 09:32:22 +01:00 committed by GitHub
commit cc1693c4ec
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 2270 additions and 228 deletions

View File

@ -87,7 +87,7 @@ that look like this:
]
```
It's recommended to read the comment for the `ConfigQueryModel` [base class here](moto/core/models.py).
It's recommended to read the comment for the `ConfigQueryModel`'s `list_config_service_resources` function in [base class here](moto/core/models.py).
^^ The AWS Config code will see this and format it correctly for both aggregated and non-aggregated calls.
@ -102,6 +102,19 @@ An example of a working implementation of this is [S3](moto/s3/config.py).
Pagination should generally be able to pull out the resource across any region, so it should be sharded by `region-item-name` -- this is not done for S3
because S3 has a globally unique namespace.
### Describing Resources
TODO: Need to fill this in when it's implemented
Fetching a resource's configuration has some similarities to listing resources, but it requires more work to implement. Because of the
various ways that a resource can be configured, some care is needed to ensure that the Config dict returned is correct.
For most resource types the following is true:
1. There are regional backends with their own sets of data
1. Config aggregation can pull data from any backend region -- we assume that everything lives in the same account
The current implementation is for S3. S3 is very complex, and what Config returns for a bucket depends on how that bucket is
configured.
When implementing resource config fetching, you will need to return at a minimum `None` if the resource is not found, or a `dict` that looks
like what AWS Config would return.
It's recommended to read the comment for the `ConfigQueryModel`'s `get_config_resource` function in [base class here](moto/core/models.py).
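
To make the shape of this more concrete, here is a minimal, purely illustrative sketch of a `get_config_resource` override for a hypothetical resource type. The `widgets` attribute and `to_config_dict()` helper are made-up placeholders for whatever your backend actually stores; only the method signature and the `None`-vs-`dict` contract come from `ConfigQueryModel`:

```
from moto.core.models import ConfigQueryModel


class WidgetConfigQuery(ConfigQueryModel):
    def get_config_resource(self, resource_id, resource_name=None,
                            backend_region=None, resource_region=None):
        # Pick the backend: aggregated calls pass resource_region,
        # non-aggregated calls pass the region the request arrived from.
        region = resource_region or backend_region
        backend = self.backends.get(region)
        if not backend:
            return None  # Region not implemented == resource not discovered.

        widget = backend.widgets.get(resource_id)  # hypothetical storage dict
        if not widget:
            return None  # Config expects None when the resource is unknown.

        # Optional name filter used by aggregated batch requests:
        if resource_name and widget.name != resource_name:
            return None

        # Return a dict shaped like an AWS Config configuration item:
        return widget.to_config_dict()  # hypothetical helper on the model
```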

View File

@ -5866,7 +5866,7 @@
- [ ] update_job
## sns
48% implemented
58% implemented
- [ ] add_permission
- [ ] check_if_phone_number_is_opted_out
- [ ] confirm_subscription
@ -5886,7 +5886,7 @@
- [X] list_platform_applications
- [X] list_subscriptions
- [ ] list_subscriptions_by_topic
- [ ] list_tags_for_resource
- [x] list_tags_for_resource
- [X] list_topics
- [ ] opt_in_phone_number
- [X] publish
@ -5897,9 +5897,9 @@
- [X] set_subscription_attributes
- [ ] set_topic_attributes
- [X] subscribe
- [ ] tag_resource
- [x] tag_resource
- [X] unsubscribe
- [ ] untag_resource
- [x] untag_resource
## sqs
65% implemented

View File

@ -33,6 +33,8 @@ from moto.s3.exceptions import MissingBucket, MissingKey
from moto import settings
from .utils import make_function_arn, make_function_ver_arn
from moto.sqs import sqs_backends
from moto.dynamodb2 import dynamodb_backends2
from moto.dynamodbstreams import dynamodbstreams_backends
logger = logging.getLogger(__name__)
@ -273,6 +275,70 @@ class LambdaFunction(BaseModel):
"Configuration": self.get_configuration(),
}
def update_configuration(self, config_updates):
for key, value in config_updates.items():
if key == "Description":
self.description = value
elif key == "Handler":
self.handler = value
elif key == "MemorySize":
self.memory_size = value
elif key == "Role":
self.role = value
elif key == "Runtime":
self.run_time = value
elif key == "Timeout":
self.timeout = value
elif key == "VpcConfig":
self.vpc_config = value
return self.get_configuration()
def update_function_code(self, updated_spec):
if 'DryRun' in updated_spec and updated_spec['DryRun']:
return self.get_configuration()
if 'ZipFile' in updated_spec:
self.code['ZipFile'] = updated_spec['ZipFile']
# using the "hackery" from __init__ because it seems to work
# TODOs and FIXMEs included, because they'll need to be fixed
# in both places now
try:
to_unzip_code = base64.b64decode(
bytes(updated_spec['ZipFile'], 'utf-8'))
except Exception:
to_unzip_code = base64.b64decode(updated_spec['ZipFile'])
self.code_bytes = to_unzip_code
self.code_size = len(to_unzip_code)
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
# TODO: we should be putting this in a lambda bucket
self.code['UUID'] = str(uuid.uuid4())
self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID'])
elif 'S3Bucket' in updated_spec and 'S3Key' in updated_spec:
key = None
try:
# FIXME: does not validate bucket region
key = s3_backend.get_key(updated_spec['S3Bucket'], updated_spec['S3Key'])
except MissingBucket:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist")
except MissingKey:
if do_validate_s3():
raise ValueError(
"InvalidParameterValueException",
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.")
if key:
self.code_bytes = key.value
self.code_size = key.size
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
return self.get_configuration()
@staticmethod
def convert(s):
try:
@ -280,14 +346,6 @@ class LambdaFunction(BaseModel):
except Exception:
return s
@staticmethod
def is_json(test_str):
try:
response = json.loads(test_str)
except Exception:
response = test_str
return response
def _invoke_lambda(self, code, event=None, context=None):
# TODO: context not yet implemented
if event is None:
@ -692,6 +750,15 @@ class LambdaBackend(BaseBackend):
queue.lambda_event_source_mappings[esm.function_arn] = esm
return esm
for stream in json.loads(dynamodbstreams_backends[self.region_name].list_streams())['Streams']:
if stream['StreamArn'] == spec['EventSourceArn']:
spec.update({'FunctionArn': func.function_arn})
esm = EventSourceMapping(spec)
self._event_source_mappings[esm.uuid] = esm
table_name = stream['TableName']
table = dynamodb_backends2[self.region_name].get_table(table_name)
table.lambda_event_source_mappings[esm.function_arn] = esm
return esm
raise RESTError('ResourceNotFoundException', 'Invalid EventSourceArn')
def publish_function(self, function_name):
@ -811,6 +878,19 @@ class LambdaBackend(BaseBackend):
func = self._lambdas.get_function(function_name, qualifier)
func.invoke(json.dumps(event), {}, {})
def send_dynamodb_items(self, function_arn, items, source):
event = {'Records': [
{
'eventID': item.to_json()['eventID'],
'eventName': 'INSERT',
'eventVersion': item.to_json()['eventVersion'],
'eventSource': item.to_json()['eventSource'],
'awsRegion': self.region_name,
'dynamodb': item.to_json()['dynamodb'],
'eventSourceARN': source} for item in items]}
func = self._lambdas.get_arn(function_arn)
func.invoke(json.dumps(event), {}, {})
def list_tags(self, resource):
return self.get_function_by_arn(resource).tags

View File

@ -122,6 +122,20 @@ class LambdaResponse(BaseResponse):
if request.method == 'POST':
return self._add_policy(request, full_url, headers)
def configuration(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'PUT':
return self._put_configuration(request)
else:
raise ValueError("Cannot handle request")
def code(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == 'PUT':
return self._put_code()
else:
raise ValueError("Cannot handle request")
def _add_policy(self, request, full_url, headers):
path = request.path if hasattr(request, 'path') else path_url(request.url)
function_name = path.split('/')[-2]
@ -308,3 +322,30 @@ class LambdaResponse(BaseResponse):
return 204, {}, "{}"
else:
return 404, {}, "{}"
def _put_configuration(self, request):
function_name = self.path.rsplit('/', 2)[-2]
qualifier = self._get_param('Qualifier', None)
fn = self.lambda_backend.get_function(function_name, qualifier)
if fn:
config = fn.update_configuration(self.json_body)
return 200, {}, json.dumps(config)
else:
return 404, {}, "{}"
def _put_code(self):
function_name = self.path.rsplit('/', 2)[-2]
qualifier = self._get_param('Qualifier', None)
fn = self.lambda_backend.get_function(function_name, qualifier)
if fn:
if self.json_body.get('Publish', False):
fn = self.lambda_backend.publish_function(function_name)
config = fn.update_function_code(self.json_body)
return 200, {}, json.dumps(config)
else:
return 404, {}, "{}"

View File

@ -16,5 +16,7 @@ url_paths = {
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke,
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async,
r'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag,
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$': response.policy
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$': response.policy,
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/configuration/?$': response.configuration,
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/code/?$': response.code
}

View File

@ -254,3 +254,25 @@ class TooManyResourceIds(JsonRESTError):
def __init__(self):
super(TooManyResourceIds, self).__init__('ValidationException', "The specified list had more than 20 resource ID's. "
"It must have '20' or less items")
class ResourceNotDiscoveredException(JsonRESTError):
code = 400
def __init__(self, type, resource):
super(ResourceNotDiscoveredException, self).__init__('ResourceNotDiscoveredException',
'Resource {resource} of resourceType:{type} is unknown or has not been '
'discovered'.format(resource=resource, type=type))
class TooManyResourceKeys(JsonRESTError):
code = 400
def __init__(self, bad_list):
message = '1 validation error detected: Value \'{bad_list}\' at ' \
'\'resourceKeys\' failed to satisfy constraint: ' \
'Member must have length less than or equal to 100'.format(bad_list=bad_list)
# For PY2:
message = str(message)
super(TooManyResourceKeys, self).__init__("ValidationException", message)

View File

@ -17,7 +17,8 @@ from moto.config.exceptions import InvalidResourceTypeException, InvalidDelivery
InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \
NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException, TagKeyTooBig, \
TooManyTags, TagValueTooBig, TooManyAccountSources, InvalidParameterValueException, InvalidNextTokenException, \
NoSuchConfigurationAggregatorException, InvalidTagCharacters, DuplicateTags, InvalidLimit, InvalidResourceParameters, TooManyResourceIds
NoSuchConfigurationAggregatorException, InvalidTagCharacters, DuplicateTags, InvalidLimit, InvalidResourceParameters, \
TooManyResourceIds, ResourceNotDiscoveredException, TooManyResourceKeys
from moto.core import BaseBackend, BaseModel
from moto.s3.config import s3_config_query
@ -790,6 +791,111 @@ class ConfigBackend(BaseBackend):
return result
def get_resource_config_history(self, resource_type, id, backend_region):
"""Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.
NOTE: This is --NOT-- returning history, as history is not supported in moto at this time. (PRs welcome!)
As such, the later_time, earlier_time, limit, and next_token are ignored, as this will only
return 1 item. (If no item is found, it raises an exception.)
"""
# If the type isn't implemented then we won't find the item:
if resource_type not in RESOURCE_MAP:
raise ResourceNotDiscoveredException(resource_type, id)
# Is the resource type global?
if RESOURCE_MAP[resource_type].backends.get('global'):
backend_region = 'global'
# If the backend region isn't implemented then we won't find the item:
if not RESOURCE_MAP[resource_type].backends.get(backend_region):
raise ResourceNotDiscoveredException(resource_type, id)
# Get the item:
item = RESOURCE_MAP[resource_type].get_config_resource(id, backend_region=backend_region)
if not item:
raise ResourceNotDiscoveredException(resource_type, id)
item['accountId'] = DEFAULT_ACCOUNT_ID
return {'configurationItems': [item]}
def batch_get_resource_config(self, resource_keys, backend_region):
"""Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.
:param resource_keys:
:param backend_region:
"""
# Can't have more than 100 items
if len(resource_keys) > 100:
raise TooManyResourceKeys(['com.amazonaws.starling.dove.ResourceKey@12345'] * len(resource_keys))
results = []
for resource in resource_keys:
# Does the resource type exist?
if not RESOURCE_MAP.get(resource['resourceType']):
# Not found so skip.
continue
# Is the resource type global?
if RESOURCE_MAP[resource['resourceType']].backends.get('global'):
backend_region = 'global'
# If the backend region isn't implemented then we won't find the item:
if not RESOURCE_MAP[resource['resourceType']].backends.get(backend_region):
continue
# Get the item:
item = RESOURCE_MAP[resource['resourceType']].get_config_resource(resource['resourceId'], backend_region=backend_region)
if not item:
continue
item['accountId'] = DEFAULT_ACCOUNT_ID
results.append(item)
return {'baseConfigurationItems': results, 'unprocessedResourceKeys': []} # At this time, moto is not adding unprocessed items.
def batch_get_aggregate_resource_config(self, aggregator_name, resource_identifiers):
"""Returns the configuration of an item in the AWS Config format of the resource for the current regional backend.
As far as moto goes, the only real difference between this function and the `batch_get_resource_config` function is that
this one requires a Config Aggregator to be set up beforehand and can search based on resource regions.
Note: moto will IGNORE the resource account ID in the search query.
"""
if not self.config_aggregators.get(aggregator_name):
raise NoSuchConfigurationAggregatorException()
# Can't have more than 100 items
if len(resource_identifiers) > 100:
raise TooManyResourceKeys(['com.amazonaws.starling.dove.AggregateResourceIdentifier@12345'] * len(resource_identifiers))
found = []
not_found = []
for identifier in resource_identifiers:
resource_type = identifier['ResourceType']
resource_region = identifier['SourceRegion']
resource_id = identifier['ResourceId']
resource_name = identifier.get('ResourceName', None)
# Does the resource type exist?
if not RESOURCE_MAP.get(resource_type):
not_found.append(identifier)
continue
# Get the item:
item = RESOURCE_MAP[resource_type].get_config_resource(resource_id, resource_name=resource_name,
resource_region=resource_region)
if not item:
not_found.append(identifier)
continue
item['accountId'] = DEFAULT_ACCOUNT_ID
found.append(item)
return {'BaseConfigurationItems': found, 'UnprocessedResourceIdentifiers': not_found}
config_backends = {}
boto3_session = Session()

View File

@ -102,16 +102,18 @@ class ConfigResponse(BaseResponse):
self._get_param('NextToken'))
return json.dumps(schema)
"""
def get_resource_config_history(self):
schema = self.config_backend.get_resource_config_history(self._get_param('resourceType'),
self._get_param('resourceId'),
self.region)
return json.dumps(schema)
def batch_get_resource_config(self):
# TODO implement me!
return ""
schema = self.config_backend.batch_get_resource_config(self._get_param('resourceKeys'),
self.region)
return json.dumps(schema)
def batch_get_aggregate_resource_config(self):
# TODO implement me!
return ""
def get_resource_config_history(self):
# TODO implement me!
return ""
"""
schema = self.config_backend.batch_get_aggregate_resource_config(self._get_param('ConfigurationAggregatorName'),
self._get_param('ResourceIdentifiers'))
return json.dumps(schema)

View File

@ -554,7 +554,7 @@ class ConfigQueryModel(object):
This supports both aggregated and non-aggregated listing. The following notes the difference:
- Non Aggregated Listing -
- Non-Aggregated Listing -
This only lists resources within a region. The way that this is implemented in moto is based on the region
for the resource backend.
@ -593,8 +593,31 @@ class ConfigQueryModel(object):
"""
raise NotImplementedError()
def get_config_resource(self):
"""TODO implement me."""
def get_config_resource(self, resource_id, resource_name=None, backend_region=None, resource_region=None):
"""For AWS Config. This will query the backend for the specific resource type configuration.
This supports both aggregated and non-aggregated fetching. For batched fetching, the Config batching requests
will call this function N times, once for each of the N objects that need to be fetched.
- Non-Aggregated Fetching -
This only fetches a resource config within a region. The way that this is implemented in moto is based on the region
for the resource backend.
You must set the `backend_region` to the region that the API request arrived from. `resource_region` should be set to `None`.
- Aggregated Fetching -
This fetches resources from all potential regional backends. For non-global resource types, this should collect a full
list of resources from all the backends, and then be able to filter by the resource region. This is because an
aggregator can aggregate resources from multiple regions. In moto, aggregated regions will *assume full aggregation
from all resources in all regions for a given resource type*.
...
:param resource_id:
:param resource_name:
:param backend_region:
:param resource_region:
:return:
"""
raise NotImplementedError()
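
As a hedged illustration of the two calling conventions described in this docstring, here is how the S3 implementation added later in this change (`s3_config_query`) could be invoked; the bucket name is made up and assumed to already exist in the global S3 backend:

```
from moto.s3.config import s3_config_query

# Non-aggregated fetch: backend_region is the region the API request arrived from.
item = s3_config_query.get_config_resource('my-bucket', backend_region='us-east-1')

# Aggregated fetch: no backend_region; optionally filter on resource region and name.
item = s3_config_query.get_config_resource('my-bucket',
                                            resource_name='my-bucket',
                                            resource_region='eu-west-1')

# Both calls return either None or a dict shaped like an AWS Config configuration item.
```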

View File

@ -799,21 +799,6 @@ class ConditionExpressionParser:
else: # pragma: no cover
raise ValueError("Unknown expression node kind %r" % node.kind)
def _print_debug(self, nodes): # pragma: no cover
print('ROOT')
for node in nodes:
self._print_node_recursive(node, depth=1)
def _print_node_recursive(self, node, depth=0): # pragma: no cover
if len(node.children) > 0:
print(' ' * depth, node.nonterminal, node.kind)
for child in node.children:
self._print_node_recursive(child, depth=depth + 1)
else:
print(' ' * depth, node.nonterminal, node.kind, node.value)
def _assert(self, condition, message, nodes):
if not condition:
raise ValueError(message + " " + " ".join([t.text for t in nodes]))

View File

@ -1,2 +1,7 @@
class InvalidIndexNameError(ValueError):
pass
class ItemSizeTooLarge(Exception):
message = 'Item size has exceeded the maximum allowed size'
pass

View File

@ -16,7 +16,7 @@ from moto.core.exceptions import JsonRESTError
from .comparisons import get_comparison_func
from .comparisons import get_filter_expression
from .comparisons import get_expected
from .exceptions import InvalidIndexNameError
from .exceptions import InvalidIndexNameError, ItemSizeTooLarge
class DynamoJsonEncoder(json.JSONEncoder):
@ -30,6 +30,10 @@ def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
def bytesize(val):
return len(str(val).encode('utf-8'))
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
@ -49,16 +53,16 @@ class DynamoType(object):
)
def __lt__(self, other):
return self.value < other.value
return self.cast_value < other.cast_value
def __le__(self, other):
return self.value <= other.value
return self.cast_value <= other.cast_value
def __gt__(self, other):
return self.value > other.value
return self.cast_value > other.cast_value
def __ge__(self, other):
return self.value >= other.value
return self.cast_value >= other.cast_value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
@ -99,6 +103,22 @@ class DynamoType(object):
return None
def size(self):
if self.is_number():
value_size = len(str(self.value))
elif self.is_set():
sub_type = self.type[0]
value_size = sum([DynamoType({sub_type: v}).size() for v in self.value])
elif self.is_list():
value_size = sum([DynamoType(v).size() for v in self.value])
elif self.is_map():
value_size = sum([bytesize(k) + DynamoType(v).size() for k, v in self.value.items()])
elif type(self.value) == bool:
value_size = 1
else:
value_size = bytesize(self.value)
return value_size
def to_json(self):
return {self.type: self.value}
@ -126,6 +146,39 @@ class DynamoType(object):
return self.type == other.type
# https://github.com/spulec/moto/issues/1874
# Ensure that the total size of an item does not exceed 400kb
class LimitedSizeDict(dict):
def __init__(self, *args, **kwargs):
self.update(*args, **kwargs)
def __setitem__(self, key, value):
current_item_size = sum([item.size() if type(item) == DynamoType else bytesize(str(item)) for item in (list(self.keys()) + list(self.values()))])
new_item_size = bytesize(key) + (value.size() if type(value) == DynamoType else bytesize(str(value)))
# Official limit is set to 400000 (400KB)
# Manual testing confirms that the actual limit is between 409 and 410KB
# We'll set the limit to something in between to be safe
if (current_item_size + new_item_size) > 405000:
raise ItemSizeTooLarge
super(LimitedSizeDict, self).__setitem__(key, value)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, "
"got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
class Item(BaseModel):
def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
@ -134,7 +187,7 @@ class Item(BaseModel):
self.range_key = range_key
self.range_key_type = range_key_type
self.attrs = {}
self.attrs = LimitedSizeDict()
for key, value in attrs.items():
self.attrs[key] = DynamoType(value)
@ -435,6 +488,15 @@ class StreamShard(BaseModel):
seq = len(self.items) + self.starting_sequence_number
self.items.append(
StreamRecord(self.table, t, event_name, old, new, seq))
result = None
from moto.awslambda import lambda_backends
for arn, esm in self.table.lambda_event_source_mappings.items():
region = arn[len('arn:aws:lambda:'):arn.index(':', len('arn:aws:lambda:'))]
result = lambda_backends[region].send_dynamodb_items(arn, self.items, esm.event_source_arn)
if result:
self.items = []
def get(self, start, quantity):
start -= self.starting_sequence_number
@ -477,6 +539,7 @@ class Table(BaseModel):
# 'AttributeName': 'string' # Can contain this
}
self.set_stream_specification(streams)
self.lambda_event_source_mappings = {}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@ -619,18 +682,29 @@ class Table(BaseModel):
def has_range_key(self):
return self.range_key_attr is not None
def get_item(self, hash_key, range_key=None):
def get_item(self, hash_key, range_key=None, projection_expression=None):
if self.has_range_key and not range_key:
raise ValueError(
"Table has a range key, but no range key was passed into get_item")
try:
result = None
if range_key:
return self.items[hash_key][range_key]
result = self.items[hash_key][range_key]
elif hash_key in self.items:
result = self.items[hash_key]
if hash_key in self.items:
return self.items[hash_key]
if projection_expression and result:
expressions = [x.strip() for x in projection_expression.split(',')]
result = copy.deepcopy(result)
for attr in list(result.attrs):
if attr not in expressions:
result.attrs.pop(attr)
raise KeyError
if not result:
raise KeyError
return result
except KeyError:
return None
@ -824,7 +898,7 @@ class Table(BaseModel):
exclusive_start_key, index_name)
return results, scanned_count, last_evaluated_key
def _trim_results(self, results, limit, exclusive_start_key, scaned_index=None):
def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None):
if exclusive_start_key is not None:
hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
range_key = exclusive_start_key.get(self.range_key_attr)
@ -844,10 +918,10 @@ class Table(BaseModel):
if results[-1].range_key is not None:
last_evaluated_key[self.range_key_attr] = results[-1].range_key
if scaned_index:
if scanned_index:
all_indexes = self.all_indexes()
indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
idx = indexes_by_name[scaned_index]
idx = indexes_by_name[scanned_index]
idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
for col in idx_col_list:
last_evaluated_key[col] = results[-1].attrs[col]
@ -996,12 +1070,12 @@ class DynamoDBBackend(BaseBackend):
def get_table(self, table_name):
return self.tables.get(table_name)
def get_item(self, table_name, keys):
def get_item(self, table_name, keys, projection_expression=None):
table = self.get_table(table_name)
if not table:
raise ValueError("No table found")
hash_key, range_key = self.get_keys_value(table, keys)
return table.get_item(hash_key, range_key)
return table.get_item(hash_key, range_key, projection_expression)
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts,
limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None,

View File

@ -6,7 +6,7 @@ import re
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores, amzn_request_id
from .exceptions import InvalidIndexNameError
from .exceptions import InvalidIndexNameError, ItemSizeTooLarge
from .models import dynamodb_backends, dynamo_json_dump
@ -255,6 +255,9 @@ class DynamoHandler(BaseResponse):
name, item, expected, condition_expression,
expression_attribute_names, expression_attribute_values,
overwrite)
except ItemSizeTooLarge:
er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
return self.error(er, ItemSizeTooLarge.message)
except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')
@ -305,8 +308,15 @@ class DynamoHandler(BaseResponse):
def get_item(self):
name = self.body['TableName']
key = self.body['Key']
projection_expression = self.body.get('ProjectionExpression')
expression_attribute_names = self.body.get('ExpressionAttributeNames', {})
projection_expression = self._adjust_projection_expression(
projection_expression, expression_attribute_names
)
try:
item = self.dynamodb_backend.get_item(name, key)
item = self.dynamodb_backend.get_item(name, key, projection_expression)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, 'Validation Exception')
@ -338,9 +348,16 @@ class DynamoHandler(BaseResponse):
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, 'Provided list of item keys contains duplicates')
attributes_to_get = table_request.get('AttributesToGet')
projection_expression = table_request.get('ProjectionExpression')
expression_attribute_names = table_request.get('ExpressionAttributeNames', {})
projection_expression = self._adjust_projection_expression(
projection_expression, expression_attribute_names
)
results["Responses"][table_name] = []
for key in keys:
item = self.dynamodb_backend.get_item(table_name, key)
item = self.dynamodb_backend.get_item(table_name, key, projection_expression)
if item:
item_describe = item.describe_attrs(attributes_to_get)
results["Responses"][table_name].append(
@ -370,20 +387,9 @@ class DynamoHandler(BaseResponse):
filter_expression = self.body.get('FilterExpression')
expression_attribute_values = self.body.get('ExpressionAttributeValues', {})
if projection_expression and expression_attribute_names:
expressions = [x.strip() for x in projection_expression.split(',')]
projection_expression = None
for expression in expressions:
if projection_expression is not None:
projection_expression = projection_expression + ", "
else:
projection_expression = ""
if expression in expression_attribute_names:
projection_expression = projection_expression + \
expression_attribute_names[expression]
else:
projection_expression = projection_expression + expression
projection_expression = self._adjust_projection_expression(
projection_expression, expression_attribute_names
)
filter_kwargs = {}
@ -519,6 +525,25 @@ class DynamoHandler(BaseResponse):
return dynamo_json_dump(result)
def _adjust_projection_expression(self, projection_expression, expression_attribute_names):
if projection_expression and expression_attribute_names:
expressions = [x.strip() for x in projection_expression.split(',')]
projection_expr = None
for expression in expressions:
if projection_expr is not None:
projection_expr = projection_expr + ", "
else:
projection_expr = ""
if expression in expression_attribute_names:
projection_expr = projection_expr + \
expression_attribute_names[expression]
else:
projection_expr = projection_expr + expression
return projection_expr
return projection_expression
def scan(self):
name = self.body['TableName']
@ -658,6 +683,9 @@ class DynamoHandler(BaseResponse):
name, key, update_expression, attribute_updates, expression_attribute_names,
expression_attribute_values, expected, condition_expression
)
except ItemSizeTooLarge:
er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
return self.error(er, ItemSizeTooLarge.message)
except ValueError:
er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
return self.error(er, 'A condition specified in the operation could not be evaluated.')

View File

@ -790,6 +790,8 @@ class EC2ContainerServiceBackend(BaseBackend):
cluster_name = cluster_str.split('/')[-1]
if cluster_name not in self.clusters:
raise Exception("{0} is not a cluster".format(cluster_name))
if not list_container_instance_ids:
raise JsonRESTError('InvalidParameterException', 'Container instance cannot be empty')
failures = []
container_instance_objects = []
for container_instance_id in list_container_instance_ids:

View File

@ -222,12 +222,8 @@ class DeliveryStream(BaseModel):
self.redshift_role_arn = stream_kwargs.get('redshift_role_arn')
self.redshift_copy_command = stream_kwargs.get('redshift_copy_command')
self.s3_role_arn = stream_kwargs.get('s3_role_arn')
self.s3_bucket_arn = stream_kwargs.get('s3_bucket_arn')
self.s3_prefix = stream_kwargs.get('s3_prefix')
self.s3_compression_format = stream_kwargs.get(
's3_compression_format', 'UNCOMPRESSED')
self.s3_buffering_hings = stream_kwargs.get('s3_buffering_hings')
self.s3_config = stream_kwargs.get('s3_config')
self.extended_s3_config = stream_kwargs.get('extended_s3_config')
self.redshift_s3_role_arn = stream_kwargs.get('redshift_s3_role_arn')
self.redshift_s3_bucket_arn = stream_kwargs.get(
@ -235,8 +231,8 @@ class DeliveryStream(BaseModel):
self.redshift_s3_prefix = stream_kwargs.get('redshift_s3_prefix')
self.redshift_s3_compression_format = stream_kwargs.get(
'redshift_s3_compression_format', 'UNCOMPRESSED')
self.redshift_s3_buffering_hings = stream_kwargs.get(
'redshift_s3_buffering_hings')
self.redshift_s3_buffering_hints = stream_kwargs.get(
'redshift_s3_buffering_hints')
self.records = []
self.status = 'ACTIVE'
@ -248,16 +244,15 @@ class DeliveryStream(BaseModel):
return 'arn:aws:firehose:us-east-1:123456789012:deliverystream/{0}'.format(self.name)
def destinations_to_dict(self):
if self.s3_role_arn:
if self.s3_config:
return [{
'DestinationId': 'string',
'S3DestinationDescription': {
'RoleARN': self.s3_role_arn,
'BucketARN': self.s3_bucket_arn,
'Prefix': self.s3_prefix,
'BufferingHints': self.s3_buffering_hings,
'CompressionFormat': self.s3_compression_format,
}
'S3DestinationDescription': self.s3_config,
}]
elif self.extended_s3_config:
return [{
'DestinationId': 'string',
'ExtendedS3DestinationDescription': self.extended_s3_config,
}]
else:
return [{
@ -268,7 +263,7 @@ class DeliveryStream(BaseModel):
"RoleARN": self.redshift_role_arn,
"S3DestinationDescription": {
"BucketARN": self.redshift_s3_bucket_arn,
"BufferingHints": self.redshift_s3_buffering_hings,
"BufferingHints": self.redshift_s3_buffering_hints,
"CompressionFormat": self.redshift_s3_compression_format,
"Prefix": self.redshift_s3_prefix,
"RoleARN": self.redshift_s3_role_arn

View File

@ -149,6 +149,10 @@ class KinesisResponse(BaseResponse):
stream_name = self.parameters['DeliveryStreamName']
redshift_config = self.parameters.get(
'RedshiftDestinationConfiguration')
s3_config = self.parameters.get(
'S3DestinationConfiguration')
extended_s3_config = self.parameters.get(
'ExtendedS3DestinationConfiguration')
if redshift_config:
redshift_s3_config = redshift_config['S3Configuration']
@ -163,18 +167,13 @@ class KinesisResponse(BaseResponse):
'redshift_s3_bucket_arn': redshift_s3_config['BucketARN'],
'redshift_s3_prefix': redshift_s3_config['Prefix'],
'redshift_s3_compression_format': redshift_s3_config.get('CompressionFormat'),
'redshift_s3_buffering_hings': redshift_s3_config['BufferingHints'],
}
else:
# S3 Config
s3_config = self.parameters['S3DestinationConfiguration']
stream_kwargs = {
's3_role_arn': s3_config['RoleARN'],
's3_bucket_arn': s3_config['BucketARN'],
's3_prefix': s3_config['Prefix'],
's3_compression_format': s3_config.get('CompressionFormat'),
's3_buffering_hings': s3_config['BufferingHints'],
'redshift_s3_buffering_hints': redshift_s3_config['BufferingHints'],
}
elif s3_config:
stream_kwargs = {'s3_config': s3_config}
elif extended_s3_config:
stream_kwargs = {'extended_s3_config': extended_s3_config}
stream = self.kinesis_backend.create_delivery_stream(
stream_name, **stream_kwargs)
return json.dumps({

View File

@ -1,7 +1,5 @@
from __future__ import unicode_literals
import datetime
import boto.rds
from jinja2 import Template
@ -14,95 +12,6 @@ from moto.rds2.models import rds2_backends
class Database(BaseModel):
def __init__(self, **kwargs):
self.status = "available"
self.is_replica = False
self.replicas = []
self.region = kwargs.get('region')
self.engine = kwargs.get("engine")
self.engine_version = kwargs.get("engine_version")
if self.engine_version is None:
self.engine_version = "5.6.21"
self.iops = kwargs.get("iops")
self.storage_encrypted = kwargs.get("storage_encrypted", False)
if self.storage_encrypted:
self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id")
else:
self.kms_key_id = kwargs.get("kms_key_id")
self.storage_type = kwargs.get("storage_type")
self.master_username = kwargs.get('master_username')
self.master_password = kwargs.get('master_password')
self.auto_minor_version_upgrade = kwargs.get(
'auto_minor_version_upgrade')
if self.auto_minor_version_upgrade is None:
self.auto_minor_version_upgrade = True
self.allocated_storage = kwargs.get('allocated_storage')
self.db_instance_identifier = kwargs.get('db_instance_identifier')
self.source_db_identifier = kwargs.get("source_db_identifier")
self.db_instance_class = kwargs.get('db_instance_class')
self.port = kwargs.get('port')
self.db_name = kwargs.get("db_name")
self.publicly_accessible = kwargs.get("publicly_accessible")
if self.publicly_accessible is None:
self.publicly_accessible = True
self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
if self.copy_tags_to_snapshot is None:
self.copy_tags_to_snapshot = False
self.backup_retention_period = kwargs.get("backup_retention_period")
if self.backup_retention_period is None:
self.backup_retention_period = 1
self.availability_zone = kwargs.get("availability_zone")
self.multi_az = kwargs.get("multi_az")
self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
self.instance_create_time = str(datetime.datetime.utcnow())
if self.db_subnet_group_name:
self.db_subnet_group = rds_backends[
self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
else:
self.db_subnet_group = []
self.security_groups = kwargs.get('security_groups', [])
# PreferredBackupWindow
# PreferredMaintenanceWindow
# backup_retention_period = self._get_param("BackupRetentionPeriod")
# OptionGroupName
# DBParameterGroupName
# VpcSecurityGroupIds.member.N
@property
def db_instance_arn(self):
return "arn:aws:rds:{0}:1234567890:db:{1}".format(
self.region, self.db_instance_identifier)
@property
def physical_resource_id(self):
return self.db_instance_identifier
@property
def address(self):
return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(self.db_instance_identifier, self.region)
def add_replica(self, replica):
self.replicas.append(replica.db_instance_identifier)
def remove_replica(self, replica):
self.replicas.remove(replica.db_instance_identifier)
def set_as_replica(self):
self.is_replica = True
self.replicas = []
def update(self, db_kwargs):
for key, value in db_kwargs.items():
if value is not None:
setattr(self, key, value)
def get_cfn_attribute(self, attribute_name):
if attribute_name == 'Endpoint.Address':
return self.address

View File

@ -1,3 +1,5 @@
import json
from moto.core.exceptions import InvalidNextTokenException
from moto.core.models import ConfigQueryModel
from moto.s3 import s3_backends
@ -66,5 +68,35 @@ class S3ConfigQuery(ConfigQueryModel):
return [{'type': 'AWS::S3::Bucket', 'id': bucket, 'name': bucket, 'region': self.backends['global'].buckets[bucket].region_name}
for bucket in bucket_list], new_token
def get_config_resource(self, resource_id, resource_name=None, backend_region=None, resource_region=None):
# backend_region is ignored for S3 as the backend is 'global'
# Get the bucket:
bucket = self.backends['global'].buckets.get(resource_id, {})
if not bucket:
return
# Are we filtering based on region?
if resource_region and bucket.region_name != resource_region:
return
# Are we also filtering on bucket name?
if resource_name and bucket.name != resource_name:
return
# Format the bucket to the AWS Config format:
config_data = bucket.to_config_dict()
# The 'configuration' field is also a JSON string:
config_data['configuration'] = json.dumps(config_data['configuration'])
# Supplementary config need all values converted to JSON strings if they are not strings already:
for field, value in config_data['supplementaryConfiguration'].items():
if not isinstance(value, str):
config_data['supplementaryConfiguration'][field] = json.dumps(value)
return config_data
s3_config_query = S3ConfigQuery(s3_backends)

View File

@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import base64
import datetime
@ -10,6 +12,7 @@ import random
import string
import tempfile
import sys
import time
import uuid
import six
@ -32,6 +35,7 @@ STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA",
"INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE"]
DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024
DEFAULT_TEXT_ENCODING = sys.getdefaultencoding()
OWNER = '75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a'
class FakeDeleteMarker(BaseModel):
@ -316,6 +320,14 @@ PERMISSION_READ = 'READ'
PERMISSION_WRITE_ACP = 'WRITE_ACP'
PERMISSION_READ_ACP = 'READ_ACP'
CAMEL_CASED_PERMISSIONS = {
'FULL_CONTROL': 'FullControl',
'WRITE': 'Write',
'READ': 'Read',
'WRITE_ACP': 'WriteAcp',
'READ_ACP': 'ReadAcp'
}
class FakeGrant(BaseModel):
@ -346,10 +358,43 @@ class FakeAcl(BaseModel):
def __repr__(self):
return "FakeAcl(grants: {})".format(self.grants)
def to_config_dict(self):
"""Returns the object into the format expected by AWS Config"""
data = {
'grantSet': None, # Always setting this to None. Feel free to change.
'owner': {'displayName': None, 'id': OWNER}
}
# Add details for each Grant:
grant_list = []
for grant in self.grants:
permissions = grant.permissions if isinstance(grant.permissions, list) else [grant.permissions]
for permission in permissions:
for grantee in grant.grantees:
# Config does not add the owner if its permissions are FULL_CONTROL:
if permission == 'FULL_CONTROL' and grantee.id == OWNER:
continue
if grantee.uri:
grant_list.append({'grantee': grantee.uri.split('http://acs.amazonaws.com/groups/s3/')[1],
'permission': CAMEL_CASED_PERMISSIONS[permission]})
else:
grant_list.append({
'grantee': {
'id': grantee.id,
'displayName': None if not grantee.display_name else grantee.display_name
},
'permission': CAMEL_CASED_PERMISSIONS[permission]
})
if grant_list:
data['grantList'] = grant_list
return data
def get_canned_acl(acl):
owner_grantee = FakeGrantee(
id='75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a')
owner_grantee = FakeGrantee(id=OWNER)
grants = [FakeGrant([owner_grantee], [PERMISSION_FULL_CONTROL])]
if acl == 'private':
pass # no other permissions
@ -401,6 +446,34 @@ class LifecycleFilter(BaseModel):
self.tag = tag
self.and_filter = and_filter
def to_config_dict(self):
if self.prefix is not None:
return {
'predicate': {
'type': 'LifecyclePrefixPredicate',
'prefix': self.prefix
}
}
elif self.tag:
return {
'predicate': {
'type': 'LifecycleTagPredicate',
'tag': {
'key': self.tag.key,
'value': self.tag.value
}
}
}
else:
return {
'predicate': {
'type': 'LifecycleAndOperator',
'operands': self.and_filter.to_config_dict()
}
}
class LifecycleAndFilter(BaseModel):
@ -408,6 +481,17 @@ class LifecycleAndFilter(BaseModel):
self.prefix = prefix
self.tags = tags
def to_config_dict(self):
data = []
if self.prefix is not None:
data.append({'type': 'LifecyclePrefixPredicate', 'prefix': self.prefix})
for tag in self.tags:
data.append({'type': 'LifecycleTagPredicate', 'tag': {'key': tag.key, 'value': tag.value}})
return data
class LifecycleRule(BaseModel):
@ -430,6 +514,46 @@ class LifecycleRule(BaseModel):
self.nvt_storage_class = nvt_storage_class
self.aimu_days = aimu_days
def to_config_dict(self):
"""Converts the object to the AWS Config data dict.
Note: The following are missing that should be added in the future:
- transitions (returns None for now)
- noncurrentVersionTransitions (returns None for now)
- LifeCycle Filters that are NOT prefix
:param kwargs:
:return:
"""
lifecycle_dict = {
'id': self.id,
'prefix': self.prefix,
'status': self.status,
'expirationInDays': self.expiration_days,
'expiredObjectDeleteMarker': self.expired_object_delete_marker,
'noncurrentVersionExpirationInDays': -1 or self.nve_noncurrent_days,
'expirationDate': self.expiration_date,
'transitions': None, # Replace me with logic to fill in
'noncurrentVersionTransitions': None, # Replace me with logic to fill in
}
if self.aimu_days:
lifecycle_dict['abortIncompleteMultipartUpload'] = {'daysAfterInitiation': self.aimu_days}
else:
lifecycle_dict['abortIncompleteMultipartUpload'] = None
# Format the filter:
if self.prefix is None and self.filter is None:
lifecycle_dict['filter'] = {'predicate': None}
elif self.prefix:
lifecycle_dict['filter'] = None
else:
lifecycle_dict['filter'] = self.filter.to_config_dict()
return lifecycle_dict
class CorsRule(BaseModel):
@ -450,6 +574,23 @@ class Notification(BaseModel):
self.events = events
self.filters = filters if filters else {}
def to_config_dict(self):
data = {}
# Type and ARN will be filled in by NotificationConfiguration's to_config_dict:
data['events'] = [event for event in self.events]
if self.filters:
data['filter'] = {'s3KeyFilter': {'filterRules': [
{'name': fr['Name'], 'value': fr['Value']} for fr in self.filters['S3Key']['FilterRule']
]}}
else:
data['filter'] = None
data['objectPrefixes'] = [] # Not sure why this is a thing since AWS just seems to return this as filters ¯\_(ツ)_/¯
return data
class NotificationConfiguration(BaseModel):
@ -461,6 +602,29 @@ class NotificationConfiguration(BaseModel):
self.cloud_function = [Notification(c["CloudFunction"], c["Event"], filters=c.get("Filter"), id=c.get("Id"))
for c in cloud_function] if cloud_function else []
def to_config_dict(self):
data = {'configurations': {}}
for topic in self.topic:
topic_config = topic.to_config_dict()
topic_config['topicARN'] = topic.arn
topic_config['type'] = 'TopicConfiguration'
data['configurations'][topic.id] = topic_config
for queue in self.queue:
queue_config = queue.to_config_dict()
queue_config['queueARN'] = queue.arn
queue_config['type'] = 'QueueConfiguration'
data['configurations'][queue.id] = queue_config
for cloud_function in self.cloud_function:
cf_config = cloud_function.to_config_dict()
cf_config['queueARN'] = cloud_function.arn
cf_config['type'] = 'LambdaConfiguration'
data['configurations'][cloud_function.id] = cf_config
return data
class FakeBucket(BaseModel):
@ -735,6 +899,67 @@ class FakeBucket(BaseModel):
bucket = s3_backend.create_bucket(resource_name, region_name)
return bucket
def to_config_dict(self):
"""Return the AWS Config JSON format of this S3 bucket.
Note: The following features are not implemented and will need to be if you care about them:
- Bucket Accelerate Configuration
"""
config_dict = {
'version': '1.3',
'configurationItemCaptureTime': str(self.creation_date),
'configurationItemStatus': 'ResourceDiscovered',
'configurationStateId': str(int(time.mktime(self.creation_date.timetuple()))), # PY2 and 3 compatible
'configurationItemMD5Hash': '',
'arn': "arn:aws:s3:::{}".format(self.name),
'resourceType': 'AWS::S3::Bucket',
'resourceId': self.name,
'resourceName': self.name,
'awsRegion': self.region_name,
'availabilityZone': 'Regional',
'resourceCreationTime': str(self.creation_date),
'relatedEvents': [],
'relationships': [],
'tags': {tag.key: tag.value for tag in self.tagging.tag_set.tags},
'configuration': {
'name': self.name,
'owner': {'id': OWNER},
'creationDate': self.creation_date.isoformat()
}
}
# Make the supplementary configuration:
# TODO: Implement Public Access Block Support
s_config = {'AccessControlList': self.acl.to_config_dict()}
# TODO implement Accelerate Configuration:
s_config['BucketAccelerateConfiguration'] = {'status': None}
if self.rules:
s_config['BucketLifecycleConfiguration'] = {
"rules": [rule.to_config_dict() for rule in self.rules]
}
s_config['BucketLoggingConfiguration'] = {
'destinationBucketName': self.logging.get('TargetBucket', None),
'logFilePrefix': self.logging.get('TargetPrefix', None)
}
s_config['BucketPolicy'] = {
'policyText': self.policy if self.policy else None
}
s_config['IsRequesterPaysEnabled'] = 'false' if self.payer == 'BucketOwner' else 'true'
if self.notification_configuration:
s_config['BucketNotificationConfiguration'] = self.notification_configuration.to_config_dict()
else:
s_config['BucketNotificationConfiguration'] = {'configurations': {}}
config_dict['supplementaryConfiguration'] = s_config
return config_dict
class S3Backend(BaseBackend):

View File

@ -10,6 +10,14 @@ class SNSNotFoundError(RESTError):
"NotFound", message)
class ResourceNotFoundError(RESTError):
code = 404
def __init__(self):
super(ResourceNotFoundError, self).__init__(
'ResourceNotFound', 'Resource does not exist')
class DuplicateSnsEndpointError(RESTError):
code = 400
@ -42,6 +50,14 @@ class InvalidParameterValue(RESTError):
"InvalidParameterValue", message)
class TagLimitExceededError(RESTError):
code = 400
def __init__(self):
super(TagLimitExceededError, self).__init__(
'TagLimitExceeded', 'Could not complete request: tag quota of per resource exceeded')
class InternalError(RESTError):
code = 500

View File

@ -18,7 +18,7 @@ from moto.awslambda import lambda_backends
from .exceptions import (
SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter,
InvalidParameterValue, InternalError
InvalidParameterValue, InternalError, ResourceNotFoundError, TagLimitExceededError
)
from .utils import make_arn_for_topic, make_arn_for_subscription
@ -44,6 +44,8 @@ class Topic(BaseModel):
self.subscriptions_confimed = 0
self.subscriptions_deleted = 0
self._tags = {}
def publish(self, message, subject=None, message_attributes=None):
message_id = six.text_type(uuid.uuid4())
subscriptions, _ = self.sns_backend.list_subscriptions(self.arn)
@ -277,7 +279,7 @@ class SNSBackend(BaseBackend):
def update_sms_attributes(self, attrs):
self.sms_attributes.update(attrs)
def create_topic(self, name, attributes=None):
def create_topic(self, name, attributes=None, tags=None):
fails_constraints = not re.match(r'^[a-zA-Z0-9_-]{1,256}$', name)
if fails_constraints:
raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.")
@ -285,6 +287,8 @@ class SNSBackend(BaseBackend):
if attributes:
for attribute in attributes:
setattr(candidate_topic, camelcase_to_underscores(attribute), attributes[attribute])
if tags:
candidate_topic._tags = tags
if candidate_topic.arn in self.topics:
return self.topics[candidate_topic.arn]
else:
@ -499,6 +503,31 @@ class SNSBackend(BaseBackend):
raise SNSInvalidParameter("Invalid parameter: FilterPolicy: Match value must be String, number, true, false, or null")
def list_tags_for_resource(self, resource_arn):
if resource_arn not in self.topics:
raise ResourceNotFoundError
return self.topics[resource_arn]._tags
def tag_resource(self, resource_arn, tags):
if resource_arn not in self.topics:
raise ResourceNotFoundError
updated_tags = self.topics[resource_arn]._tags.copy()
updated_tags.update(tags)
if len(updated_tags) > 50:
raise TagLimitExceededError
self.topics[resource_arn]._tags = updated_tags
def untag_resource(self, resource_arn, tag_keys):
if resource_arn not in self.topics:
raise ResourceNotFoundError
for key in tag_keys:
self.topics[resource_arn]._tags.pop(key, None)
sns_backends = {}
for region in Session().get_available_regions('sns'):

View File

@ -30,6 +30,10 @@ class SNSResponse(BaseResponse):
in attributes
)
def _get_tags(self):
tags = self._get_list_prefix('Tags.member')
return {tag['key']: tag['value'] for tag in tags}
def _parse_message_attributes(self, prefix='', value_namespace='Value.'):
message_attributes = self._get_object_map(
'MessageAttributes.entry',
@ -85,7 +89,8 @@ class SNSResponse(BaseResponse):
def create_topic(self):
name = self._get_param('Name')
attributes = self._get_attributes()
topic = self.backend.create_topic(name, attributes)
tags = self._get_tags()
topic = self.backend.create_topic(name, attributes, tags)
if self.request_json:
return json.dumps({
@ -691,6 +696,30 @@ class SNSResponse(BaseResponse):
template = self.response_template(CONFIRM_SUBSCRIPTION_TEMPLATE)
return template.render(sub_arn='{0}:68762e72-e9b1-410a-8b3b-903da69ee1d5'.format(arn))
def list_tags_for_resource(self):
arn = self._get_param('ResourceArn')
result = self.backend.list_tags_for_resource(arn)
template = self.response_template(LIST_TAGS_FOR_RESOURCE_TEMPLATE)
return template.render(tags=result)
def tag_resource(self):
arn = self._get_param('ResourceArn')
tags = self._get_tags()
self.backend.tag_resource(arn, tags)
return self.response_template(TAG_RESOURCE_TEMPLATE).render()
def untag_resource(self):
arn = self._get_param('ResourceArn')
tag_keys = self._get_multi_param('TagKeys.member')
self.backend.untag_resource(arn, tag_keys)
return self.response_template(UNTAG_RESOURCE_TEMPLATE).render()
CREATE_TOPIC_TEMPLATE = """<CreateTopicResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<CreateTopicResult>
@ -1072,3 +1101,33 @@ CONFIRM_SUBSCRIPTION_TEMPLATE = """<ConfirmSubscriptionResponse xmlns="http://sn
<RequestId>16eb4dde-7b3c-5b3e-a22a-1fe2a92d3293</RequestId>
</ResponseMetadata>
</ConfirmSubscriptionResponse>"""
LIST_TAGS_FOR_RESOURCE_TEMPLATE = """<ListTagsForResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<ListTagsForResourceResult>
<Tags>
{% for name, value in tags.items() %}
<member>
<Key>{{ name }}</Key>
<Value>{{ value }}</Value>
</member>
{% endfor %}
</Tags>
</ListTagsForResourceResult>
<ResponseMetadata>
<RequestId>97fa763f-861b-5223-a946-20251f2a42e2</RequestId>
</ResponseMetadata>
</ListTagsForResourceResponse>"""
TAG_RESOURCE_TEMPLATE = """<TagResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<TagResourceResult/>
<ResponseMetadata>
<RequestId>fd4ab1da-692f-50a7-95ad-e7c665877d98</RequestId>
</ResponseMetadata>
</TagResourceResponse>"""
UNTAG_RESOURCE_TEMPLATE = """<UntagResourceResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<UntagResourceResult/>
<ResponseMetadata>
<RequestId>14eb7b1a-4cbd-5a56-80db-2d06412df769</RequestId>
</ResponseMetadata>
</UntagResourceResponse>"""

View File

@ -415,7 +415,7 @@ class SQSBackend(BaseBackend):
self.__dict__ = {}
self.__init__(region_name)
def create_queue(self, name, **kwargs):
def create_queue(self, name, tags=None, **kwargs):
queue = self.queues.get(name)
if queue:
try:
@ -454,6 +454,10 @@ class SQSBackend(BaseBackend):
pass
queue = Queue(name, region=self.region_name, **kwargs)
self.queues[name] = queue
if tags:
queue.tags = tags
return queue
def list_queues(self, queue_name_prefix):
@ -654,6 +658,10 @@ class SQSBackend(BaseBackend):
def untag_queue(self, queue_name, tag_keys):
queue = self.get_queue(queue_name)
if len(tag_keys) == 0:
raise RESTError('InvalidParameterValue', 'Tag keys must be between 1 and 128 characters in length.')
for key in tag_keys:
try:
del queue.tags[key]

View File

@ -33,6 +33,12 @@ class SQSResponse(BaseResponse):
self._attribute = self._get_map_prefix('Attribute', key_end='.Name', value_end='.Value')
return self._attribute
@property
def tags(self):
if not hasattr(self, '_tags'):
self._tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value')
return self._tags
def _get_queue_name(self):
try:
queue_name = self.querystring.get('QueueUrl')[0].split("/")[-1]
@ -73,12 +79,12 @@ class SQSResponse(BaseResponse):
queue_name = self._get_param("QueueName")
try:
queue = self.sqs_backend.create_queue(queue_name, **self.attribute)
queue = self.sqs_backend.create_queue(queue_name, self.tags, **self.attribute)
except MessageAttributesInvalid as e:
return self._error('InvalidParameterValue', e.description)
template = self.response_template(CREATE_QUEUE_RESPONSE)
return template.render(queue=queue, request_url=request_url)
return template.render(queue_url=queue.url(request_url))
def get_queue_url(self):
request_url = urlparse(self.uri)
@ -400,7 +406,11 @@ class SQSResponse(BaseResponse):
queue_name = self._get_queue_name()
tag_keys = self._get_multi_param('TagKey')
self.sqs_backend.untag_queue(queue_name, tag_keys)
try:
self.sqs_backend.untag_queue(queue_name, tag_keys)
except QueueDoesNotExist as e:
return self._error('AWS.SimpleQueueService.NonExistentQueue',
e.description)
template = self.response_template(UNTAG_QUEUE_RESPONSE)
return template.render()
@ -416,8 +426,7 @@ class SQSResponse(BaseResponse):
CREATE_QUEUE_RESPONSE = """<CreateQueueResponse>
<CreateQueueResult>
<QueueUrl>{{ queue.url(request_url) }}</QueueUrl>
<VisibilityTimeout>{{ queue.visibility_timeout }}</VisibilityTimeout>
<QueueUrl>{{ queue_url }}</QueueUrl>
</CreateQueueResult>
<ResponseMetadata>
<RequestId></RequestId>

View File

@ -12,7 +12,7 @@ import zipfile
import sure # noqa
from freezegun import freeze_time
from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings, mock_sqs
from moto import mock_dynamodb2, mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings, mock_sqs
from nose.tools import assert_raises
from botocore.exceptions import ClientError
@ -1027,6 +1027,54 @@ def test_invoke_function_from_sqs():
assert False, "Test Failed"
@mock_logs
@mock_lambda
@mock_dynamodb2
def test_invoke_function_from_dynamodb():
logs_conn = boto3.client("logs")
dynamodb = boto3.client('dynamodb')
table_name = 'table_with_stream'
table = dynamodb.create_table(TableName=table_name,
KeySchema=[{'AttributeName':'id','KeyType':'HASH'}],
AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}],
StreamSpecification={'StreamEnabled': True,
'StreamViewType': 'NEW_AND_OLD_IMAGES'})
conn = boto3.client('lambda')
func = conn.create_function(FunctionName='testFunction', Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={'ZipFile': get_test_zip_file3()},
Description='test lambda function executed after a DynamoDB table is updated',
Timeout=3, MemorySize=128, Publish=True)
response = conn.create_event_source_mapping(
EventSourceArn=table['TableDescription']['LatestStreamArn'],
FunctionName=func['FunctionArn']
)
assert response['EventSourceArn'] == table['TableDescription']['LatestStreamArn']
assert response['State'] == 'Enabled'
dynamodb.put_item(TableName=table_name, Item={'id': { 'S': 'item 1' }})
start = time.time()
while (time.time() - start) < 30:
result = logs_conn.describe_log_streams(logGroupName='/aws/lambda/testFunction')
log_streams = result.get('logStreams')
if not log_streams:
time.sleep(1)
continue
assert len(log_streams) == 1
result = logs_conn.get_log_events(logGroupName='/aws/lambda/testFunction', logStreamName=log_streams[0]['logStreamName'])
for event in result.get('events'):
if event['message'] == 'get_test_zip_file3 success':
return
time.sleep(1)
assert False, "Test Failed"
@mock_logs
@mock_lambda
@mock_sqs
@ -1245,3 +1293,175 @@ def test_delete_event_source_mapping():
assert response['State'] == 'Deleting'
conn.get_event_source_mapping.when.called_with(UUID=response['UUID'])\
.should.throw(botocore.client.ClientError)
@mock_lambda
@mock_s3
def test_update_configuration():
s3_conn = boto3.client('s3', 'us-west-2')
s3_conn.create_bucket(Bucket='test-bucket')
zip_content = get_test_zip_file2()
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
conn = boto3.client('lambda', 'us-west-2')
fxn = conn.create_function(
FunctionName='testFunction',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
assert fxn['Description'] == 'test lambda function'
assert fxn['Handler'] == 'lambda_function.lambda_handler'
assert fxn['MemorySize'] == 128
assert fxn['Runtime'] == 'python2.7'
assert fxn['Timeout'] == 3
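# Update only a subset of the configuration; untouched fields such as MemorySize should keep their original values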
updated_config = conn.update_function_configuration(
FunctionName='testFunction',
Description='updated test lambda function',
Handler='lambda_function.new_lambda_handler',
Runtime='python3.6',
Timeout=7
)
assert updated_config['ResponseMetadata']['HTTPStatusCode'] == 200
assert updated_config['Description'] == 'updated test lambda function'
assert updated_config['Handler'] == 'lambda_function.new_lambda_handler'
assert updated_config['MemorySize'] == 128
assert updated_config['Runtime'] == 'python3.6'
assert updated_config['Timeout'] == 7
@mock_lambda
def test_update_function_zip():
conn = boto3.client('lambda', 'us-west-2')
zip_content_one = get_test_zip_file1()
fxn = conn.create_function(
FunctionName='testFunctionZip',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'ZipFile': zip_content_one,
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
zip_content_two = get_test_zip_file2()
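# Uploading new code with Publish=True should create version 2 of the function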
fxn_updated = conn.update_function_code(
FunctionName='testFunctionZip',
ZipFile=zip_content_two,
Publish=True
)
response = conn.get_function(
FunctionName='testFunctionZip',
Qualifier='2'
)
response['Configuration'].pop('LastModified')
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
assert len(response['Code']) == 2
assert response['Code']['RepositoryType'] == 'S3'
assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region))
response['Configuration'].should.equal(
{
"CodeSha256": hashlib.sha256(zip_content_two).hexdigest(),
"CodeSize": len(zip_content_two),
"Description": "test lambda function",
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunctionZip:2'.format(_lambda_region),
"FunctionName": "testFunctionZip",
"Handler": "lambda_function.lambda_handler",
"MemorySize": 128,
"Role": "test-iam-role",
"Runtime": "python2.7",
"Timeout": 3,
"Version": '2',
"VpcConfig": {
"SecurityGroupIds": [],
"SubnetIds": [],
}
},
)
@mock_lambda
@mock_s3
def test_update_function_s3():
s3_conn = boto3.client('s3', 'us-west-2')
s3_conn.create_bucket(Bucket='test-bucket')
zip_content = get_test_zip_file1()
s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
conn = boto3.client('lambda', 'us-west-2')
fxn = conn.create_function(
FunctionName='testFunctionS3',
Runtime='python2.7',
Role='test-iam-role',
Handler='lambda_function.lambda_handler',
Code={
'S3Bucket': 'test-bucket',
'S3Key': 'test.zip',
},
Description='test lambda function',
Timeout=3,
MemorySize=128,
Publish=True,
)
zip_content_two = get_test_zip_file2()
s3_conn.put_object(Bucket='test-bucket', Key='test2.zip', Body=zip_content_two)
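# Point the function at the new S3 object and publish; this should create version 2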
fxn_updated = conn.update_function_code(
FunctionName='testFunctionS3',
S3Bucket='test-bucket',
S3Key='test2.zip',
Publish=True
)
response = conn.get_function(
FunctionName='testFunctionS3',
Qualifier='2'
)
response['Configuration'].pop('LastModified')
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
assert len(response['Code']) == 2
assert response['Code']['RepositoryType'] == 'S3'
assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region))
response['Configuration'].should.equal(
{
"CodeSha256": hashlib.sha256(zip_content_two).hexdigest(),
"CodeSize": len(zip_content_two),
"Description": "test lambda function",
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunctionS3:2'.format(_lambda_region),
"FunctionName": "testFunctionS3",
"Handler": "lambda_function.lambda_handler",
"MemorySize": 128,
"Role": "test-iam-role",
"Runtime": "python2.7",
"Timeout": 3,
"Version": '2',
"VpcConfig": {
"SecurityGroupIds": [],
"SubnetIds": [],
}
},
)

View File

@@ -1184,3 +1184,159 @@ def test_list_aggregate_discovered_resource():
with assert_raises(ClientError) as ce:
client.list_aggregate_discovered_resources(ConfigurationAggregatorName='testing', ResourceType='AWS::S3::Bucket', Limit=101)
assert '101' in ce.exception.response['Error']['Message']
@mock_config
@mock_s3
def test_get_resource_config_history():
"""NOTE: We are only really testing the Config part. For each individual service, please add tests
for that individual service's "get_config_resource" function.
"""
client = boto3.client('config', region_name='us-west-2')
# With an invalid resource type:
with assert_raises(ClientError) as ce:
client.get_resource_config_history(resourceType='NOT::A::RESOURCE', resourceId='notcreatedyet')
assert ce.exception.response['Error'] == {'Message': 'Resource notcreatedyet of resourceType:NOT::A::RESOURCE is unknown or has '
'not been discovered', 'Code': 'ResourceNotDiscoveredException'}
# With nothing created yet:
with assert_raises(ClientError) as ce:
client.get_resource_config_history(resourceType='AWS::S3::Bucket', resourceId='notcreatedyet')
assert ce.exception.response['Error'] == {'Message': 'Resource notcreatedyet of resourceType:AWS::S3::Bucket is unknown or has '
'not been discovered', 'Code': 'ResourceNotDiscoveredException'}
# Create an S3 bucket:
s3_client = boto3.client('s3', region_name='us-west-2')
for x in range(0, 10):
s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
# Now try:
result = client.get_resource_config_history(resourceType='AWS::S3::Bucket', resourceId='bucket1')['configurationItems']
assert len(result) == 1
assert result[0]['resourceName'] == result[0]['resourceId'] == 'bucket1'
assert result[0]['arn'] == 'arn:aws:s3:::bucket1'
@mock_config
@mock_s3
def test_batch_get_resource_config():
"""NOTE: We are only really testing the Config part. For each individual service, please add tests
for that individual service's "get_config_resource" function.
"""
client = boto3.client('config', region_name='us-west-2')
# With more than 100 resourceKeys:
with assert_raises(ClientError) as ce:
client.batch_get_resource_config(resourceKeys=[{'resourceType': 'AWS::S3::Bucket', 'resourceId': 'someBucket'}] * 101)
assert 'Member must have length less than or equal to 100' in ce.exception.response['Error']['Message']
# With invalid resource types and resources that don't exist:
result = client.batch_get_resource_config(resourceKeys=[
{'resourceType': 'NOT::A::RESOURCE', 'resourceId': 'NotAThing'}, {'resourceType': 'AWS::S3::Bucket', 'resourceId': 'NotAThing'},
])
assert not result['baseConfigurationItems']
assert not result['unprocessedResourceKeys']
# Create some S3 buckets:
s3_client = boto3.client('s3', region_name='us-west-2')
for x in range(0, 10):
s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
# Get them all:
keys = [{'resourceType': 'AWS::S3::Bucket', 'resourceId': 'bucket{}'.format(x)} for x in range(0, 10)]
result = client.batch_get_resource_config(resourceKeys=keys)
assert len(result['baseConfigurationItems']) == 10
buckets_missing = ['bucket{}'.format(x) for x in range(0, 10)]
for r in result['baseConfigurationItems']:
buckets_missing.remove(r['resourceName'])
assert not buckets_missing
@mock_config
@mock_s3
def test_batch_get_aggregate_resource_config():
"""NOTE: We are only really testing the Config part. For each individual service, please add tests
for that individual service's "get_config_resource" function.
"""
from moto.config.models import DEFAULT_ACCOUNT_ID
client = boto3.client('config', region_name='us-west-2')
# Without an aggregator:
bad_ri = {'SourceAccountId': '000000000000', 'SourceRegion': 'not-a-region', 'ResourceType': 'NOT::A::RESOURCE', 'ResourceId': 'nope'}
with assert_raises(ClientError) as ce:
client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='lolno', ResourceIdentifiers=[bad_ri])
assert 'The configuration aggregator does not exist' in ce.exception.response['Error']['Message']
# Create the aggregator:
account_aggregation_source = {
'AccountIds': [
'012345678910',
'111111111111',
'222222222222'
],
'AllAwsRegions': True
}
client.put_configuration_aggregator(
ConfigurationAggregatorName='testing',
AccountAggregationSources=[account_aggregation_source]
)
# With more than 100 items:
with assert_raises(ClientError) as ce:
client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=[bad_ri] * 101)
assert 'Member must have length less than or equal to 100' in ce.exception.response['Error']['Message']
# Create some S3 buckets:
s3_client = boto3.client('s3', region_name='us-west-2')
for x in range(0, 10):
s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
s3_client_eu = boto3.client('s3', region_name='eu-west-1')
for x in range(10, 12):
s3_client_eu.create_bucket(Bucket='eu-bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
# Now try with resources that exist and ones that don't:
identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'us-west-2', 'ResourceType': 'AWS::S3::Bucket',
'ResourceId': 'bucket{}'.format(x)} for x in range(0, 10)]
identifiers += [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'eu-west-1', 'ResourceType': 'AWS::S3::Bucket',
'ResourceId': 'eu-bucket{}'.format(x)} for x in range(10, 12)]
identifiers += [bad_ri]
result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers)
assert len(result['UnprocessedResourceIdentifiers']) == 1
assert result['UnprocessedResourceIdentifiers'][0] == bad_ri
# Verify all the buckets are there:
assert len(result['BaseConfigurationItems']) == 12
missing_buckets = ['bucket{}'.format(x) for x in range(0, 10)] + ['eu-bucket{}'.format(x) for x in range(10, 12)]
for r in result['BaseConfigurationItems']:
missing_buckets.remove(r['resourceName'])
assert not missing_buckets
# Verify that if the resource name and ID are correct that things are good:
identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'us-west-2', 'ResourceType': 'AWS::S3::Bucket',
'ResourceId': 'bucket1', 'ResourceName': 'bucket1'}]
result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers)
assert not result['UnprocessedResourceIdentifiers']
assert len(result['BaseConfigurationItems']) == 1 and result['BaseConfigurationItems'][0]['resourceName'] == 'bucket1'
# Verify that if the resource name and ID mismatch that we don't get a result:
identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'us-west-2', 'ResourceType': 'AWS::S3::Bucket',
'ResourceId': 'bucket1', 'ResourceName': 'bucket2'}]
result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers)
assert not result['BaseConfigurationItems']
assert len(result['UnprocessedResourceIdentifiers']) == 1
assert len(result['UnprocessedResourceIdentifiers']) == 1 and result['UnprocessedResourceIdentifiers'][0]['ResourceName'] == 'bucket2'
# Verify that if the region is incorrect that we don't get a result:
identifiers = [{'SourceAccountId': DEFAULT_ACCOUNT_ID, 'SourceRegion': 'eu-west-1', 'ResourceType': 'AWS::S3::Bucket',
'ResourceId': 'bucket1'}]
result = client.batch_get_aggregate_resource_config(ConfigurationAggregatorName='testing', ResourceIdentifiers=identifiers)
assert not result['BaseConfigurationItems']
assert len(result['UnprocessedResourceIdentifiers']) == 1
assert len(result['UnprocessedResourceIdentifiers']) == 1 and result['UnprocessedResourceIdentifiers'][0]['SourceRegion'] == 'eu-west-1'

View File

@@ -369,7 +369,80 @@ def test_query_returns_consumed_capacity():
@mock_dynamodb2
def test_basic_projection_expressions():
def test_basic_projection_expression_using_get_item():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
table = dynamodb.Table('users')
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'body': 'some test message'
})
table.put_item(Item={
'forum_name': 'not-the-key',
'subject': '123',
'body': 'some other test message'
})
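# get_item with a ProjectionExpression should return only the requested attributes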
result = table.get_item(
Key = {
'forum_name': 'the-key',
'subject': '123'
},
ProjectionExpression='body, subject'
)
result['Item'].should.be.equal({
'subject': '123',
'body': 'some test message'
})
# The projection expression should not remove data from storage
result = table.get_item(
Key = {
'forum_name': 'the-key',
'subject': '123'
}
)
result['Item'].should.be.equal({
'forum_name': 'the-key',
'subject': '123',
'body': 'some test message'
})
@mock_dynamodb2
def test_basic_projection_expressions_using_query():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
@@ -452,6 +525,7 @@ def test_basic_projection_expressions():
assert 'body' in results['Items'][1]
assert 'forum_name' in results['Items'][1]
@mock_dynamodb2
def test_basic_projection_expressions_using_scan():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
@@ -538,7 +612,73 @@ def test_basic_projection_expressions_using_scan():
@mock_dynamodb2
def test_basic_projection_expressions_with_attr_expression_names():
def test_basic_projection_expression_using_get_item_with_attr_expression_names():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='users',
KeySchema=[
{
'AttributeName': 'forum_name',
'KeyType': 'HASH'
},
{
'AttributeName': 'subject',
'KeyType': 'RANGE'
},
],
AttributeDefinitions=[
{
'AttributeName': 'forum_name',
'AttributeType': 'S'
},
{
'AttributeName': 'subject',
'AttributeType': 'S'
},
],
ProvisionedThroughput={
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
)
table = dynamodb.Table('users')
table.put_item(Item={
'forum_name': 'the-key',
'subject': '123',
'body': 'some test message',
'attachment': 'something'
})
table.put_item(Item={
'forum_name': 'not-the-key',
'subject': '123',
'body': 'some other test message',
'attachment': 'something'
})
result = table.get_item(
Key={
'forum_name': 'the-key',
'subject': '123'
},
ProjectionExpression='#rl, #rt, subject',
ExpressionAttributeNames={
'#rl': 'body',
'#rt': 'attachment'
},
)
result['Item'].should.be.equal({
'subject': '123',
'body': 'some test message',
'attachment': 'something'
})
@mock_dynamodb2
def test_basic_projection_expressions_using_query_with_attr_expression_names():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
# Create the DynamoDB table.
@@ -603,6 +743,7 @@ def test_basic_projection_expressions_with_attr_expression_names():
assert 'attachment' in results['Items'][0]
assert results['Items'][0]['attachment'] == 'something'
@mock_dynamodb2
def test_basic_projection_expressions_using_scan_with_attr_expression_names():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
@@ -2250,6 +2391,76 @@ def test_batch_items_returns_all():
assert [item['username']['S'] for item in returned_items] == ['user1', 'user2', 'user3']
@mock_dynamodb2
def test_batch_items_with_basic_projection_expression():
dynamodb = _create_user_table()
returned_items = dynamodb.batch_get_item(RequestItems={
'users': {
'Keys': [{
'username': {'S': 'user0'}
}, {
'username': {'S': 'user1'}
}, {
'username': {'S': 'user2'}
}, {
'username': {'S': 'user3'}
}],
'ConsistentRead': True,
'ProjectionExpression': 'username'
}
})['Responses']['users']
returned_items.should.have.length_of(3)
[item['username']['S'] for item in returned_items].should.be.equal(['user1', 'user2', 'user3'])
[item.get('foo') for item in returned_items].should.be.equal([None, None, None])
# The projection expression should not remove data from storage
returned_items = dynamodb.batch_get_item(RequestItems = {
'users': {
'Keys': [{
'username': {'S': 'user0'}
}, {
'username': {'S': 'user1'}
}, {
'username': {'S': 'user2'}
}, {
'username': {'S': 'user3'}
}],
'ConsistentRead': True
}
})['Responses']['users']
[item['username']['S'] for item in returned_items].should.be.equal(['user1', 'user2', 'user3'])
[item['foo']['S'] for item in returned_items].should.be.equal(['bar', 'bar', 'bar'])
@mock_dynamodb2
def test_batch_items_with_basic_projection_expression_and_attr_expression_names():
dynamodb = _create_user_table()
returned_items = dynamodb.batch_get_item(RequestItems={
'users': {
'Keys': [{
'username': {'S': 'user0'}
}, {
'username': {'S': 'user1'}
}, {
'username': {'S': 'user2'}
}, {
'username': {'S': 'user3'}
}],
'ConsistentRead': True,
'ProjectionExpression': '#rl',
'ExpressionAttributeNames': {
'#rl': 'username'
},
}
})['Responses']['users']
returned_items.should.have.length_of(3)
[item['username']['S'] for item in returned_items].should.be.equal(['user1', 'user2', 'user3'])
[item.get('foo') for item in returned_items].should.be.equal([None, None, None])
@mock_dynamodb2
def test_batch_items_should_throw_exception_for_duplicate_request():
client = _create_user_table()
@@ -2295,6 +2506,74 @@ def test_index_with_unknown_attributes_should_fail():
ex.exception.response['Error']['Message'].should.contain(expected_exception)
@mock_dynamodb2
def test_sorted_query_with_numerical_sort_key():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
dynamodb.create_table(TableName="CarCollection",
KeySchema=[{ 'AttributeName': "CarModel", 'KeyType': 'HASH'},
{'AttributeName': "CarPrice", 'KeyType': 'RANGE'}],
AttributeDefinitions=[{'AttributeName': "CarModel", 'AttributeType': "S"},
{'AttributeName': "CarPrice", 'AttributeType': "N"}],
ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1})
def create_item(price):
return {"CarModel": "M", "CarPrice": price}
table = dynamodb.Table('CarCollection')
items = list(map(create_item, [2, 1, 10, 3]))
for item in items:
table.put_item(Item=item)
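# Query results should come back sorted by the numeric range key, with values returned as Decimals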
response = table.query(KeyConditionExpression=Key('CarModel').eq("M"))
response_items = response['Items']
assert len(items) == len(response_items)
assert all(isinstance(item["CarPrice"], Decimal) for item in response_items)
response_prices = [item["CarPrice"] for item in response_items]
expected_prices = [Decimal(item["CarPrice"]) for item in items]
expected_prices.sort()
assert expected_prices == response_prices, "result items are not sorted by numerical value"
# https://github.com/spulec/moto/issues/1874
@mock_dynamodb2
def test_item_size_is_under_400KB():
dynamodb = boto3.resource('dynamodb')
client = boto3.client('dynamodb')
dynamodb.create_table(
TableName='moto-test',
KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
)
table = dynamodb.Table('moto-test')
large_item = 'x' * 410 * 1000
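# Items larger than DynamoDB's 400 KB limit should be rejected with a ValidationException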
assert_failure_due_to_item_size(func=client.put_item,
TableName='moto-test',
Item={'id': {'S': 'foo'}, 'item': {'S': large_item}})
assert_failure_due_to_item_size(func=table.put_item, Item = {'id': 'bar', 'item': large_item})
assert_failure_due_to_item_size(func=client.update_item,
TableName='moto-test',
Key={'id': {'S': 'foo2'}},
UpdateExpression='set item=:Item',
ExpressionAttributeValues={':Item': {'S': large_item}})
# Assert op fails when updating a nested item
assert_failure_due_to_item_size(func=table.put_item,
Item={'id': 'bar', 'itemlist': [{'item': large_item}]})
assert_failure_due_to_item_size(func=client.put_item,
TableName='moto-test',
Item={'id': {'S': 'foo'}, 'itemlist': {'L': [{'M': {'item1': {'S': large_item}}}]}})
def assert_failure_due_to_item_size(func, **kwargs):
with assert_raises(ClientError) as ex:
func(**kwargs)
ex.exception.response['Error']['Code'].should.equal('ValidationException')
ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size')
def _create_user_table():
client = boto3.client('dynamodb', region_name='us-east-1')
client.create_table(

View File

@@ -889,6 +889,12 @@ def test_describe_container_instances():
instance.keys().should.contain('runningTasksCount')
instance.keys().should.contain('pendingTasksCount')
with assert_raises(ClientError) as e:
ecs_client.describe_container_instances(
cluster=test_cluster_name,
containerInstances=[]
)
@mock_ec2
@mock_ecs

View File

@@ -9,7 +9,41 @@ import sure # noqa
from moto import mock_kinesis
def create_stream(client, stream_name):
def create_s3_delivery_stream(client, stream_name):
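# Helper that builds a DirectPut delivery stream whose extended S3 destination converts JSON input (HiveJsonSerDe) to Parquet output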
return client.create_delivery_stream(
DeliveryStreamName=stream_name,
DeliveryStreamType="DirectPut",
ExtendedS3DestinationConfiguration={
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'BucketARN': 'arn:aws:s3:::kinesis-test',
'Prefix': 'myFolder/',
'CompressionFormat': 'UNCOMPRESSED',
'DataFormatConversionConfiguration': {
'Enabled': True,
'InputFormatConfiguration': {
'Deserializer': {
'HiveJsonSerDe': {
},
},
},
'OutputFormatConfiguration': {
'Serializer': {
'ParquetSerDe': {
'Compression': 'SNAPPY',
},
},
},
'SchemaConfiguration': {
'DatabaseName': stream_name,
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'TableName': 'outputTable',
},
},
})
def create_redshift_delivery_stream(client, stream_name):
return client.create_delivery_stream(
DeliveryStreamName=stream_name,
RedshiftDestinationConfiguration={
@@ -36,10 +70,10 @@ def create_stream(client, stream_name):
@mock_kinesis
def test_create_stream():
def test_create_redshift_delivery_stream():
client = boto3.client('firehose', region_name='us-east-1')
response = create_stream(client, 'stream1')
response = create_redshift_delivery_stream(client, 'stream1')
stream_arn = response['DeliveryStreamARN']
response = client.describe_delivery_stream(DeliveryStreamName='stream1')
@@ -82,6 +116,60 @@ def test_create_stream():
})
@mock_kinesis
def test_create_s3_delivery_stream():
client = boto3.client('firehose', region_name='us-east-1')
response = create_s3_delivery_stream(client, 'stream1')
stream_arn = response['DeliveryStreamARN']
response = client.describe_delivery_stream(DeliveryStreamName='stream1')
stream_description = response['DeliveryStreamDescription']
# Sure and Freezegun don't play nicely together
_ = stream_description.pop('CreateTimestamp')
_ = stream_description.pop('LastUpdateTimestamp')
stream_description.should.equal({
'DeliveryStreamName': 'stream1',
'DeliveryStreamARN': stream_arn,
'DeliveryStreamStatus': 'ACTIVE',
'VersionId': 'string',
'Destinations': [
{
'DestinationId': 'string',
'ExtendedS3DestinationDescription': {
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'BucketARN': 'arn:aws:s3:::kinesis-test',
'Prefix': 'myFolder/',
'CompressionFormat': 'UNCOMPRESSED',
'DataFormatConversionConfiguration': {
'Enabled': True,
'InputFormatConfiguration': {
'Deserializer': {
'HiveJsonSerDe': {
},
},
},
'OutputFormatConfiguration': {
'Serializer': {
'ParquetSerDe': {
'Compression': 'SNAPPY',
},
},
},
'SchemaConfiguration': {
'DatabaseName': 'stream1',
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'TableName': 'outputTable',
},
},
},
},
],
"HasMoreDestinations": False,
})
@mock_kinesis
def test_create_stream_without_redshift():
client = boto3.client('firehose', region_name='us-east-1')
@@ -145,8 +233,8 @@ def test_deescribe_non_existant_stream():
def test_list_and_delete_stream():
client = boto3.client('firehose', region_name='us-east-1')
create_stream(client, 'stream1')
create_stream(client, 'stream2')
create_redshift_delivery_stream(client, 'stream1')
create_redshift_delivery_stream(client, 'stream2')
set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(
set(['stream1', 'stream2']))
@@ -161,7 +249,7 @@ def test_list_and_delete_stream():
def test_put_record():
client = boto3.client('firehose', region_name='us-east-1')
create_stream(client, 'stream1')
create_redshift_delivery_stream(client, 'stream1')
client.put_record(
DeliveryStreamName='stream1',
Record={
@@ -174,7 +262,7 @@ def test_put_record():
def test_put_record_batch():
client = boto3.client('firehose', region_name='us-east-1')
create_stream(client, 'stream1')
create_redshift_delivery_stream(client, 'stream1')
client.put_record_batch(
DeliveryStreamName='stream1',
Records=[

View File

@@ -289,8 +289,8 @@ def test_multipart_etag_quotes_stripped():
part2 = b'1'
etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
# Strip quotes from etags
etag1 = etag1.replace('"','')
etag2 = etag2.replace('"','')
etag1 = etag1.replace('"', '')
etag2 = etag2.replace('"', '')
xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
xml = xml.format(1, etag1) + xml.format(2, etag2)
xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
@@ -1592,7 +1592,8 @@ def test_boto3_copy_object_with_versioning():
response = client.create_multipart_upload(Bucket='blah', Key='test4')
upload_id = response['UploadId']
response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new},
response = client.upload_part_copy(Bucket='blah', Key='test4',
CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new},
UploadId=upload_id, PartNumber=1)
etag = response["CopyPartResult"]["ETag"]
client.complete_multipart_upload(
@@ -2284,7 +2285,7 @@ def test_put_bucket_notification():
assert not result.get("QueueConfigurations")
assert result["LambdaFunctionConfigurations"][0]["Id"]
assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \
"arn:aws:lambda:us-east-1:012345678910:function:lambda"
"arn:aws:lambda:us-east-1:012345678910:function:lambda"
assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*"
assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1
assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1
@@ -2367,7 +2368,7 @@ def test_put_bucket_notification_errors():
assert err.exception.response["Error"]["Code"] == "InvalidArgument"
assert err.exception.response["Error"]["Message"] == \
"The notification destination service region is not valid for the bucket location constraint"
"The notification destination service region is not valid for the bucket location constraint"
# Invalid event name:
with assert_raises(ClientError) as err:
@@ -2949,7 +2950,7 @@ TEST_XML = """\
def test_boto3_bucket_name_too_long():
s3 = boto3.client('s3', region_name='us-east-1')
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket='x'*64)
s3.create_bucket(Bucket='x' * 64)
exc.exception.response['Error']['Code'].should.equal('InvalidBucketName')
@@ -2957,7 +2958,7 @@ def test_boto3_bucket_name_too_long():
def test_boto3_bucket_name_too_short():
s3 = boto3.client('s3', region_name='us-east-1')
with assert_raises(ClientError) as exc:
s3.create_bucket(Bucket='x'*2)
s3.create_bucket(Bucket='x' * 2)
exc.exception.response['Error']['Code'].should.equal('InvalidBucketName')
@@ -2979,7 +2980,7 @@ def test_can_enable_bucket_acceleration():
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Enabled'},
)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.should.have.key('Status')
resp['Status'].should.equal('Enabled')
@@ -2998,7 +2999,7 @@ def test_can_suspend_bucket_acceleration():
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Suspended'},
)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.should.have.key('Status')
resp['Status'].should.equal('Suspended')
@@ -3013,7 +3014,7 @@ def test_suspending_acceleration_on_not_configured_bucket_does_nothing():
Bucket=bucket_name,
AccelerateConfiguration={'Status': 'Suspended'},
)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers)
resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name)
resp.shouldnt.have.key('Status')
@@ -3173,3 +3174,342 @@ def test_list_config_discovered_resources():
s3_config_query.list_config_service_resources(None, None, 1, 'notabucket')
assert 'The nextToken provided is invalid' in inte.exception.message
@mock_s3
def test_s3_lifecycle_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends['global'].create_bucket('bucket1', 'us-west-2')
# And a lifecycle policy
lifecycle = [
{
'ID': 'rule1',
'Status': 'Enabled',
'Filter': {'Prefix': ''},
'Expiration': {'Days': 1}
},
{
'ID': 'rule2',
'Status': 'Enabled',
'Filter': {
'And': {
'Prefix': 'some/path',
'Tag': [
{'Key': 'TheKey', 'Value': 'TheValue'}
]
}
},
'Expiration': {'Days': 1}
},
{
'ID': 'rule3',
'Status': 'Enabled',
'Filter': {},
'Expiration': {'Days': 1}
},
{
'ID': 'rule4',
'Status': 'Enabled',
'Filter': {'Prefix': ''},
'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 1}
}
]
s3_config_query.backends['global'].set_bucket_lifecycle('bucket1', lifecycle)
# Get the rules for this:
lifecycles = [rule.to_config_dict() for rule in s3_config_query.backends['global'].buckets['bucket1'].rules]
# Verify the first:
assert lifecycles[0] == {
'id': 'rule1',
'prefix': None,
'status': 'Enabled',
'expirationInDays': 1,
'expiredObjectDeleteMarker': None,
'noncurrentVersionExpirationInDays': -1,
'expirationDate': None,
'transitions': None,
'noncurrentVersionTransitions': None,
'abortIncompleteMultipartUpload': None,
'filter': {
'predicate': {
'type': 'LifecyclePrefixPredicate',
'prefix': ''
}
}
}
# Verify the second:
assert lifecycles[1] == {
'id': 'rule2',
'prefix': None,
'status': 'Enabled',
'expirationInDays': 1,
'expiredObjectDeleteMarker': None,
'noncurrentVersionExpirationInDays': -1,
'expirationDate': None,
'transitions': None,
'noncurrentVersionTransitions': None,
'abortIncompleteMultipartUpload': None,
'filter': {
'predicate': {
'type': 'LifecycleAndOperator',
'operands': [
{
'type': 'LifecyclePrefixPredicate',
'prefix': 'some/path'
},
{
'type': 'LifecycleTagPredicate',
'tag': {
'key': 'TheKey',
'value': 'TheValue'
}
},
]
}
}
}
# And the third:
assert lifecycles[2] == {
'id': 'rule3',
'prefix': None,
'status': 'Enabled',
'expirationInDays': 1,
'expiredObjectDeleteMarker': None,
'noncurrentVersionExpirationInDays': -1,
'expirationDate': None,
'transitions': None,
'noncurrentVersionTransitions': None,
'abortIncompleteMultipartUpload': None,
'filter': {'predicate': None}
}
# And the last:
assert lifecycles[3] == {
'id': 'rule4',
'prefix': None,
'status': 'Enabled',
'expirationInDays': None,
'expiredObjectDeleteMarker': None,
'noncurrentVersionExpirationInDays': -1,
'expirationDate': None,
'transitions': None,
'noncurrentVersionTransitions': None,
'abortIncompleteMultipartUpload': {'daysAfterInitiation': 1},
'filter': {
'predicate': {
'type': 'LifecyclePrefixPredicate',
'prefix': ''
}
}
}
@mock_s3
def test_s3_notification_config_dict():
from moto.s3.config import s3_config_query
# With 1 bucket in us-west-2:
s3_config_query.backends['global'].create_bucket('bucket1', 'us-west-2')
# And some notifications:
notifications = {
'TopicConfiguration': [{
'Id': 'Topic',
"Topic": 'arn:aws:sns:us-west-2:012345678910:mytopic',
"Event": [
"s3:ReducedRedundancyLostObject",
"s3:ObjectRestore:Completed"
]
}],
'QueueConfiguration': [{
'Id': 'Queue',
'Queue': 'arn:aws:sqs:us-west-2:012345678910:myqueue',
'Event': [
"s3:ObjectRemoved:Delete"
],
'Filter': {
'S3Key': {
'FilterRule': [
{
'Name': 'prefix',
'Value': 'stuff/here/'
}
]
}
}
}],
'CloudFunctionConfiguration': [{
'Id': 'Lambda',
'CloudFunction': 'arn:aws:lambda:us-west-2:012345678910:function:mylambda',
'Event': [
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:Put"
],
'Filter': {
'S3Key': {
'FilterRule': [
{
'Name': 'suffix',
'Value': '.png'
}
]
}
}
}]
}
s3_config_query.backends['global'].put_bucket_notification_configuration('bucket1', notifications)
# Get the notifications for this:
notifications = s3_config_query.backends['global'].buckets['bucket1'].notification_configuration.to_config_dict()
# Verify it all:
assert notifications == {
'configurations': {
'Topic': {
'events': ['s3:ReducedRedundancyLostObject', 's3:ObjectRestore:Completed'],
'filter': None,
'objectPrefixes': [],
'topicARN': 'arn:aws:sns:us-west-2:012345678910:mytopic',
'type': 'TopicConfiguration'
},
'Queue': {
'events': ['s3:ObjectRemoved:Delete'],
'filter': {
's3KeyFilter': {
'filterRules': [{
'name': 'prefix',
'value': 'stuff/here/'
}]
}
},
'objectPrefixes': [],
'queueARN': 'arn:aws:sqs:us-west-2:012345678910:myqueue',
'type': 'QueueConfiguration'
},
'Lambda': {
'events': ['s3:ObjectCreated:Post', 's3:ObjectCreated:Copy', 's3:ObjectCreated:Put'],
'filter': {
's3KeyFilter': {
'filterRules': [{
'name': 'suffix',
'value': '.png'
}]
}
},
'objectPrefixes': [],
'queueARN': 'arn:aws:lambda:us-west-2:012345678910:function:mylambda',
'type': 'LambdaConfiguration'
}
}
}
@mock_s3
def test_s3_acl_to_config_dict():
from moto.s3.config import s3_config_query
from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, OWNER
# With 1 bucket in us-west-2:
s3_config_query.backends['global'].create_bucket('logbucket', 'us-west-2')
# Get the config dict with nothing other than the owner details:
acls = s3_config_query.backends['global'].buckets['logbucket'].acl.to_config_dict()
assert acls == {
'grantSet': None,
'owner': {'displayName': None, 'id': OWNER}
}
# Add some Log Bucket ACLs:
log_acls = FakeAcl([
FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "WRITE"),
FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "READ_ACP"),
FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL")
])
s3_config_query.backends['global'].set_bucket_acl('logbucket', log_acls)
acls = s3_config_query.backends['global'].buckets['logbucket'].acl.to_config_dict()
assert acls == {
'grantSet': None,
'grantList': [{'grantee': 'LogDelivery', 'permission': 'Write'}, {'grantee': 'LogDelivery', 'permission': 'ReadAcp'}],
'owner': {'displayName': None, 'id': OWNER}
}
# Give the owner less than full_control permissions:
log_acls = FakeAcl([FakeGrant([FakeGrantee(id=OWNER)], "READ_ACP"), FakeGrant([FakeGrantee(id=OWNER)], "WRITE_ACP")])
s3_config_query.backends['global'].set_bucket_acl('logbucket', log_acls)
acls = s3_config_query.backends['global'].buckets['logbucket'].acl.to_config_dict()
assert acls == {
'grantSet': None,
'grantList': [
{'grantee': {'id': OWNER, 'displayName': None}, 'permission': 'ReadAcp'},
{'grantee': {'id': OWNER, 'displayName': None}, 'permission': 'WriteAcp'}
],
'owner': {'displayName': None, 'id': OWNER}
}
@mock_s3
def test_s3_config_dict():
from moto.s3.config import s3_config_query
from moto.s3.models import FakeAcl, FakeGrant, FakeGrantee, FakeTag, FakeTagging, FakeTagSet, OWNER
# Without any buckets:
assert not s3_config_query.get_config_resource('some_bucket')
tags = FakeTagging(FakeTagSet([FakeTag('someTag', 'someValue'), FakeTag('someOtherTag', 'someOtherValue')]))
# With 1 bucket in us-west-2:
s3_config_query.backends['global'].create_bucket('bucket1', 'us-west-2')
s3_config_query.backends['global'].put_bucket_tagging('bucket1', tags)
# With a log bucket:
s3_config_query.backends['global'].create_bucket('logbucket', 'us-west-2')
log_acls = FakeAcl([
FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "WRITE"),
FakeGrant([FakeGrantee(uri="http://acs.amazonaws.com/groups/s3/LogDelivery")], "READ_ACP"),
FakeGrant([FakeGrantee(id=OWNER)], "FULL_CONTROL")
])
s3_config_query.backends['global'].set_bucket_acl('logbucket', log_acls)
s3_config_query.backends['global'].put_bucket_logging('bucket1', {'TargetBucket': 'logbucket', 'TargetPrefix': ''})
# Get the us-west-2 bucket and verify that it works properly:
bucket1_result = s3_config_query.get_config_resource('bucket1')
# Just verify a few things:
assert bucket1_result['arn'] == 'arn:aws:s3:::bucket1'
assert bucket1_result['awsRegion'] == 'us-west-2'
assert bucket1_result['resourceName'] == bucket1_result['resourceId'] == 'bucket1'
assert bucket1_result['tags'] == {'someTag': 'someValue', 'someOtherTag': 'someOtherValue'}
assert isinstance(bucket1_result['configuration'], str)
exist_list = ['AccessControlList', 'BucketAccelerateConfiguration', 'BucketLoggingConfiguration', 'BucketPolicy',
'IsRequesterPaysEnabled', 'BucketNotificationConfiguration']
for exist in exist_list:
assert isinstance(bucket1_result['supplementaryConfiguration'][exist], str)
# Verify the logging config:
assert json.loads(bucket1_result['supplementaryConfiguration']['BucketLoggingConfiguration']) == \
{'destinationBucketName': 'logbucket', 'logFilePrefix': ''}
# Verify the policy:
assert json.loads(bucket1_result['supplementaryConfiguration']['BucketPolicy']) == {'policyText': None}
# Filter by correct region:
assert bucket1_result == s3_config_query.get_config_resource('bucket1', resource_region='us-west-2')
# By incorrect region:
assert not s3_config_query.get_config_resource('bucket1', resource_region='eu-west-1')
# With correct resource ID and name:
assert bucket1_result == s3_config_query.get_config_resource('bucket1', resource_name='bucket1')
# With an incorrect resource name:
assert not s3_config_query.get_config_resource('bucket1', resource_name='eu-bucket-1')

View File

@@ -44,6 +44,36 @@ def test_create_topic_with_attributes():
attributes['DisplayName'].should.equal('test-topic')
@mock_sns
def test_create_topic_with_tags():
conn = boto3.client("sns", region_name="us-east-1")
response = conn.create_topic(
Name='some-topic-with-tags',
Tags=[
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
},
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
]
)
topic_arn = response['TopicArn']
conn.list_tags_for_resource(ResourceArn=topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
},
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
])
@mock_sns
def test_create_topic_should_be_indempodent():
conn = boto3.client("sns", region_name="us-east-1")
@@ -200,3 +230,204 @@ def test_add_remove_permissions():
TopicArn=response['TopicArn'],
Label='Test1234'
)
@mock_sns
def test_tag_topic():
conn = boto3.client('sns', region_name='us-east-1')
response = conn.create_topic(
Name = 'some-topic-with-tags'
)
topic_arn = response['TopicArn']
conn.tag_resource(
ResourceArn=topic_arn,
Tags=[
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
}
]
)
conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
}
])
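# Adding a tag with a new key should append it to the existing tags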
conn.tag_resource(
ResourceArn=topic_arn,
Tags=[
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
]
)
conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
},
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
])
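# Tagging with an existing key should overwrite that key's value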
conn.tag_resource(
ResourceArn = topic_arn,
Tags = [
{
'Key': 'tag_key_1',
'Value': 'tag_value_X'
}
]
)
conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_1',
'Value': 'tag_value_X'
},
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
])
@mock_sns
def test_untag_topic():
conn = boto3.client('sns', region_name = 'us-east-1')
response = conn.create_topic(
Name = 'some-topic-with-tags',
Tags = [
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
},
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
]
)
topic_arn = response['TopicArn']
conn.untag_resource(
ResourceArn = topic_arn,
TagKeys = [
'tag_key_1'
]
)
conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
])
# removing a non-existent tag should not raise any error
conn.untag_resource(
ResourceArn = topic_arn,
TagKeys = [
'not-existing-tag'
]
)
conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_2',
'Value': 'tag_value_2'
}
])
@mock_sns
def test_list_tags_for_resource_error():
conn = boto3.client('sns', region_name = 'us-east-1')
conn.create_topic(
Name = 'some-topic-with-tags',
Tags = [
{
'Key': 'tag_key_1',
'Value': 'tag_value_X'
}
]
)
conn.list_tags_for_resource.when.called_with(
ResourceArn = 'not-existing-topic'
).should.throw(
ClientError,
'Resource does not exist'
)
@mock_sns
def test_tag_resource_errors():
conn = boto3.client('sns', region_name = 'us-east-1')
response = conn.create_topic(
Name = 'some-topic-with-tags',
Tags = [
{
'Key': 'tag_key_1',
'Value': 'tag_value_X'
}
]
)
topic_arn = response['TopicArn']
conn.tag_resource.when.called_with(
ResourceArn = 'not-existing-topic',
Tags = [
{
'Key': 'tag_key_1',
'Value': 'tag_value_1'
}
]
).should.throw(
ClientError,
'Resource does not exist'
)
too_many_tags = [{'Key': 'tag_key_{}'.format(i), 'Value': 'tag_value_{}'.format(i)} for i in range(51)]
conn.tag_resource.when.called_with(
ResourceArn = topic_arn,
Tags = too_many_tags
).should.throw(
ClientError,
'Could not complete request: tag quota of per resource exceeded'
)
# when the request fails, the tags should not be updated
conn.list_tags_for_resource(ResourceArn = topic_arn)['Tags'].should.equal([
{
'Key': 'tag_key_1',
'Value': 'tag_value_X'
}
])
@mock_sns
def test_untag_resource_error():
conn = boto3.client('sns', region_name = 'us-east-1')
conn.create_topic(
Name = 'some-topic-with-tags',
Tags = [
{
'Key': 'tag_key_1',
'Value': 'tag_value_X'
}
]
)
conn.untag_resource.when.called_with(
ResourceArn = 'not-existing-topic',
TagKeys = [
'tag_key_1'
]
).should.throw(
ClientError,
'Resource does not exist'
)

View File

@@ -140,6 +140,22 @@ def test_create_queue_kms():
queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600')
@mock_sqs
def test_create_queue_with_tags():
client = boto3.client('sqs', region_name='us-east-1')
response = client.create_queue(
QueueName = 'test-queue-with-tags',
tags = {
'tag_key_1': 'tag_value_1'
}
)
queue_url = response['QueueUrl']
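# Tags supplied at queue creation should be returned by list_queue_tags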
client.list_queue_tags(QueueUrl = queue_url)['Tags'].should.equal({
'tag_key_1': 'tag_value_1'
})
@mock_sqs
def test_get_nonexistent_queue():
sqs = boto3.resource('sqs', region_name='us-east-1')
@@ -959,6 +975,48 @@ def test_tags():
resp['Tags'].should.contain('test1')
resp['Tags'].should_not.contain('test2')
# removing a non-existent tag should not raise any error
client.untag_queue(
QueueUrl=queue_url,
TagKeys=[
'not-existing-tag'
]
)
client.list_queue_tags(QueueUrl=queue_url)['Tags'].should.equal({
'test1': 'value1'
})
@mock_sqs
def test_untag_queue_errors():
client = boto3.client('sqs', region_name='us-east-1')
response = client.create_queue(
QueueName='test-queue-with-tags',
tags={
'tag_key_1': 'tag_value_1'
}
)
queue_url = response['QueueUrl']
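# Untagging a queue that does not exist should raise a queue-does-not-exist error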
client.untag_queue.when.called_with(
QueueUrl=queue_url + '-not-existing',
TagKeys=[
'tag_key_1'
]
).should.throw(
ClientError,
"The specified queue does not exist for this wsdl version."
)
client.untag_queue.when.called_with(
QueueUrl=queue_url,
TagKeys=[]
).should.throw(
ClientError,
'Tag keys must be between 1 and 128 characters in length.'
)
@mock_sqs
def test_create_fifo_queue_with_dlq():