Merge pull request #11 from spulec/master

Merge upstream
Bert Blommers committed on 2019-10-24 23:52:06 -07:00 (via GitHub)
commit dda14ce916
13 changed files with 248 additions and 38 deletions

View File

@@ -688,7 +688,7 @@ class ConfigBackend(BaseBackend):
         del self.delivery_channels[channel_name]
 
     def list_discovered_resources(self, resource_type, backend_region, resource_ids, resource_name, limit, next_token):
-        """This will query against the mocked AWS Config listing function that must exist for the resource backend.
+        """This will query against the mocked AWS Config (non-aggregated) listing function that must exist for the resource backend.
 
        :param resource_type:
        :param backend_region:
@@ -716,6 +716,7 @@ class ConfigBackend(BaseBackend):
        # call upon the resource type's Config Query class to retrieve the list of resources that match the criteria:
        if RESOURCE_MAP.get(resource_type, {}):
            # Is this a global resource type? -- if so, re-write the region to 'global':
+            backend_query_region = backend_region  # Always provide the backend this request arrived from.
            if RESOURCE_MAP[resource_type].backends.get('global'):
                backend_region = 'global'
@@ -724,7 +725,8 @@ class ConfigBackend(BaseBackend):
            if RESOURCE_MAP[resource_type].backends.get(backend_region):
                # Fetch the resources for the backend's region:
                identifiers, new_token = \
-                    RESOURCE_MAP[resource_type].list_config_service_resources(resource_ids, resource_name, limit, next_token)
+                    RESOURCE_MAP[resource_type].list_config_service_resources(resource_ids, resource_name, limit, next_token,
+                                                                              backend_region=backend_query_region)
 
        result = {'resourceIdentifiers': [
            {
@@ -803,6 +805,7 @@ class ConfigBackend(BaseBackend):
            raise ResourceNotDiscoveredException(resource_type, id)
 
        # Is the resource type global?
+        backend_query_region = backend_region  # Always provide the backend this request arrived from.
        if RESOURCE_MAP[resource_type].backends.get('global'):
            backend_region = 'global'
@@ -811,7 +814,7 @@ class ConfigBackend(BaseBackend):
            raise ResourceNotDiscoveredException(resource_type, id)
 
        # Get the item:
-        item = RESOURCE_MAP[resource_type].get_config_resource(id, backend_region=backend_region)
+        item = RESOURCE_MAP[resource_type].get_config_resource(id, backend_region=backend_query_region)
        if not item:
            raise ResourceNotDiscoveredException(resource_type, id)
@@ -837,15 +840,17 @@ class ConfigBackend(BaseBackend):
                continue
 
            # Is the resource type global?
+            config_backend_region = backend_region
+            backend_query_region = backend_region  # Always provide the backend this request arrived from.
            if RESOURCE_MAP[resource['resourceType']].backends.get('global'):
-                backend_region = 'global'
+                config_backend_region = 'global'
 
            # If the backend region isn't implemented then we won't find the item:
-            if not RESOURCE_MAP[resource['resourceType']].backends.get(backend_region):
+            if not RESOURCE_MAP[resource['resourceType']].backends.get(config_backend_region):
                continue
 
            # Get the item:
-            item = RESOURCE_MAP[resource['resourceType']].get_config_resource(resource['resourceId'], backend_region=backend_region)
+            item = RESOURCE_MAP[resource['resourceType']].get_config_resource(resource['resourceId'], backend_region=backend_query_region)
            if not item:
                continue
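
A minimal sketch (not moto code) of the two-region pattern these hunks introduce for global services: one variable selects the backend to read from, while backend_query_region preserves the region the request arrived on so results can still be filtered by it. The backend map and bucket names below are hypothetical.

    # hypothetical store for a "global" service: one backend, per-resource regions
    backends = {'global': {'bucket-a': 'us-west-2', 'bucket-b': 'eu-west-1'}}

    def list_for_region(backend_region):
        backend_query_region = backend_region  # keep the caller's region for filtering
        if backends.get('global'):
            backend_region = 'global'          # ...but read from the global backend
        store = backends[backend_region]
        return [name for name, region in store.items()
                if region == backend_query_region]

    print(list_for_region('us-west-2'))  # ['bucket-a']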

View File

@@ -935,7 +935,13 @@ class OpLessThan(Op):
     def expr(self, item):
         lhs = self.lhs.expr(item)
         rhs = self.rhs.expr(item)
-        return lhs < rhs
+        # In python3 None is not a valid comparator when using < or > so must be handled specially
+        if lhs and rhs:
+            return lhs < rhs
+        elif lhs is None and rhs:
+            return True
+        else:
+            return False
 
 
 class OpGreaterThan(Op):
@@ -944,7 +950,13 @@ class OpGreaterThan(Op):
     def expr(self, item):
         lhs = self.lhs.expr(item)
         rhs = self.rhs.expr(item)
-        return lhs > rhs
+        # In python3 None is not a valid comparator when using < or > so must be handled specially
+        if lhs and rhs:
+            return lhs > rhs
+        elif lhs and rhs is None:
+            return True
+        else:
+            return False
 
 
 class OpEqual(Op):
@@ -971,7 +983,13 @@ class OpLessThanOrEqual(Op):
     def expr(self, item):
         lhs = self.lhs.expr(item)
         rhs = self.rhs.expr(item)
-        return lhs <= rhs
+        # In python3 None is not a valid comparator when using < or > so must be handled specially
+        if lhs and rhs:
+            return lhs <= rhs
+        elif lhs is None and rhs or lhs is None and rhs is None:
+            return True
+        else:
+            return False
 
 
 class OpGreaterThanOrEqual(Op):
@@ -980,7 +998,13 @@ class OpGreaterThanOrEqual(Op):
     def expr(self, item):
         lhs = self.lhs.expr(item)
         rhs = self.rhs.expr(item)
-        return lhs >= rhs
+        # In python3 None is not a valid comparator when using < or > so must be handled specially
+        if lhs and rhs:
+            return lhs >= rhs
+        elif lhs and rhs is None or lhs is None and rhs is None:
+            return True
+        else:
+            return False
 
 
 class OpOr(Op):
@@ -1099,7 +1123,19 @@ class FuncBetween(Func):
         super(FuncBetween, self).__init__(attribute, start, end)
 
     def expr(self, item):
-        return self.start.expr(item) <= self.attr.expr(item) <= self.end.expr(item)
+        # In python3 None is not a valid comparator when using < or > so must be handled specially
+        start = self.start.expr(item)
+        attr = self.attr.expr(item)
+        end = self.end.expr(item)
+        if start and attr and end:
+            return start <= attr <= end
+        elif start is None and attr is None:
+            # None is between None and None as well as None is between None and any number
+            return True
+        elif start is None and attr and end:
+            return attr <= end
+        else:
+            return False
 
 
 class FuncIn(Func):
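
The guards above exist because Python 3 removed cross-type ordering: comparing None with < or > raises TypeError rather than treating None as "smallest". A quick standalone check (not moto code):

    lhs, rhs = None, 5
    try:
        lhs < rhs
    except TypeError as err:
        print(err)  # '<' not supported between instances of 'NoneType' and 'int'

    # The convention the operators above adopt: a missing (None) left-hand
    # side counts as less than any present right-hand side.
    print(True if (lhs is None and rhs) else lhs < rhs)  # True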

View File

@@ -9,6 +9,7 @@ import uuid
 
 import six
 import boto3
+from botocore.exceptions import ParamValidationError
 from moto.compat import OrderedDict
 from moto.core import BaseBackend, BaseModel
 from moto.core.utils import unix_time
@@ -36,9 +37,9 @@ def bytesize(val):
 
 def attribute_is_list(attr):
     """
-    Checks if attribute denotes a list, and returns the regular expression if so
+    Checks if attribute denotes a list, and returns the name of the list and the given list index if so
     :param attr: attr or attr[index]
-    :return: attr, re or None
+    :return: attr, index or None
     """
     list_index_update = re.match('(.+)\\[([0-9]+)\\]', attr)
     if list_index_update:
@@ -302,6 +303,8 @@ class Item(BaseModel):
             attr, list_index = attribute_is_list(key.split('.')[0])
             # If value not exists, changes value to a default if needed, else its the same as it was
             value = self._get_default(value)
+            # If operation == list_append, get the original value and append it
+            value = self._get_appended_list(value, expression_attribute_values)
 
             if type(value) != DynamoType:
                 if value in expression_attribute_values:
@@ -370,6 +373,18 @@ class Item(BaseModel):
             else:
                 raise NotImplementedError('{} update action not yet supported'.format(action))
 
+    def _get_appended_list(self, value, expression_attribute_values):
+        if type(value) != DynamoType:
+            list_append_re = re.match('list_append\\((.+),(.+)\\)', value)
+            if list_append_re:
+                new_value = expression_attribute_values[list_append_re.group(2).strip()]
+                old_list = self.attrs[list_append_re.group(1)]
+                if not old_list.is_list():
+                    raise ParamValidationError
+                old_list.value.extend(new_value['L'])
+                value = old_list
+        return value
+
     def _get_default(self, value):
         if value.startswith('if_not_exists'):
             # Function signature
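
The new _get_appended_list relies on a regex to split the list_append clause into the stored attribute name and the expression-attribute placeholder. A standalone sketch of that parsing step (not moto code):

    import re

    expr = 'list_append(crontab, :i)'
    m = re.match('list_append\\((.+),(.+)\\)', expr)
    print(m.group(1))          # 'crontab' -> name of the existing list attribute
    print(m.group(2).strip())  # ':i'      -> key into expression_attribute_values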

View File

@@ -331,6 +331,12 @@ class IoTBackend(BaseBackend):
         # can raise ResourceNotFoundError
         thing = self.describe_thing(thing_name)
 
+        # detach all principals
+        for k in list(self.principal_things.keys()):
+            if k[1] == thing_name:
+                del self.principal_things[k]
+
         del self.things[thing.arn]
 
     def delete_thing_type(self, thing_type_name):
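
The detach loop copies the keys before deleting, because mutating a dict during direct iteration raises RuntimeError in Python 3; the k[1] test also suggests principal_things is keyed by (principal, thing_name) tuples. A standalone sketch with hypothetical data:

    principal_things = {
        ('arn:cert/1', 'my-thing'): None,
        ('arn:cert/1', 'other-thing'): None,
    }
    for k in list(principal_things.keys()):  # list() snapshots the keys first
        if k[1] == 'my-thing':
            del principal_things[k]
    print(list(principal_things))  # [('arn:cert/1', 'other-thing')]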

View File

@@ -125,6 +125,9 @@ class LogStream:
         return events_page, get_paging_token_from_index(back_index, True), get_paging_token_from_index(next_index)
 
     def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
+        if filter_pattern:
+            raise NotImplementedError('filter_pattern is not yet implemented')
+
         def filter_func(event):
             if start_time and event.timestamp < start_time:
                 return False

View File

@@ -8,8 +8,8 @@ from moto.s3 import s3_backends
 
 class S3ConfigQuery(ConfigQueryModel):
 
     def list_config_service_resources(self, resource_ids, resource_name, limit, next_token, backend_region=None, resource_region=None):
-        # S3 need not care about "backend_region" as S3 is global. The resource_region only matters for aggregated queries as you can
-        # filter on bucket regions for them. For other resource types, you would need to iterate appropriately for the backend_region.
+        # The resource_region only matters for aggregated queries as you can filter on bucket regions for them.
+        # For other resource types, you would need to iterate appropriately for the backend_region.
 
         # Resource IDs are the same as S3 bucket names
         # For aggregation -- did we get both a resource ID and a resource name?
@@ -31,12 +31,13 @@ class S3ConfigQuery(ConfigQueryModel):
                 if bucket in filter_buckets:
                     bucket_list.append(bucket)
 
-        # If a resource_region was supplied (aggregated only), then filter on bucket region too:
-        if resource_region:
+        # Filter on the proper region if supplied:
+        region_filter = backend_region or resource_region
+        if region_filter:
             region_buckets = []
             for bucket in bucket_list:
-                if self.backends['global'].buckets[bucket].region_name == resource_region:
+                if self.backends['global'].buckets[bucket].region_name == region_filter:
                     region_buckets.append(bucket)
 
             bucket_list = region_buckets
@@ -69,8 +70,6 @@ class S3ConfigQuery(ConfigQueryModel):
                 for bucket in bucket_list], new_token
 
     def get_config_resource(self, resource_id, resource_name=None, backend_region=None, resource_region=None):
-        # backend_region is ignored for S3 as the backend is 'global'
-
         # Get the bucket:
         bucket = self.backends['global'].buckets.get(resource_id, {})
@@ -78,7 +77,8 @@ class S3ConfigQuery(ConfigQueryModel):
             return
 
         # Are we filtering based on region?
-        if resource_region and bucket.region_name != resource_region:
+        region_filter = backend_region or resource_region
+        if region_filter and bucket.region_name != region_filter:
             return
 
         # Are we also filtering on bucket name?
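
The precedence rule both methods now share, as a one-liner sketch (names illustrative): the region the request arrived on wins, and an aggregator-supplied resource_region is only consulted when no backend_region was given.

    def effective_region(backend_region=None, resource_region=None):
        return backend_region or resource_region

    print(effective_region('us-west-2', 'eu-west-1'))  # 'us-west-2'
    print(effective_region(None, 'eu-west-1'))         # 'eu-west-1'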

View File

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
+import json
 import os
 import base64
 import datetime
@@ -520,7 +521,6 @@ class LifecycleRule(BaseModel):
         Note: The following are missing that should be added in the future:
         - transitions (returns None for now)
         - noncurrentVersionTransitions (returns None for now)
-        - LifeCycle Filters that are NOT prefix
 
         :param kwargs:
         :return:
@@ -530,9 +530,9 @@ class LifecycleRule(BaseModel):
             'id': self.id,
             'prefix': self.prefix,
             'status': self.status,
-            'expirationInDays': self.expiration_days,
+            'expirationInDays': int(self.expiration_days) if self.expiration_days else None,
             'expiredObjectDeleteMarker': self.expired_object_delete_marker,
-            'noncurrentVersionExpirationInDays': -1 or self.nve_noncurrent_days,
+            'noncurrentVersionExpirationInDays': -1 or int(self.nve_noncurrent_days),
             'expirationDate': self.expiration_date,
             'transitions': None,  # Replace me with logic to fill in
             'noncurrentVersionTransitions': None,  # Replace me with logic to fill in
@@ -930,7 +930,9 @@ class FakeBucket(BaseModel):
         # Make the supplementary configuration:
         # TODO: Implement Public Access Block Support
 
-        s_config = {'AccessControlList': self.acl.to_config_dict()}
+        # This is a double-wrapped JSON for some reason...
+        s_config = {'AccessControlList': json.dumps(json.dumps(self.acl.to_config_dict()))}
 
         # TODO implement Accelerate Configuration:
         s_config['BucketAccelerateConfiguration'] = {'status': None}
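
The "double-wrapped" shape means the ACL is serialized to a JSON string and then embedded as a string value in the outer document, so consumers must decode twice. A quick standalone check (the owner id here is hypothetical):

    import json

    acl = {'grantSet': None, 'owner': {'displayName': None, 'id': 'abc123'}}
    wrapped = json.dumps(json.dumps(acl))
    assert json.loads(json.loads(wrapped)) == acl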

View File

@@ -1028,6 +1028,10 @@ def test_list_discovered_resource():
     for x in range(0, 10):
         s3_client.create_bucket(Bucket='bucket{}'.format(x), CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
 
+    # And with an EU bucket -- this should not show up for the us-west-2 config backend:
+    eu_client = boto3.client('s3', region_name='eu-west-1')
+    eu_client.create_bucket(Bucket='eu-bucket', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
+
     # Now try:
     result = client.list_discovered_resources(resourceType='AWS::S3::Bucket')
     assert len(result['resourceIdentifiers']) == 10
@@ -1039,6 +1043,9 @@ def test_list_discovered_resource():
         }
 
     assert not result.get('nextToken')
 
+    result = client.list_discovered_resources(resourceType='AWS::S3::Bucket', resourceName='eu-bucket')
+    assert not result['resourceIdentifiers']
+
     # Test that pagination places a proper nextToken in the response and also that the limit works:
     result = client.list_discovered_resources(resourceType='AWS::S3::Bucket', limit=1, nextToken='bucket1')
     assert len(result['resourceIdentifiers']) == 1
@@ -1217,6 +1224,13 @@ def test_get_resource_config_history():
     assert result[0]['resourceName'] == result[0]['resourceId'] == 'bucket1'
     assert result[0]['arn'] == 'arn:aws:s3:::bucket1'
 
+    # Make a bucket in a different region and verify that it does not show up in the config backend:
+    s3_client = boto3.client('s3', region_name='eu-west-1')
+    s3_client.create_bucket(Bucket='eu-bucket', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
+    with assert_raises(ClientError) as ce:
+        client.get_resource_config_history(resourceType='AWS::S3::Bucket', resourceId='eu-bucket')
+    assert ce.exception.response['Error']['Code'] == 'ResourceNotDiscoveredException'
+
 
 @mock_config
 @mock_s3
@@ -1254,6 +1268,13 @@ def test_batch_get_resource_config():
     assert not buckets_missing
 
+    # Make a bucket in a different region and verify that it does not show up in the config backend:
+    s3_client = boto3.client('s3', region_name='eu-west-1')
+    s3_client.create_bucket(Bucket='eu-bucket', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
+    keys = [{'resourceType': 'AWS::S3::Bucket', 'resourceId': 'eu-bucket'}]
+    result = client.batch_get_resource_config(resourceKeys=keys)
+    assert not result['baseConfigurationItems']
+
 
 @mock_config
 @mock_s3

View File

@@ -11,7 +11,7 @@ import requests
 from moto import mock_dynamodb2, mock_dynamodb2_deprecated
 from moto.dynamodb2 import dynamodb_backend2
 from boto.exception import JSONResponseError
-from botocore.exceptions import ClientError
+from botocore.exceptions import ClientError, ParamValidationError
 from tests.helpers import requires_boto_gte
 import tests.backport_assert_raises
@@ -1323,7 +1323,7 @@ def test_bad_scan_filter():
     except ClientError as err:
         err.response['Error']['Code'].should.equal('ValidationError')
     else:
-        raise RuntimeError('Should of raised ResourceInUseException')
+        raise RuntimeError('Should have raised ResourceInUseException')
 
 
 @mock_dynamodb2
@@ -1375,7 +1375,7 @@ def test_duplicate_create():
     except ClientError as err:
         err.response['Error']['Code'].should.equal('ResourceInUseException')
     else:
-        raise RuntimeError('Should of raised ResourceInUseException')
+        raise RuntimeError('Should have raised ResourceInUseException')
 
 
 @mock_dynamodb2
@@ -1400,7 +1400,7 @@ def test_delete_table():
     except ClientError as err:
         err.response['Error']['Code'].should.equal('ResourceNotFoundException')
     else:
-        raise RuntimeError('Should of raised ResourceNotFoundException')
+        raise RuntimeError('Should have raised ResourceNotFoundException')
 
 
 @mock_dynamodb2
@@ -2734,6 +2734,13 @@ def test_item_size_is_under_400KB():
                       Item={'id': {'S': 'foo'}, 'itemlist': {'L': [{'M': {'item1': {'S': large_item}}}]}})
 
 
+def assert_failure_due_to_item_size(func, **kwargs):
+    with assert_raises(ClientError) as ex:
+        func(**kwargs)
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+    ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size')
+
+
 @mock_dynamodb2
 # https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-KeyConditionExpression
 def test_hash_key_cannot_use_begins_with_operations():
@@ -2759,11 +2766,74 @@ def test_hash_key_cannot_use_begins_with_operations():
     ex.exception.response['Error']['Message'].should.equal('Query key condition not supported')
 
 
-def assert_failure_due_to_item_size(func, **kwargs):
-    with assert_raises(ClientError) as ex:
-        func(**kwargs)
-    ex.exception.response['Error']['Code'].should.equal('ValidationException')
-    ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size')
+@mock_dynamodb2
+def test_update_supports_complex_expression_attribute_values():
+    client = boto3.client('dynamodb')
+
+    client.create_table(AttributeDefinitions=[{'AttributeName': 'SHA256', 'AttributeType': 'S'}],
+                        TableName='TestTable',
+                        KeySchema=[{'AttributeName': 'SHA256', 'KeyType': 'HASH'}],
+                        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5})
+    client.update_item(TableName='TestTable',
+                       Key={'SHA256': {'S': 'sha-of-file'}},
+                       UpdateExpression=('SET MD5 = :md5,'
+                                         'MyStringSet = :string_set,'
+                                         'MyMap = :map'),
+                       ExpressionAttributeValues={':md5': {'S': 'md5-of-file'},
+                                                  ':string_set': {'SS': ['string1', 'string2']},
+                                                  ':map': {'M': {'EntryKey': {'SS': ['thing1', 'thing2']}}}})
+    result = client.get_item(TableName='TestTable', Key={'SHA256': {'S': 'sha-of-file'}})['Item']
+    result.should.equal({u'MyStringSet': {u'SS': [u'string1', u'string2']},
+                         'MyMap': {u'M': {u'EntryKey': {u'SS': [u'thing1', u'thing2']}}},
+                         'SHA256': {u'S': u'sha-of-file'},
+                         'MD5': {u'S': u'md5-of-file'}})
+
+
+@mock_dynamodb2
+def test_update_supports_list_append():
+    client = boto3.client('dynamodb')
+    client.create_table(AttributeDefinitions=[{'AttributeName': 'SHA256', 'AttributeType': 'S'}],
+                        TableName='TestTable',
+                        KeySchema=[{'AttributeName': 'SHA256', 'KeyType': 'HASH'}],
+                        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5})
+    client.put_item(TableName='TestTable',
+                    Item={'SHA256': {'S': 'sha-of-file'}, 'crontab': {'L': [{'S': 'bar1'}]}})
+
+    # Update item using list_append expression
+    client.update_item(TableName='TestTable',
+                       Key={'SHA256': {'S': 'sha-of-file'}},
+                       UpdateExpression="SET crontab = list_append(crontab, :i)",
+                       ExpressionAttributeValues={':i': {'L': [{'S': 'bar2'}]}})
+
+    # Verify item is appended to the existing list
+    result = client.get_item(TableName='TestTable', Key={'SHA256': {'S': 'sha-of-file'}})['Item']
+    result.should.equal({'SHA256': {'S': 'sha-of-file'},
+                         'crontab': {'L': [{'S': 'bar1'}, {'S': 'bar2'}]}})
+
+
+@mock_dynamodb2
+def test_update_catches_invalid_list_append_operation():
+    client = boto3.client('dynamodb')
+    client.create_table(AttributeDefinitions=[{'AttributeName': 'SHA256', 'AttributeType': 'S'}],
+                        TableName='TestTable',
+                        KeySchema=[{'AttributeName': 'SHA256', 'KeyType': 'HASH'}],
+                        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5})
+    client.put_item(TableName='TestTable',
+                    Item={'SHA256': {'S': 'sha-of-file'}, 'crontab': {'L': [{'S': 'bar1'}]}})
+
+    # Update item using invalid list_append expression
+    with assert_raises(ParamValidationError) as ex:
+        client.update_item(TableName='TestTable',
+                           Key={'SHA256': {'S': 'sha-of-file'}},
+                           UpdateExpression="SET crontab = list_append(crontab, :i)",
+                           ExpressionAttributeValues={':i': [{'S': 'bar2'}]})
+
+    # Verify correct error is returned
+    str(ex.exception).should.match("Parameter validation failed:")
+    str(ex.exception).should.match("Invalid type for parameter ExpressionAttributeValues.")
 
 
 def _create_user_table():
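
One point worth noting about the invalid-list_append test above: the ParamValidationError is raised client-side by botocore's request validation (each value in ExpressionAttributeValues must be a typed AttributeValue dict such as {'L': [...]}, not a bare list), so the malformed request never reaches moto's backend at all.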

View File

@@ -519,6 +519,25 @@ def test_principal_thing():
     res.should.have.key('principals').which.should.have.length_of(0)
 
 
+@mock_iot
+def test_delete_principal_thing():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    thing_name = 'my-thing'
+    thing = client.create_thing(thingName=thing_name)
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+    cert_id = cert['certificateId']
+
+    client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
+
+    client.delete_thing(thingName=thing_name)
+
+    res = client.list_principal_things(principal=cert_arn)
+    res.should.have.key('things').which.should.have.length_of(0)
+
+    client.update_certificate(certificateId=cert_id, newStatus="INACTIVE")
+    client.delete_certificate(certificateId=cert_id)
+
+
 @mock_iot
 def test_thing_groups():
     client = boto3.client('iot', region_name='ap-northeast-1')

View File

@@ -973,10 +973,6 @@ def test_generate_random(number_of_bytes):
 
     response = client.generate_random(NumberOfBytes=number_of_bytes)
 
-    # Plaintext must NOT be base64-encoded
-    with assert_raises(Exception):
-        base64.b64decode(response["Plaintext"], validate=True)
-
     response["Plaintext"].should.be.a(bytes)
     len(response["Plaintext"]).should.equal(number_of_bytes)
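
A plausible reason the removed assertion was unreliable: raw random bytes can, by coincidence, form a valid base64 payload, so "b64decode must fail" is not a property one can safely assert about random output. A quick standalone check:

    import base64

    print(base64.b64decode(b'abcd', validate=True))  # b'i\xb7\x1d' -- decodes without error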

View File

@@ -1,10 +1,12 @@
 import boto3
+import os
 import sure  # noqa
 import six
 from botocore.exceptions import ClientError
 
 from moto import mock_logs, settings
 from nose.tools import assert_raises
+from nose import SkipTest
 
 _logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
@@ -128,6 +130,36 @@ def test_filter_logs_interleaved():
         resulting_event['timestamp'].should.equal(original_message['timestamp'])
         resulting_event['message'].should.equal(original_message['message'])
 
+
+@mock_logs
+def test_filter_logs_raises_if_filter_pattern():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest('Does not work in server mode due to error in Werkzeug')
+    conn = boto3.client('logs', 'us-west-2')
+    log_group_name = 'dummy'
+    log_stream_name = 'stream'
+    conn.create_log_group(logGroupName=log_group_name)
+    conn.create_log_stream(
+        logGroupName=log_group_name,
+        logStreamName=log_stream_name
+    )
+    messages = [
+        {'timestamp': 0, 'message': 'hello'},
+        {'timestamp': 0, 'message': 'world'}
+    ]
+    conn.put_log_events(
+        logGroupName=log_group_name,
+        logStreamName=log_stream_name,
+        logEvents=messages
+    )
+    with assert_raises(NotImplementedError):
+        conn.filter_log_events(
+            logGroupName=log_group_name,
+            logStreamNames=[log_stream_name],
+            filterPattern='{$.message = "hello"}',
+        )
+
 
 @mock_logs
 def test_put_retention_policy():
     conn = boto3.client('logs', 'us-west-2')
@@ -142,6 +174,7 @@ def test_put_retention_policy():
     response = conn.delete_log_group(logGroupName=log_group_name)
 
+
 @mock_logs
 def test_delete_retention_policy():
     conn = boto3.client('logs', 'us-west-2')

View File

@@ -3728,6 +3728,10 @@ def test_s3_config_dict():
     assert json.loads(bucket1_result['supplementaryConfiguration']['BucketLoggingConfiguration']) == \
         {'destinationBucketName': 'logbucket', 'logFilePrefix': ''}
 
+    # Verify that the AccessControlList is a double-wrapped JSON string:
+    assert json.loads(json.loads(bucket1_result['supplementaryConfiguration']['AccessControlList'])) == \
+        {'grantSet': None, 'owner': {'displayName': None, 'id': '75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a'}}
+
     # Verify the policy:
     assert json.loads(bucket1_result['supplementaryConfiguration']['BucketPolicy']) == {'policyText': policy}