Merge branch 'master' into bugfix/2384
commit 34d4379b9f
@@ -273,6 +273,70 @@ class LambdaFunction(BaseModel):
             "Configuration": self.get_configuration(),
         }
 
+    def update_configuration(self, config_updates):
+        for key, value in config_updates.items():
+            if key == "Description":
+                self.description = value
+            elif key == "Handler":
+                self.handler = value
+            elif key == "MemorySize":
+                self.memory_size = value
+            elif key == "Role":
+                self.role = value
+            elif key == "Runtime":
+                self.run_time = value
+            elif key == "Timeout":
+                self.timeout = value
+            elif key == "VpcConfig":
+                self.vpc_config = value
+
+        return self.get_configuration()
+
+    def update_function_code(self, updated_spec):
+        if 'DryRun' in updated_spec and updated_spec['DryRun']:
+            return self.get_configuration()
+
+        if 'ZipFile' in updated_spec:
+            self.code['ZipFile'] = updated_spec['ZipFile']
+
+            # using the "hackery" from __init__ because it seems to work
+            # TODOs and FIXMEs included, because they'll need to be fixed
+            # in both places now
+            try:
+                to_unzip_code = base64.b64decode(
+                    bytes(updated_spec['ZipFile'], 'utf-8'))
+            except Exception:
+                to_unzip_code = base64.b64decode(updated_spec['ZipFile'])
+
+            self.code_bytes = to_unzip_code
+            self.code_size = len(to_unzip_code)
+            self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
+
+            # TODO: we should be putting this in a lambda bucket
+            self.code['UUID'] = str(uuid.uuid4())
+            self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID'])
+        elif 'S3Bucket' in updated_spec and 'S3Key' in updated_spec:
+            key = None
+            try:
+                # FIXME: does not validate bucket region
+                key = s3_backend.get_key(updated_spec['S3Bucket'], updated_spec['S3Key'])
+            except MissingBucket:
+                if do_validate_s3():
+                    raise ValueError(
+                        "InvalidParameterValueException",
+                        "Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist")
+            except MissingKey:
+                if do_validate_s3():
+                    raise ValueError(
+                        "InvalidParameterValueException",
+                        "Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.")
+            if key:
+                self.code_bytes = key.value
+                self.code_size = key.size
+                self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
+
+        return self.get_configuration()
+
     @staticmethod
     def convert(s):
         try:
@@ -280,14 +344,6 @@ class LambdaFunction(BaseModel):
         except Exception:
             return s
 
-    @staticmethod
-    def is_json(test_str):
-        try:
-            response = json.loads(test_str)
-        except Exception:
-            response = test_str
-        return response
-
     def _invoke_lambda(self, code, event=None, context=None):
         # TODO: context not yet implemented
         if event is None:
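The two new model methods above are what the REST layer calls into. A minimal sketch of how they behave on their own, assuming `fn` is any existing LambdaFunction instance from this backend (names follow this diff; not part of the change itself):

    # update_configuration only touches the keys it recognises and returns
    # the refreshed configuration dict
    new_config = fn.update_configuration({'Timeout': 7, 'Handler': 'app.new_handler'})
    assert new_config['Timeout'] == 7

    # update_function_code with DryRun set leaves the stored code untouched
    unchanged = fn.update_function_code({'DryRun': True})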
@@ -122,6 +122,20 @@ class LambdaResponse(BaseResponse):
         if request.method == 'POST':
             return self._add_policy(request, full_url, headers)
 
+    def configuration(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+        if request.method == 'PUT':
+            return self._put_configuration(request)
+        else:
+            raise ValueError("Cannot handle request")
+
+    def code(self, request, full_url, headers):
+        self.setup_class(request, full_url, headers)
+        if request.method == 'PUT':
+            return self._put_code()
+        else:
+            raise ValueError("Cannot handle request")
+
     def _add_policy(self, request, full_url, headers):
         path = request.path if hasattr(request, 'path') else path_url(request.url)
         function_name = path.split('/')[-2]
@@ -308,3 +322,30 @@ class LambdaResponse(BaseResponse):
             return 204, {}, "{}"
         else:
             return 404, {}, "{}"
+
+    def _put_configuration(self, request):
+        function_name = self.path.rsplit('/', 2)[-2]
+        qualifier = self._get_param('Qualifier', None)
+
+        fn = self.lambda_backend.get_function(function_name, qualifier)
+
+        if fn:
+            config = fn.update_configuration(self.json_body)
+            return 200, {}, json.dumps(config)
+        else:
+            return 404, {}, "{}"
+
+    def _put_code(self):
+        function_name = self.path.rsplit('/', 2)[-2]
+        qualifier = self._get_param('Qualifier', None)
+
+        fn = self.lambda_backend.get_function(function_name, qualifier)
+
+        if fn:
+            if self.json_body.get('Publish', False):
+                fn = self.lambda_backend.publish_function(function_name)
+
+            config = fn.update_function_code(self.json_body)
+            return 200, {}, json.dumps(config)
+        else:
+            return 404, {}, "{}"
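Both handlers pull the function name out of the request path with rsplit. A small illustration, assuming a typical Lambda API path (the literal path below is only an example, not taken from the diff):

    path = '/2015-03-31/functions/testFunction/configuration'
    function_name = path.rsplit('/', 2)[-2]
    # function_name == 'testFunction'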
@@ -16,5 +16,7 @@ url_paths = {
     r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke,
     r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async,
     r'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag,
-    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$': response.policy
+    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/?$': response.policy,
+    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/configuration/?$': response.configuration,
+    r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/code/?$': response.code
 }
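With the two routes registered, ordinary boto3 calls reach the new handlers. A condensed sketch of the round trip (the full versions are the tests added at the end of this diff; helper names such as get_test_zip_file1 come from that test module):

    @mock_lambda
    def update_round_trip():
        conn = boto3.client('lambda', 'us-west-2')
        conn.create_function(FunctionName='testFunction', Runtime='python2.7',
                             Role='test-iam-role', Handler='lambda_function.lambda_handler',
                             Code={'ZipFile': get_test_zip_file1()}, Publish=True)
        # routed to LambdaResponse.configuration -> _put_configuration
        conn.update_function_configuration(FunctionName='testFunction', Timeout=7)
        # routed to LambdaResponse.code -> _put_code
        conn.update_function_code(FunctionName='testFunction',
                                  ZipFile=get_test_zip_file2(), Publish=True)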
@@ -799,21 +799,6 @@ class ConditionExpressionParser:
         else:  # pragma: no cover
             raise ValueError("Unknown expression node kind %r" % node.kind)
 
-    def _print_debug(self, nodes):  # pragma: no cover
-        print('ROOT')
-        for node in nodes:
-            self._print_node_recursive(node, depth=1)
-
-    def _print_node_recursive(self, node, depth=0):  # pragma: no cover
-        if len(node.children) > 0:
-            print(' ' * depth, node.nonterminal, node.kind)
-            for child in node.children:
-                self._print_node_recursive(child, depth=depth + 1)
-        else:
-            print(' ' * depth, node.nonterminal, node.kind, node.value)
-
-
-
     def _assert(self, condition, message, nodes):
         if not condition:
             raise ValueError(message + " " + " ".join([t.text for t in nodes]))
@@ -1,2 +1,7 @@
 class InvalidIndexNameError(ValueError):
     pass
+
+
+class ItemSizeTooLarge(Exception):
+    message = 'Item size has exceeded the maximum allowed size'
+    pass
@@ -16,7 +16,7 @@ from moto.core.exceptions import JsonRESTError
 from .comparisons import get_comparison_func
 from .comparisons import get_filter_expression
 from .comparisons import get_expected
-from .exceptions import InvalidIndexNameError
+from .exceptions import InvalidIndexNameError, ItemSizeTooLarge
 
 
 class DynamoJsonEncoder(json.JSONEncoder):
@@ -30,6 +30,10 @@ def dynamo_json_dump(dynamo_object):
     return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
 
 
+def bytesize(val):
+    return len(str(val).encode('utf-8'))
+
+
 class DynamoType(object):
     """
     http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
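bytesize counts UTF-8 bytes rather than characters, which is what matters for the size limit. For example (a sketch, not part of the diff):

    bytesize('abc')   # 3
    bytesize('é')     # 2 -- one character, two UTF-8 bytes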
@@ -99,6 +103,22 @@ class DynamoType(BaseModel):
 
         return None
 
+    def size(self):
+        if self.is_number():
+            value_size = len(str(self.value))
+        elif self.is_set():
+            sub_type = self.type[0]
+            value_size = sum([DynamoType({sub_type: v}).size() for v in self.value])
+        elif self.is_list():
+            value_size = sum([DynamoType(v).size() for v in self.value])
+        elif self.is_map():
+            value_size = sum([bytesize(k) + DynamoType(v).size() for k, v in self.value.items()])
+        elif type(self.value) == bool:
+            value_size = 1
+        else:
+            value_size = bytesize(self.value)
+        return value_size
+
     def to_json(self):
         return {self.type: self.value}
 
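A few worked values for the new size() method, following the branches above (a sketch, not part of the diff):

    DynamoType({'S': 'hello'}).size()             # 5 -- falls through to bytesize()
    DynamoType({'N': '12345'}).size()             # 5 -- numbers count their string length
    DynamoType({'M': {'k': {'S': 'vv'}}}).size()  # 3 -- bytesize('k') + size of the nested value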
@@ -126,6 +146,39 @@ class DynamoType(BaseModel):
         return self.type == other.type
 
 
+# https://github.com/spulec/moto/issues/1874
+# Ensure that the total size of an item does not exceed 400kb
+class LimitedSizeDict(dict):
+    def __init__(self, *args, **kwargs):
+        self.update(*args, **kwargs)
+
+    def __setitem__(self, key, value):
+        current_item_size = sum([item.size() if type(item) == DynamoType else bytesize(str(item)) for item in (list(self.keys()) + list(self.values()))])
+        new_item_size = bytesize(key) + (value.size() if type(value) == DynamoType else bytesize(str(value)))
+        # Official limit is set to 400000 (400KB)
+        # Manual testing confirms that the actual limit is between 409 and 410KB
+        # We'll set the limit to something in between to be safe
+        if (current_item_size + new_item_size) > 405000:
+            raise ItemSizeTooLarge
+        super(LimitedSizeDict, self).__setitem__(key, value)
+
+    def update(self, *args, **kwargs):
+        if args:
+            if len(args) > 1:
+                raise TypeError("update expected at most 1 arguments, "
+                                "got %d" % len(args))
+            other = dict(args[0])
+            for key in other:
+                self[key] = other[key]
+        for key in kwargs:
+            self[key] = kwargs[key]
+
+    def setdefault(self, key, value=None):
+        if key not in self:
+            self[key] = value
+        return self[key]
+
+
 class Item(BaseModel):
 
     def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
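A short sketch of the guard in action; the 405000-byte threshold and ItemSizeTooLarge come from the code above (the values here are illustrative only):

    attrs = LimitedSizeDict()
    attrs['id'] = DynamoType({'S': 'foo'})                     # fine, well under the limit
    try:
        attrs['blob'] = DynamoType({'S': 'x' * 410 * 1000})   # pushes the item past 405000 bytes
    except ItemSizeTooLarge:
        pass  # surfaced to clients as a ValidationException (see the response changes below)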
@@ -134,7 +187,7 @@ class Item(BaseModel):
         self.range_key = range_key
         self.range_key_type = range_key_type
 
-        self.attrs = {}
+        self.attrs = LimitedSizeDict()
         for key, value in attrs.items():
             self.attrs[key] = DynamoType(value)
 
@@ -824,7 +877,7 @@ class Table(BaseModel):
                 exclusive_start_key, index_name)
         return results, scanned_count, last_evaluated_key
 
-    def _trim_results(self, results, limit, exclusive_start_key, scaned_index=None):
+    def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None):
         if exclusive_start_key is not None:
             hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
             range_key = exclusive_start_key.get(self.range_key_attr)
@@ -844,10 +897,10 @@ class Table(BaseModel):
             if results[-1].range_key is not None:
                 last_evaluated_key[self.range_key_attr] = results[-1].range_key
 
-            if scaned_index:
+            if scanned_index:
                 all_indexes = self.all_indexes()
                 indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
-                idx = indexes_by_name[scaned_index]
+                idx = indexes_by_name[scanned_index]
                 idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
                 for col in idx_col_list:
                     last_evaluated_key[col] = results[-1].attrs[col]
@@ -6,7 +6,7 @@ import re
 
 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores, amzn_request_id
-from .exceptions import InvalidIndexNameError
+from .exceptions import InvalidIndexNameError, ItemSizeTooLarge
 from .models import dynamodb_backends, dynamo_json_dump
 
 
@@ -255,6 +255,9 @@ class DynamoHandler(BaseResponse):
                 name, item, expected, condition_expression,
                 expression_attribute_names, expression_attribute_values,
                 overwrite)
+        except ItemSizeTooLarge:
+            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+            return self.error(er, ItemSizeTooLarge.message)
         except ValueError:
             er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
             return self.error(er, 'A condition specified in the operation could not be evaluated.')
@@ -658,6 +661,9 @@ class DynamoHandler(BaseResponse):
                 name, key, update_expression, attribute_updates, expression_attribute_names,
                 expression_attribute_values, expected, condition_expression
             )
+        except ItemSizeTooLarge:
+            er = 'com.amazonaws.dynamodb.v20111205#ValidationException'
+            return self.error(er, ItemSizeTooLarge.message)
         except ValueError:
             er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException'
             return self.error(er, 'A condition specified in the operation could not be evaluated.')
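What a boto3 caller sees when either handler hits ItemSizeTooLarge; this mirrors the assertion helper added in the tests at the bottom of the diff (sketch only, assuming a table named 'moto-test'):

    try:
        client.put_item(TableName='moto-test',
                        Item={'id': {'S': 'foo'}, 'item': {'S': 'x' * 410 * 1000}})
    except ClientError as err:
        assert err.response['Error']['Code'] == 'ValidationException'
        assert err.response['Error']['Message'] == 'Item size has exceeded the maximum allowed size'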
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import datetime
-
 import boto.rds
 from jinja2 import Template
 
@@ -14,95 +12,6 @@ from moto.rds2.models import rds2_backends
 
 class Database(BaseModel):
 
-    def __init__(self, **kwargs):
-        self.status = "available"
-
-        self.is_replica = False
-        self.replicas = []
-
-        self.region = kwargs.get('region')
-        self.engine = kwargs.get("engine")
-        self.engine_version = kwargs.get("engine_version")
-        if self.engine_version is None:
-            self.engine_version = "5.6.21"
-        self.iops = kwargs.get("iops")
-        self.storage_encrypted = kwargs.get("storage_encrypted", False)
-        if self.storage_encrypted:
-            self.kms_key_id = kwargs.get("kms_key_id", "default_kms_key_id")
-        else:
-            self.kms_key_id = kwargs.get("kms_key_id")
-        self.storage_type = kwargs.get("storage_type")
-        self.master_username = kwargs.get('master_username')
-        self.master_password = kwargs.get('master_password')
-        self.auto_minor_version_upgrade = kwargs.get(
-            'auto_minor_version_upgrade')
-        if self.auto_minor_version_upgrade is None:
-            self.auto_minor_version_upgrade = True
-        self.allocated_storage = kwargs.get('allocated_storage')
-        self.db_instance_identifier = kwargs.get('db_instance_identifier')
-        self.source_db_identifier = kwargs.get("source_db_identifier")
-        self.db_instance_class = kwargs.get('db_instance_class')
-        self.port = kwargs.get('port')
-        self.db_name = kwargs.get("db_name")
-        self.publicly_accessible = kwargs.get("publicly_accessible")
-        if self.publicly_accessible is None:
-            self.publicly_accessible = True
-
-        self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot")
-        if self.copy_tags_to_snapshot is None:
-            self.copy_tags_to_snapshot = False
-
-        self.backup_retention_period = kwargs.get("backup_retention_period")
-        if self.backup_retention_period is None:
-            self.backup_retention_period = 1
-
-        self.availability_zone = kwargs.get("availability_zone")
-        self.multi_az = kwargs.get("multi_az")
-        self.db_subnet_group_name = kwargs.get("db_subnet_group_name")
-        self.instance_create_time = str(datetime.datetime.utcnow())
-        if self.db_subnet_group_name:
-            self.db_subnet_group = rds_backends[
-                self.region].describe_subnet_groups(self.db_subnet_group_name)[0]
-        else:
-            self.db_subnet_group = []
-
-        self.security_groups = kwargs.get('security_groups', [])
-
-        # PreferredBackupWindow
-        # PreferredMaintenanceWindow
-        # backup_retention_period = self._get_param("BackupRetentionPeriod")
-        # OptionGroupName
-        # DBParameterGroupName
-        # VpcSecurityGroupIds.member.N
-
-    @property
-    def db_instance_arn(self):
-        return "arn:aws:rds:{0}:1234567890:db:{1}".format(
-            self.region, self.db_instance_identifier)
-
-    @property
-    def physical_resource_id(self):
-        return self.db_instance_identifier
-
-    @property
-    def address(self):
-        return "{0}.aaaaaaaaaa.{1}.rds.amazonaws.com".format(self.db_instance_identifier, self.region)
-
-    def add_replica(self, replica):
-        self.replicas.append(replica.db_instance_identifier)
-
-    def remove_replica(self, replica):
-        self.replicas.remove(replica.db_instance_identifier)
-
-    def set_as_replica(self):
-        self.is_replica = True
-        self.replicas = []
-
-    def update(self, db_kwargs):
-        for key, value in db_kwargs.items():
-            if value is not None:
-                setattr(self, key, value)
-
     def get_cfn_attribute(self, attribute_name):
         if attribute_name == 'Endpoint.Address':
             return self.address
@@ -1245,3 +1245,175 @@ def test_delete_event_source_mapping():
     assert response['State'] == 'Deleting'
     conn.get_event_source_mapping.when.called_with(UUID=response['UUID'])\
         .should.throw(botocore.client.ClientError)
+
+
+@mock_lambda
+@mock_s3
+def test_update_configuration():
+    s3_conn = boto3.client('s3', 'us-west-2')
+    s3_conn.create_bucket(Bucket='test-bucket')
+
+    zip_content = get_test_zip_file2()
+    s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
+    conn = boto3.client('lambda', 'us-west-2')
+
+    fxn = conn.create_function(
+        FunctionName='testFunction',
+        Runtime='python2.7',
+        Role='test-iam-role',
+        Handler='lambda_function.lambda_handler',
+        Code={
+            'S3Bucket': 'test-bucket',
+            'S3Key': 'test.zip',
+        },
+        Description='test lambda function',
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )
+
+    assert fxn['Description'] == 'test lambda function'
+    assert fxn['Handler'] == 'lambda_function.lambda_handler'
+    assert fxn['MemorySize'] == 128
+    assert fxn['Runtime'] == 'python2.7'
+    assert fxn['Timeout'] == 3
+
+    updated_config = conn.update_function_configuration(
+        FunctionName='testFunction',
+        Description='updated test lambda function',
+        Handler='lambda_function.new_lambda_handler',
+        Runtime='python3.6',
+        Timeout=7
+    )
+
+    assert updated_config['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert updated_config['Description'] == 'updated test lambda function'
+    assert updated_config['Handler'] == 'lambda_function.new_lambda_handler'
+    assert updated_config['MemorySize'] == 128
+    assert updated_config['Runtime'] == 'python3.6'
+    assert updated_config['Timeout'] == 7
+
+
+@mock_lambda
+def test_update_function_zip():
+    conn = boto3.client('lambda', 'us-west-2')
+
+    zip_content_one = get_test_zip_file1()
+
+    fxn = conn.create_function(
+        FunctionName='testFunctionZip',
+        Runtime='python2.7',
+        Role='test-iam-role',
+        Handler='lambda_function.lambda_handler',
+        Code={
+            'ZipFile': zip_content_one,
+        },
+        Description='test lambda function',
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )
+
+    zip_content_two = get_test_zip_file2()
+
+    fxn_updated = conn.update_function_code(
+        FunctionName='testFunctionZip',
+        ZipFile=zip_content_two,
+        Publish=True
+    )
+
+    response = conn.get_function(
+        FunctionName='testFunctionZip',
+        Qualifier='2'
+    )
+    response['Configuration'].pop('LastModified')
+
+    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
+    assert len(response['Code']) == 2
+    assert response['Code']['RepositoryType'] == 'S3'
+    assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region))
+    response['Configuration'].should.equal(
+        {
+            "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(),
+            "CodeSize": len(zip_content_two),
+            "Description": "test lambda function",
+            "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunctionZip:2'.format(_lambda_region),
+            "FunctionName": "testFunctionZip",
+            "Handler": "lambda_function.lambda_handler",
+            "MemorySize": 128,
+            "Role": "test-iam-role",
+            "Runtime": "python2.7",
+            "Timeout": 3,
+            "Version": '2',
+            "VpcConfig": {
+                "SecurityGroupIds": [],
+                "SubnetIds": [],
+            }
+        },
+    )
+
+
+@mock_lambda
+@mock_s3
+def test_update_function_s3():
+    s3_conn = boto3.client('s3', 'us-west-2')
+    s3_conn.create_bucket(Bucket='test-bucket')
+
+    zip_content = get_test_zip_file1()
+    s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content)
+
+    conn = boto3.client('lambda', 'us-west-2')
+
+    fxn = conn.create_function(
+        FunctionName='testFunctionS3',
+        Runtime='python2.7',
+        Role='test-iam-role',
+        Handler='lambda_function.lambda_handler',
+        Code={
+            'S3Bucket': 'test-bucket',
+            'S3Key': 'test.zip',
+        },
+        Description='test lambda function',
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )
+
+    zip_content_two = get_test_zip_file2()
+    s3_conn.put_object(Bucket='test-bucket', Key='test2.zip', Body=zip_content_two)
+
+    fxn_updated = conn.update_function_code(
+        FunctionName='testFunctionS3',
+        S3Bucket='test-bucket',
+        S3Key='test2.zip',
+        Publish=True
+    )
+
+    response = conn.get_function(
+        FunctionName='testFunctionS3',
+        Qualifier='2'
+    )
+    response['Configuration'].pop('LastModified')
+
+    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
+    assert len(response['Code']) == 2
+    assert response['Code']['RepositoryType'] == 'S3'
+    assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region))
+    response['Configuration'].should.equal(
+        {
+            "CodeSha256": hashlib.sha256(zip_content_two).hexdigest(),
+            "CodeSize": len(zip_content_two),
+            "Description": "test lambda function",
+            "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunctionS3:2'.format(_lambda_region),
+            "FunctionName": "testFunctionS3",
+            "Handler": "lambda_function.lambda_handler",
+            "MemorySize": 128,
+            "Role": "test-iam-role",
+            "Runtime": "python2.7",
+            "Timeout": 3,
+            "Version": '2',
+            "VpcConfig": {
+                "SecurityGroupIds": [],
+                "SubnetIds": [],
+            }
+        },
+    )
@@ -2324,6 +2324,45 @@ def test_sorted_query_with_numerical_sort_key():
     assert expected_prices == response_prices, "result items are not sorted by numerical value"
 
 
+# https://github.com/spulec/moto/issues/1874
+@mock_dynamodb2
+def test_item_size_is_under_400KB():
+    dynamodb = boto3.resource('dynamodb')
+    client = boto3.client('dynamodb')
+
+    dynamodb.create_table(
+        TableName='moto-test',
+        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
+        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}
+    )
+    table = dynamodb.Table('moto-test')
+
+    large_item = 'x' * 410 * 1000
+    assert_failure_due_to_item_size(func=client.put_item,
+                                    TableName='moto-test',
+                                    Item={'id': {'S': 'foo'}, 'item': {'S': large_item}})
+    assert_failure_due_to_item_size(func=table.put_item, Item={'id': 'bar', 'item': large_item})
+    assert_failure_due_to_item_size(func=client.update_item,
+                                    TableName='moto-test',
+                                    Key={'id': {'S': 'foo2'}},
+                                    UpdateExpression='set item=:Item',
+                                    ExpressionAttributeValues={':Item': {'S': large_item}})
+    # Assert op fails when updating a nested item
+    assert_failure_due_to_item_size(func=table.put_item,
+                                    Item={'id': 'bar', 'itemlist': [{'item': large_item}]})
+    assert_failure_due_to_item_size(func=client.put_item,
+                                    TableName='moto-test',
+                                    Item={'id': {'S': 'foo'}, 'itemlist': {'L': [{'M': {'item1': {'S': large_item}}}]}})
+
+
+def assert_failure_due_to_item_size(func, **kwargs):
+    with assert_raises(ClientError) as ex:
+        func(**kwargs)
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+    ex.exception.response['Error']['Message'].should.equal('Item size has exceeded the maximum allowed size')
+
+
 def _create_user_table():
     client = boto3.client('dynamodb', region_name='us-east-1')
     client.create_table(