adding layers support for lambda backend (#3563)
* adding layers support for lambda backend * improving lambda layer tests * adding lambda list_layers and fixing tests * make format
This commit is contained in:
parent
8fe5a680a5
commit
9784e1b487
@ -41,7 +41,13 @@ from .exceptions import (
|
||||
InvalidRoleFormat,
|
||||
InvalidParameterValueException,
|
||||
)
|
||||
from .utils import make_function_arn, make_function_ver_arn
|
||||
from .utils import (
|
||||
make_function_arn,
|
||||
make_function_ver_arn,
|
||||
make_layer_arn,
|
||||
make_layer_ver_arn,
|
||||
split_layer_arn,
|
||||
)
|
||||
from moto.sqs import sqs_backends
|
||||
from moto.dynamodb2 import dynamodb_backends2
|
||||
from moto.dynamodbstreams import dynamodbstreams_backends
|
||||
@ -150,6 +156,143 @@ class _DockerDataVolumeContext:
|
||||
raise # multiple processes trying to use same volume?
|
||||
|
||||
|
||||
def _zipfile_content(zipfile):
|
||||
# more hackery to handle unicode/bytes/str in python3 and python2 -
|
||||
# argh!
|
||||
try:
|
||||
to_unzip_code = base64.b64decode(bytes(zipfile, "utf-8"))
|
||||
except Exception:
|
||||
to_unzip_code = base64.b64decode(zipfile)
|
||||
|
||||
return to_unzip_code, len(to_unzip_code), hashlib.sha256(to_unzip_code).hexdigest()
|
||||
|
||||
|
||||
def _validate_s3_bucket_and_key(data):
    """Look up the S3 object referenced by ``data["S3Bucket"]``/``data["S3Key"]``.

    Returns the S3 key object, or ``None`` when S3 validation is disabled
    and the bucket or key could not be found.
    """
    try:
        # FIXME: does not validate bucket region
        return s3_backend.get_object(data["S3Bucket"], data["S3Key"])
    except MissingBucket:
        if do_validate_s3():
            raise InvalidParameterValueException(
                "Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist"
            )
    except MissingKey:
        if do_validate_s3():
            raise ValueError(
                "InvalidParameterValueException",
                "Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.",
            )
    return None
|
||||
|
||||
|
||||
class LayerVersion(CloudFormationModel):
    """A single published version of a Lambda layer."""

    def __init__(self, spec, region):
        # required
        self.region = region
        self.name = spec["LayerName"]
        self.content = spec["Content"]

        # optional
        self.description = spec.get("Description", "")
        self.compatible_runtimes = spec.get("CompatibleRuntimes", [])
        self.license_info = spec.get("LicenseInfo", "")

        # auto-generated
        self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        self.version = None
        self._attached = False
        self._layer = None

        # The layer code comes either inline (base64 zip) or from S3.
        if "ZipFile" in self.content:
            (
                self.code_bytes,
                self.code_size,
                self.code_sha_256,
            ) = _zipfile_content(self.content["ZipFile"])
        else:
            key = _validate_s3_bucket_and_key(self.content)
            if key:
                self.code_bytes = key.value
                self.code_size = key.size
                self.code_sha_256 = hashlib.sha256(key.value).hexdigest()

    @property
    def arn(self):
        """Versioned layer ARN; only valid once attached to a Layer."""
        if not self.version:
            raise ValueError("Layer version is not set")
        return make_layer_ver_arn(self.region, ACCOUNT_ID, self.name, self.version)

    def attach(self, layer, version):
        """Bind this version to ``layer`` under version number ``version``."""
        self._attached = True
        self._layer = layer
        self.version = version

    def get_layer_version(self):
        """Dict shape used by the Get/List LayerVersions API responses."""
        return {
            "Version": self.version,
            "LayerVersionArn": self.arn,
            "CreatedDate": self.created_date,
            "CompatibleRuntimes": self.compatible_runtimes,
            "Description": self.description,
            "LicenseInfo": self.license_info,
        }

    @staticmethod
    def cloudformation_name_type():
        return "LayerVersion"

    @staticmethod
    def cloudformation_type():
        return "AWS::Lambda::LayerVersion"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name
    ):
        """Publish a LayerVersion from a CloudFormation resource definition."""
        properties = cloudformation_json["Properties"]

        # required
        spec = {
            "Content": properties["Content"],
            "LayerName": resource_name,
        }
        # optional
        for prop in ("Description", "CompatibleRuntimes", "LicenseInfo"):
            if prop in properties:
                spec[prop] = properties[prop]

        return lambda_backends[region_name].publish_layer_version(spec)
|
||||
|
||||
|
||||
class Layer(object):
    """A named Lambda layer: an ordered collection of LayerVersions."""

    def __init__(self, name, region):
        self.region = region
        self.name = name

        self.layer_arn = make_layer_arn(region, ACCOUNT_ID, self.name)
        self._latest_version = 0
        self.layer_versions = {}

    def attach_version(self, layer_version):
        """Register ``layer_version`` under the next sequential version number."""
        next_version = self._latest_version + 1
        self._latest_version = next_version
        layer_version.attach(self, next_version)
        self.layer_versions[str(next_version)] = layer_version

    def to_dict(self):
        """Dict shape used by the ListLayers API response."""
        latest = self.layer_versions[str(self._latest_version)]
        return {
            "LayerName": self.name,
            "LayerArn": self.layer_arn,
            "LatestMatchingVersion": latest.get_layer_version(),
        }
|
||||
|
||||
|
||||
class LambdaFunction(CloudFormationModel, DockerModel):
|
||||
def __init__(self, spec, region, validate_s3=True, version=1):
|
||||
DockerModel.__init__(self)
|
||||
@ -171,6 +314,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
|
||||
self.memory_size = spec.get("MemorySize", 128)
|
||||
self.publish = spec.get("Publish", False) # this is ignored currently
|
||||
self.timeout = spec.get("Timeout", 3)
|
||||
self.layers = self._get_layers_data(spec.get("Layers", []))
|
||||
|
||||
self.logs_group_name = "/aws/lambda/{}".format(self.function_name)
|
||||
self.logs_backend.ensure_log_group(self.logs_group_name, [])
|
||||
@ -185,37 +329,15 @@ class LambdaFunction(CloudFormationModel, DockerModel):
|
||||
self.last_modified = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
if "ZipFile" in self.code:
|
||||
# more hackery to handle unicode/bytes/str in python3 and python2 -
|
||||
# argh!
|
||||
try:
|
||||
to_unzip_code = base64.b64decode(bytes(self.code["ZipFile"], "utf-8"))
|
||||
except Exception:
|
||||
to_unzip_code = base64.b64decode(self.code["ZipFile"])
|
||||
|
||||
self.code_bytes = to_unzip_code
|
||||
self.code_size = len(to_unzip_code)
|
||||
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
|
||||
self.code_bytes, self.code_size, self.code_sha_256 = _zipfile_content(
|
||||
self.code["ZipFile"]
|
||||
)
|
||||
|
||||
# TODO: we should be putting this in a lambda bucket
|
||||
self.code["UUID"] = str(uuid.uuid4())
|
||||
self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
|
||||
else:
|
||||
# validate s3 bucket and key
|
||||
key = None
|
||||
try:
|
||||
# FIXME: does not validate bucket region
|
||||
key = s3_backend.get_object(self.code["S3Bucket"], self.code["S3Key"])
|
||||
except MissingBucket:
|
||||
if do_validate_s3():
|
||||
raise InvalidParameterValueException(
|
||||
"Error occurred while GetObject. S3 Error Code: NoSuchBucket. S3 Error Message: The specified bucket does not exist"
|
||||
)
|
||||
except MissingKey:
|
||||
if do_validate_s3():
|
||||
raise ValueError(
|
||||
"InvalidParameterValueException",
|
||||
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.",
|
||||
)
|
||||
key = _validate_s3_bucket_and_key(self.code)
|
||||
if key:
|
||||
self.code_bytes = key.value
|
||||
self.code_size = key.size
|
||||
@ -248,6 +370,21 @@ class LambdaFunction(CloudFormationModel, DockerModel):
|
||||
def __repr__(self):
|
||||
return json.dumps(self.get_configuration())
|
||||
|
||||
def _get_layers_data(self, layers_versions_arns):
    """Resolve layer-version ARNs to ``{"Arn", "CodeSize"}`` config entries.

    Raises ValueError when any of the ARNs does not resolve to a known
    layer version in this region.
    """
    backend = lambda_backends[self.region]
    resolved = [
        backend.layers_versions_by_arn(arn) for arn in layers_versions_arns
    ]
    if not all(resolved):
        raise ValueError(
            "InvalidParameterValueException",
            "One or more LayerVersion does not exist {0}".format(layers_versions_arns),
        )
    return [{"Arn": lv.arn, "CodeSize": lv.code_size} for lv in resolved]
|
||||
|
||||
def get_configuration(self):
|
||||
config = {
|
||||
"CodeSha256": self.code_sha_256,
|
||||
@ -264,6 +401,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
|
||||
"Timeout": self.timeout,
|
||||
"Version": str(self.version),
|
||||
"VpcConfig": self.vpc_config,
|
||||
"Layers": self.layers,
|
||||
}
|
||||
if self.environment_vars:
|
||||
config["Environment"] = {"Variables": self.environment_vars}
|
||||
@ -308,6 +446,8 @@ class LambdaFunction(CloudFormationModel, DockerModel):
|
||||
self._vpc_config = value
|
||||
elif key == "Environment":
|
||||
self.environment_vars = value["Variables"]
|
||||
elif key == "Layers":
|
||||
self.layers = self._get_layers_data(value)
|
||||
|
||||
return self.get_configuration()
|
||||
|
||||
@ -868,10 +1008,43 @@ class LambdaStorage(object):
|
||||
return result
|
||||
|
||||
|
||||
class LayerStorage(object):
    """Per-region store of Layers and their versions."""

    def __init__(self):
        self._layers = {}
        self._arns = weakref.WeakValueDictionary()

    def put_layer_version(self, layer_version):
        """Attach a version to its Layer, creating the Layer on first use.

        :param layer_version: LayerVersion
        """
        if layer_version.name not in self._layers:
            self._layers[layer_version.name] = Layer(
                layer_version.name, layer_version.region
            )
        self._layers[layer_version.name].attach_version(layer_version)

    def list_layers(self):
        """Return the ListLayers-shaped dict for every known layer."""
        return [layer.to_dict() for layer in self._layers.values()]

    def get_layer_versions(self, layer_name):
        """Return all versions of ``layer_name`` (empty list if unknown)."""
        if layer_name in self._layers:
            # values() already yields the versions; the former
            # list(iter(...)) wrapper was redundant.
            return list(self._layers[layer_name].layer_versions.values())
        return []

    def get_layer_version_by_arn(self, layer_version_arn):
        """Resolve a layer-version ARN to its LayerVersion, or None."""
        split_arn = split_layer_arn(layer_version_arn)
        if split_arn.layer_name in self._layers:
            # dict.get already defaults to None
            return self._layers[split_arn.layer_name].layer_versions.get(
                split_arn.version
            )
        return None
|
||||
|
||||
|
||||
class LambdaBackend(BaseBackend):
|
||||
def __init__(self, region_name):
    """Create the per-region stores for functions, ESMs and layers."""
    self.region_name = region_name
    self._lambdas = LambdaStorage()
    self._layers = LayerStorage()
    self._event_source_mappings = {}
|
||||
def reset(self):
|
||||
@ -941,6 +1114,26 @@ class LambdaBackend(BaseBackend):
|
||||
return esm
|
||||
raise RESTError("ResourceNotFoundException", "Invalid EventSourceArn")
|
||||
|
||||
def publish_layer_version(self, spec):
    """Create a LayerVersion from ``spec`` and store it.

    Raises RESTError when ``LayerName`` or ``Content`` is missing/empty.
    """
    for param in ("LayerName", "Content"):
        if not spec.get(param):
            raise RESTError(
                "InvalidParameterValueException", "Missing {}".format(param)
            )
    layer_version = LayerVersion(spec, self.region_name)
    self._layers.put_layer_version(layer_version)
    return layer_version
|
||||
|
||||
def list_layers(self):
    """Delegate to the layer store's ListLayers view."""
    return self._layers.list_layers()
|
||||
|
||||
def get_layer_versions(self, layer_name):
    """Delegate: all versions of ``layer_name`` (empty list if unknown)."""
    return self._layers.get_layer_versions(layer_name)
|
||||
|
||||
def layers_versions_by_arn(self, layer_version_arn):
    """Delegate: resolve a layer-version ARN to a LayerVersion, or None."""
    return self._layers.get_layer_version_by_arn(layer_version_arn)
|
||||
|
||||
def publish_function(self, function_name):
    """Delegate: publish a new numbered version of ``function_name``."""
    return self._lambdas.publish_function(function_name)
|
||||
|
||||
|
@ -64,6 +64,18 @@ class LambdaResponse(BaseResponse):
|
||||
else:
|
||||
raise ValueError("Cannot handle request")
|
||||
|
||||
def list_layers(self, request, full_url, headers):
    """Route for ``/layers``: only GET (ListLayers) is supported."""
    self.setup_class(request, full_url, headers)
    if request.method == "GET":
        return self._list_layers(request, headers)
    # Consistent with the other route handlers (e.g. ``function``), which
    # raise instead of silently returning None for unhandled methods.
    raise ValueError("Cannot handle request")
|
||||
|
||||
def layers_versions(self, request, full_url, headers):
    """Route for ``/layers/{name}/versions``: GET lists, POST publishes."""
    self.setup_class(request, full_url, headers)
    if request.method == "GET":
        return self._get_layer_versions(request, full_url, headers)
    if request.method == "POST":
        return self._publish_layer_version(request, full_url, headers)
    # Consistent with the other route handlers (e.g. ``function``), which
    # raise instead of silently returning None for unhandled methods.
    raise ValueError("Cannot handle request")
|
||||
|
||||
def function(self, request, full_url, headers):
|
||||
self.setup_class(request, full_url, headers)
|
||||
if request.method == "GET":
|
||||
@ -430,3 +442,26 @@ class LambdaResponse(BaseResponse):
|
||||
)
|
||||
|
||||
return 200, {}, json.dumps({"ReservedConcurrentExecutions": resp})
|
||||
|
||||
def _list_layers(self, request, headers):
    """Return 200 with every layer known to this region's backend."""
    found_layers = self.lambda_backend.list_layers()
    return 200, {}, json.dumps({"Layers": found_layers})
|
||||
|
||||
def _get_layer_versions(self, request, full_url, headers):
    """Return 200 with all versions of the layer named in the URL path."""
    # path looks like .../layers/{layer_name}/versions
    layer_name = self.path.rsplit("/", 2)[-2]
    versions = self.lambda_backend.get_layer_versions(layer_name)
    body = {"LayerVersions": [v.get_layer_version() for v in versions]}
    return 200, {}, json.dumps(body)
|
||||
|
||||
def _publish_layer_version(self, request, full_url, headers):
    """Publish a layer version; the layer name comes from the body or path."""
    spec = self.json_body
    # path looks like .../layers/{layer_name}/versions
    spec.setdefault("LayerName", self.path.rsplit("/", 2)[-2])
    layer_version = self.lambda_backend.publish_layer_version(spec)
    return 201, {}, json.dumps(layer_version.get_layer_version())
|
||||
|
@ -20,4 +20,6 @@ url_paths = {
|
||||
r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/configuration/?$": response.configuration,
|
||||
r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/code/?$": response.code,
|
||||
r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/concurrency/?$": response.function_concurrency,
|
||||
r"{0}/(?P<api_version>[^/]+)/layers/?$": response.list_layers,
|
||||
r"{0}/(?P<api_version>[^/]+)/layers/(?P<layer_name>[\w_-]+)/versions/?$": response.layers_versions,
|
||||
}
|
||||
|
@ -1,20 +1,34 @@
|
||||
from collections import namedtuple
|
||||
from functools import partial
|
||||
|
||||
ARN = namedtuple("ARN", ["region", "account", "function_name", "version"])
|
||||
LAYER_ARN = namedtuple("LAYER_ARN", ["region", "account", "layer_name", "version"])
|
||||
|
||||
|
||||
def make_function_arn(region, account, name):
|
||||
return "arn:aws:lambda:{0}:{1}:function:{2}".format(region, account, name)
|
||||
def make_arn(resource_type, region, account, name):
    """Build an unversioned Lambda ARN for ``resource_type`` (function/layer)."""
    return "arn:aws:lambda:%s:%s:%s:%s" % (region, account, resource_type, name)
|
||||
|
||||
|
||||
def make_function_ver_arn(region, account, name, version="1"):
|
||||
arn = make_function_arn(region, account, name)
|
||||
make_function_arn = partial(make_arn, "function")
|
||||
make_layer_arn = partial(make_arn, "layer")
|
||||
|
||||
|
||||
def make_ver_arn(resource_type, region, account, name, version="1"):
    """Build a versioned Lambda ARN: the unversioned ARN plus ``:{version}``."""
    # make_arn inlined: arn:aws:lambda:{region}:{account}:{type}:{name}
    base = "arn:aws:lambda:%s:%s:%s:%s" % (region, account, resource_type, name)
    return "%s:%s" % (base, version)
|
||||
|
||||
|
||||
def split_function_arn(arn):
|
||||
arn = arn.replace("arn:aws:lambda:")
|
||||
make_function_ver_arn = partial(make_ver_arn, "function")
|
||||
make_layer_ver_arn = partial(make_ver_arn, "layer")
|
||||
|
||||
|
||||
def split_arn(arn_type, arn):
    """Parse a versioned Lambda ARN into an ``arn_type`` namedtuple.

    ``arn_type`` is a 4-field namedtuple class: (region, account,
    function_name/layer_name, version).
    """
    remainder = arn.replace("arn:aws:lambda:", "")
    # remainder: {region}:{account}:{resource_type}:{name}:{version}
    region, account, _resource_type, name, version = remainder.split(":")
    return arn_type(region, account, name, version)
|
||||
|
||||
|
||||
split_function_arn = partial(split_arn, ARN)
|
||||
split_layer_arn = partial(split_arn, LAYER_ARN)
|
||||
|
@ -24,6 +24,7 @@ from moto import (
|
||||
mock_sqs,
|
||||
)
|
||||
from moto.sts.models import ACCOUNT_ID
|
||||
from moto.core.exceptions import RESTError
|
||||
import pytest
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
@ -398,6 +399,7 @@ def test_create_function_from_aws_bucket():
|
||||
},
|
||||
"ResponseMetadata": {"HTTPStatusCode": 201},
|
||||
"State": "Active",
|
||||
"Layers": [],
|
||||
}
|
||||
)
|
||||
|
||||
@ -442,6 +444,7 @@ def test_create_function_from_zipfile():
|
||||
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
|
||||
"ResponseMetadata": {"HTTPStatusCode": 201},
|
||||
"State": "Active",
|
||||
"Layers": [],
|
||||
}
|
||||
)
|
||||
|
||||
@ -786,6 +789,7 @@ def test_list_create_list_get_delete_list():
|
||||
"Version": "$LATEST",
|
||||
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
|
||||
"State": "Active",
|
||||
"Layers": [],
|
||||
},
|
||||
"ResponseMetadata": {"HTTPStatusCode": 200},
|
||||
}
|
||||
@ -988,6 +992,7 @@ def test_get_function_created_with_zipfile():
|
||||
"Version": "$LATEST",
|
||||
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
|
||||
"State": "Active",
|
||||
"Layers": [],
|
||||
}
|
||||
)
|
||||
|
||||
@ -1678,6 +1683,7 @@ def test_update_function_zip():
|
||||
"Version": "2",
|
||||
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
|
||||
"State": "Active",
|
||||
"Layers": [],
|
||||
}
|
||||
)
|
||||
|
||||
@ -1744,6 +1750,7 @@ def test_update_function_s3():
|
||||
"Version": "2",
|
||||
"VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []},
|
||||
"State": "Active",
|
||||
"Layers": [],
|
||||
}
|
||||
)
|
||||
|
||||
@ -1886,6 +1893,113 @@ def test_get_function_concurrency():
|
||||
result["ReservedConcurrentExecutions"].should.equal(expected_concurrency)
|
||||
|
||||
|
||||
@mock_lambda
@mock_s3
@freeze_time("2015-01-01 00:00:00")
def test_get_lambda_layers():
    """End-to-end check of publish/list layer versions and function Layers."""
    # stage a zipfile in S3 so layers and functions can reference it
    s3 = boto3.client("s3", _lambda_region)
    s3.create_bucket(
        Bucket="test-bucket",
        CreateBucketConfiguration={"LocationConstraint": _lambda_region},
    )
    zip_bytes = get_test_zip_file1()
    s3.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_bytes)

    client = boto3.client("lambda", _lambda_region)

    # an empty Content dict must be rejected
    with pytest.raises((RESTError, ClientError)):
        client.publish_layer_version(
            LayerName="testLayer",
            Content={},
            CompatibleRuntimes=["python3.6"],
            LicenseInfo="MIT",
        )

    # publish v1 from an inline zipfile, v2 from S3
    client.publish_layer_version(
        LayerName="testLayer",
        Content={"ZipFile": get_test_zip_file1()},
        CompatibleRuntimes=["python3.6"],
        LicenseInfo="MIT",
    )
    client.publish_layer_version(
        LayerName="testLayer",
        Content={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        CompatibleRuntimes=["python3.6"],
        LicenseInfo="MIT",
    )

    result = client.list_layer_versions(LayerName="testLayer")
    for version in result["LayerVersions"]:
        version.pop("CreatedDate")
    result["LayerVersions"].sort(key=lambda x: x["Version"])

    expected_arn = "arn:aws:lambda:{0}:{1}:layer:testLayer:".format(
        _lambda_region, ACCOUNT_ID
    )
    result["LayerVersions"].should.equal(
        [
            {
                "Version": 1,
                "LayerVersionArn": expected_arn + "1",
                "CompatibleRuntimes": ["python3.6"],
                "Description": "",
                "LicenseInfo": "MIT",
            },
            {
                "Version": 2,
                "LayerVersionArn": expected_arn + "2",
                "CompatibleRuntimes": ["python3.6"],
                "Description": "",
                "LicenseInfo": "MIT",
            },
        ]
    )

    # a function can be created with, and switched to, specific versions
    client.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role=get_role_name(),
        Handler="lambda_function.lambda_handler",
        Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
        Environment={"Variables": {"test_variable": "test_value"}},
        Layers=[expected_arn + "1"],
    )

    result = client.get_function_configuration(FunctionName="testFunction")
    result["Layers"].should.equal(
        [{"Arn": expected_arn + "1", "CodeSize": len(zip_bytes)}]
    )
    result = client.update_function_configuration(
        FunctionName="testFunction", Layers=[expected_arn + "2"]
    )
    result["Layers"].should.equal(
        [{"Arn": expected_arn + "2", "CodeSize": len(zip_bytes)}]
    )

    # Test get layer versions for non existant layer
    result = client.list_layer_versions(LayerName="testLayer2")
    result["LayerVersions"].should.equal([])

    # Test create function with non existant layer version
    with pytest.raises((ValueError, ClientError)):
        client.create_function(
            FunctionName="testFunction",
            Runtime="python2.7",
            Role=get_role_name(),
            Handler="lambda_function.lambda_handler",
            Code={"S3Bucket": "test-bucket", "S3Key": "test.zip"},
            Description="test lambda function",
            Timeout=3,
            MemorySize=128,
            Publish=True,
            Environment={"Variables": {"test_variable": "test_value"}},
            Layers=[expected_arn + "3"],
        )
||||
|
||||
def create_invalid_lambda(role):
|
||||
conn = boto3.client("lambda", _lambda_region)
|
||||
zip_content = get_test_zip_file1()
|
||||
|
@ -1,5 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
import json
|
||||
import io
|
||||
import zipfile
|
||||
|
||||
from decimal import Decimal
|
||||
|
||||
@ -1819,6 +1821,71 @@ def lambda_handler(event, context):
|
||||
result["Concurrency"]["ReservedConcurrentExecutions"].should.equal(10)
|
||||
|
||||
|
||||
def _make_zipfile(func_str):
|
||||
zip_output = io.BytesIO()
|
||||
zip_file = zipfile.ZipFile(zip_output, "w", zipfile.ZIP_DEFLATED)
|
||||
zip_file.writestr("lambda_function.py", func_str)
|
||||
zip_file.close()
|
||||
zip_output.seek(0)
|
||||
return zip_output.read()
|
||||
|
||||
|
||||
@mock_cloudformation
@mock_s3
@mock_lambda
def test_lambda_layer():
    """A CloudFormation LayerVersion resource becomes a listable layer."""
    # switch this to python as backend lambda only supports python execution.
    layer_code = """
def lambda_handler(event, context):
    return (event, context)
"""
    region = "us-east-1"
    bucket_name = "test_bucket"
    s3 = boto3.client("s3", region)
    s3.create_bucket(Bucket=bucket_name)

    zip_content = _make_zipfile(layer_code)
    s3.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content)

    # a stack that declares a single LayerVersion sourced from S3
    template_json = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Resources": {
                "lambdaTest": {
                    "Type": "AWS::Lambda::LayerVersion",
                    "Properties": {
                        "Content": {"S3Bucket": bucket_name, "S3Key": "test.zip"},
                        "LayerName": "testLayer",
                        "Description": "Test Layer",
                        "CompatibleRuntimes": ["python2.7", "python3.6"],
                        "LicenseInfo": "MIT",
                    },
                },
            },
        }
    )
    cf = boto3.client("cloudformation", region)
    cf.create_stack(StackName="test_stack", TemplateBody=template_json)

    lambda_conn = boto3.client("lambda", region)
    listed = lambda_conn.list_layers()
    layer_name = listed["Layers"][0]["LayerName"]
    versions = lambda_conn.list_layer_versions(LayerName=layer_name)
    versions["LayerVersions"][0].pop("CreatedDate")
    versions["LayerVersions"].should.equal(
        [
            {
                "Version": 1,
                "LayerVersionArn": "arn:aws:lambda:{}:{}:layer:{}:1".format(
                    region, ACCOUNT_ID, layer_name
                ),
                "CompatibleRuntimes": ["python2.7", "python3.6"],
                "Description": "Test Layer",
                "LicenseInfo": "MIT",
            }
        ]
    )
|
||||
|
||||
|
||||
@mock_cloudformation
|
||||
@mock_ec2
|
||||
def test_nat_gateway():
|
||||
|
Loading…
Reference in New Issue
Block a user