lambda + SNS enhancements (#1048)
* updates - support lambda messages from SNS - run lambda in docker container * decode output * populate timeout * simplify * whoops * skeletons of cloudwatchlogs * impl filter log streams * fix logging * PEP fixes * PEP fixes * fix reset * fix reset * add new endpoint * fix region name * add docker * try to fix tests * try to fix travis issue with boto * fix escaping in urls * fix environment variables * fix PEP * more pep * switch back to precise * another fix attempt * fix typo * fix lambda invoke * fix more unittests * work on getting this to work in new scheme * fix py2 * fix error * fix tests when running in server mode * more lambda fixes * try running with latest docker adapted from aiodocker * switch to docker python client * pep fixes * switch to docker volume * fix unittest * fix invoke from sns * fix zip2tar * add hack impl for get_function with zip * try fix * fix for py < 3.6 * add volume refcount * try to fix travis * docker test * fix yaml * try fix * update endpoints * fix * another attempt * try again * fix recursive import * refactor fix * revert changes with better fix * more reverts * wait for service to come up * add back detached mode * sleep and add another exception type * put this back for logging * put back with note * whoops :) * docker in docker! * fix invalid url * hopefully last fix! * fix lambda regions * fix protocol * travis!!!! * just run lambda test for now * use one print * fix escaping * another attempt * yet another * re-enable all tests * fixes * fix for py2 * revert change * fix for py2.7 * fix output ordering * remove this given there's a new unittest that covers it * changes based on review - add skeleton logs test file - switch to docker image that matches test env - fix mock_logs import * add readme entry
This commit is contained in:
parent
ca8ce8705b
commit
9008b85299
25
.travis.yml
25
.travis.yml
@ -1,23 +1,36 @@
|
|||||||
language: python
|
language: python
|
||||||
sudo: false
|
sudo: false
|
||||||
|
services:
|
||||||
|
- docker
|
||||||
python:
|
python:
|
||||||
- 2.7
|
- 2.7
|
||||||
- 3.6
|
- 3.6
|
||||||
env:
|
env:
|
||||||
- TEST_SERVER_MODE=false
|
- TEST_SERVER_MODE=false
|
||||||
- TEST_SERVER_MODE=true
|
- TEST_SERVER_MODE=true
|
||||||
|
before_install:
|
||||||
|
- export BOTO_CONFIG=/dev/null
|
||||||
install:
|
install:
|
||||||
- travis_retry pip install boto==2.45.0
|
# We build moto first so the docker container doesn't try to compile it as well, also note we don't use
|
||||||
- travis_retry pip install boto3
|
# -d for docker run so the logs show up in travis
|
||||||
- travis_retry pip install .
|
# Python images come from here: https://hub.docker.com/_/python/
|
||||||
- travis_retry pip install -r requirements-dev.txt
|
|
||||||
- travis_retry pip install coveralls==1.1
|
|
||||||
- |
|
- |
|
||||||
|
python setup.py sdist
|
||||||
|
|
||||||
if [ "$TEST_SERVER_MODE" = "true" ]; then
|
if [ "$TEST_SERVER_MODE" = "true" ]; then
|
||||||
AWS_SECRET_ACCESS_KEY=server_secret AWS_ACCESS_KEY_ID=server_key moto_server -p 5000&
|
docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh &
|
||||||
export AWS_SECRET_ACCESS_KEY=foobar_secret
|
export AWS_SECRET_ACCESS_KEY=foobar_secret
|
||||||
export AWS_ACCESS_KEY_ID=foobar_key
|
export AWS_ACCESS_KEY_ID=foobar_key
|
||||||
fi
|
fi
|
||||||
|
travis_retry pip install boto==2.45.0
|
||||||
|
travis_retry pip install boto3
|
||||||
|
travis_retry pip install dist/moto*.gz
|
||||||
|
travis_retry pip install coveralls==1.1
|
||||||
|
travis_retry pip install -r requirements-dev.txt
|
||||||
|
|
||||||
|
if [ "$TEST_SERVER_MODE" = "true" ]; then
|
||||||
|
python wait_for.py
|
||||||
|
fi
|
||||||
script:
|
script:
|
||||||
- make test
|
- make test
|
||||||
after_success:
|
after_success:
|
||||||
|
@ -96,6 +96,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L
|
|||||||
|------------------------------------------------------------------------------|
|
|------------------------------------------------------------------------------|
|
||||||
| Lambda | @mock_lambda | basic endpoints done |
|
| Lambda | @mock_lambda | basic endpoints done |
|
||||||
|------------------------------------------------------------------------------|
|
|------------------------------------------------------------------------------|
|
||||||
|
| Logs | @mock_logs | basic endpoints done |
|
||||||
|
|------------------------------------------------------------------------------|
|
||||||
| Kinesis | @mock_kinesis | core endpoints done |
|
| Kinesis | @mock_kinesis | core endpoints done |
|
||||||
|------------------------------------------------------------------------------|
|
|------------------------------------------------------------------------------|
|
||||||
| KMS | @mock_kms | basic endpoints done |
|
| KMS | @mock_kms | basic endpoints done |
|
||||||
|
@ -39,6 +39,7 @@ from .ssm import mock_ssm # flake8: noqa
|
|||||||
from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa
|
from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa
|
||||||
from .swf import mock_swf, mock_swf_deprecated # flake8: noqa
|
from .swf import mock_swf, mock_swf_deprecated # flake8: noqa
|
||||||
from .xray import mock_xray # flake8: noqa
|
from .xray import mock_xray # flake8: noqa
|
||||||
|
from .logs import mock_logs, mock_logs_deprecated # flake8: noqa
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -1,34 +1,150 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
import base64
|
import base64
|
||||||
|
from collections import defaultdict
|
||||||
import datetime
|
import datetime
|
||||||
|
import docker.errors
|
||||||
import hashlib
|
import hashlib
|
||||||
import io
|
import io
|
||||||
|
import logging
|
||||||
import os
|
import os
|
||||||
import json
|
import json
|
||||||
import sys
|
import re
|
||||||
import zipfile
|
import zipfile
|
||||||
|
import uuid
|
||||||
try:
|
import functools
|
||||||
from StringIO import StringIO
|
import tarfile
|
||||||
except:
|
import calendar
|
||||||
from io import StringIO
|
import threading
|
||||||
|
import traceback
|
||||||
|
import requests.adapters
|
||||||
|
|
||||||
import boto.awslambda
|
import boto.awslambda
|
||||||
from moto.core import BaseBackend, BaseModel
|
from moto.core import BaseBackend, BaseModel
|
||||||
|
from moto.core.utils import unix_time_millis
|
||||||
from moto.s3.models import s3_backend
|
from moto.s3.models import s3_backend
|
||||||
|
from moto.logs.models import logs_backends
|
||||||
from moto.s3.exceptions import MissingBucket, MissingKey
|
from moto.s3.exceptions import MissingBucket, MissingKey
|
||||||
|
from moto import settings
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
from tempfile import TemporaryDirectory
|
||||||
|
except ImportError:
|
||||||
|
from backports.tempfile import TemporaryDirectory
|
||||||
|
|
||||||
|
|
||||||
|
_stderr_regex = re.compile(r'START|END|REPORT RequestId: .*')
|
||||||
|
_orig_adapter_send = requests.adapters.HTTPAdapter.send
|
||||||
|
|
||||||
|
|
||||||
|
def zip2tar(zip_bytes):
|
||||||
|
with TemporaryDirectory() as td:
|
||||||
|
tarname = os.path.join(td, 'data.tar')
|
||||||
|
timeshift = int((datetime.datetime.now() -
|
||||||
|
datetime.datetime.utcnow()).total_seconds())
|
||||||
|
with zipfile.ZipFile(io.BytesIO(zip_bytes), 'r') as zipf, \
|
||||||
|
tarfile.TarFile(tarname, 'w') as tarf:
|
||||||
|
for zipinfo in zipf.infolist():
|
||||||
|
if zipinfo.filename[-1] == '/': # is_dir() is py3.6+
|
||||||
|
continue
|
||||||
|
|
||||||
|
tarinfo = tarfile.TarInfo(name=zipinfo.filename)
|
||||||
|
tarinfo.size = zipinfo.file_size
|
||||||
|
tarinfo.mtime = calendar.timegm(zipinfo.date_time) - timeshift
|
||||||
|
infile = zipf.open(zipinfo.filename)
|
||||||
|
tarf.addfile(tarinfo, infile)
|
||||||
|
|
||||||
|
with open(tarname, 'rb') as f:
|
||||||
|
tar_data = f.read()
|
||||||
|
return tar_data
|
||||||
|
|
||||||
|
|
||||||
|
class _VolumeRefCount:
|
||||||
|
__slots__ = "refcount", "volume"
|
||||||
|
|
||||||
|
def __init__(self, refcount, volume):
|
||||||
|
self.refcount = refcount
|
||||||
|
self.volume = volume
|
||||||
|
|
||||||
|
|
||||||
|
class _DockerDataVolumeContext:
|
||||||
|
_data_vol_map = defaultdict(lambda: _VolumeRefCount(0, None)) # {sha256: _VolumeRefCount}
|
||||||
|
_lock = threading.Lock()
|
||||||
|
|
||||||
|
def __init__(self, lambda_func):
|
||||||
|
self._lambda_func = lambda_func
|
||||||
|
self._vol_ref = None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def name(self):
|
||||||
|
return self._vol_ref.volume.name
|
||||||
|
|
||||||
|
def __enter__(self):
|
||||||
|
# See if volume is already known
|
||||||
|
with self.__class__._lock:
|
||||||
|
self._vol_ref = self.__class__._data_vol_map[self._lambda_func.code_sha_256]
|
||||||
|
self._vol_ref.refcount += 1
|
||||||
|
if self._vol_ref.refcount > 1:
|
||||||
|
return self
|
||||||
|
|
||||||
|
# See if the volume already exists
|
||||||
|
for vol in self._lambda_func.docker_client.volumes.list():
|
||||||
|
if vol.name == self._lambda_func.code_sha_256:
|
||||||
|
self._vol_ref.volume = vol
|
||||||
|
return self
|
||||||
|
|
||||||
|
# It doesn't exist so we need to create it
|
||||||
|
self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256)
|
||||||
|
container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: '/tmp/data'}, detach=True)
|
||||||
|
try:
|
||||||
|
tar_bytes = zip2tar(self._lambda_func.code_bytes)
|
||||||
|
container.put_archive('/tmp/data', tar_bytes)
|
||||||
|
finally:
|
||||||
|
container.remove(force=True)
|
||||||
|
|
||||||
|
return self
|
||||||
|
|
||||||
|
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||||
|
with self.__class__._lock:
|
||||||
|
self._vol_ref.refcount -= 1
|
||||||
|
if self._vol_ref.refcount == 0:
|
||||||
|
try:
|
||||||
|
self._vol_ref.volume.remove()
|
||||||
|
except docker.errors.APIError as e:
|
||||||
|
if e.status_code != 409:
|
||||||
|
raise
|
||||||
|
|
||||||
|
raise # multiple processes trying to use same volume?
|
||||||
|
|
||||||
|
|
||||||
class LambdaFunction(BaseModel):
|
class LambdaFunction(BaseModel):
|
||||||
|
def __init__(self, spec, region, validate_s3=True):
|
||||||
def __init__(self, spec, validate_s3=True):
|
|
||||||
# required
|
# required
|
||||||
|
self.region = region
|
||||||
self.code = spec['Code']
|
self.code = spec['Code']
|
||||||
self.function_name = spec['FunctionName']
|
self.function_name = spec['FunctionName']
|
||||||
self.handler = spec['Handler']
|
self.handler = spec['Handler']
|
||||||
self.role = spec['Role']
|
self.role = spec['Role']
|
||||||
self.run_time = spec['Runtime']
|
self.run_time = spec['Runtime']
|
||||||
|
self.logs_backend = logs_backends[self.region]
|
||||||
|
self.environment_vars = spec.get('Environment', {}).get('Variables', {})
|
||||||
|
self.docker_client = docker.from_env()
|
||||||
|
|
||||||
|
# Unfortunately mocking replaces this method w/o fallback enabled, so we
|
||||||
|
# need to replace it if we detect it's been mocked
|
||||||
|
if requests.adapters.HTTPAdapter.send != _orig_adapter_send:
|
||||||
|
_orig_get_adapter = self.docker_client.api.get_adapter
|
||||||
|
|
||||||
|
def replace_adapter_send(*args, **kwargs):
|
||||||
|
adapter = _orig_get_adapter(*args, **kwargs)
|
||||||
|
|
||||||
|
if isinstance(adapter, requests.adapters.HTTPAdapter):
|
||||||
|
adapter.send = functools.partial(_orig_adapter_send, adapter)
|
||||||
|
return adapter
|
||||||
|
self.docker_client.api.get_adapter = replace_adapter_send
|
||||||
|
|
||||||
# optional
|
# optional
|
||||||
self.description = spec.get('Description', '')
|
self.description = spec.get('Description', '')
|
||||||
@ -36,13 +152,18 @@ class LambdaFunction(BaseModel):
|
|||||||
self.publish = spec.get('Publish', False) # this is ignored currently
|
self.publish = spec.get('Publish', False) # this is ignored currently
|
||||||
self.timeout = spec.get('Timeout', 3)
|
self.timeout = spec.get('Timeout', 3)
|
||||||
|
|
||||||
|
self.logs_group_name = '/aws/lambda/{}'.format(self.function_name)
|
||||||
|
self.logs_backend.ensure_log_group(self.logs_group_name, [])
|
||||||
|
|
||||||
# this isn't finished yet. it needs to find out the VpcId value
|
# this isn't finished yet. it needs to find out the VpcId value
|
||||||
self._vpc_config = spec.get(
|
self._vpc_config = spec.get(
|
||||||
'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})
|
'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []})
|
||||||
|
|
||||||
# auto-generated
|
# auto-generated
|
||||||
self.version = '$LATEST'
|
self.version = '$LATEST'
|
||||||
self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
|
self.last_modified = datetime.datetime.utcnow().strftime(
|
||||||
|
'%Y-%m-%d %H:%M:%S')
|
||||||
|
|
||||||
if 'ZipFile' in self.code:
|
if 'ZipFile' in self.code:
|
||||||
# more hackery to handle unicode/bytes/str in python3 and python2 -
|
# more hackery to handle unicode/bytes/str in python3 and python2 -
|
||||||
# argh!
|
# argh!
|
||||||
@ -52,12 +173,13 @@ class LambdaFunction(BaseModel):
|
|||||||
except Exception:
|
except Exception:
|
||||||
to_unzip_code = base64.b64decode(self.code['ZipFile'])
|
to_unzip_code = base64.b64decode(self.code['ZipFile'])
|
||||||
|
|
||||||
zbuffer = io.BytesIO()
|
self.code_bytes = to_unzip_code
|
||||||
zbuffer.write(to_unzip_code)
|
|
||||||
zip_file = zipfile.ZipFile(zbuffer, 'r', zipfile.ZIP_DEFLATED)
|
|
||||||
self.code = zip_file.read("".join(zip_file.namelist()))
|
|
||||||
self.code_size = len(to_unzip_code)
|
self.code_size = len(to_unzip_code)
|
||||||
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
|
self.code_sha_256 = hashlib.sha256(to_unzip_code).hexdigest()
|
||||||
|
|
||||||
|
# TODO: we should be putting this in a lambda bucket
|
||||||
|
self.code['UUID'] = str(uuid.uuid4())
|
||||||
|
self.code['S3Key'] = '{}-{}'.format(self.function_name, self.code['UUID'])
|
||||||
else:
|
else:
|
||||||
# validate s3 bucket and key
|
# validate s3 bucket and key
|
||||||
key = None
|
key = None
|
||||||
@ -76,10 +198,12 @@ class LambdaFunction(BaseModel):
|
|||||||
"InvalidParameterValueException",
|
"InvalidParameterValueException",
|
||||||
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.")
|
"Error occurred while GetObject. S3 Error Code: NoSuchKey. S3 Error Message: The specified key does not exist.")
|
||||||
if key:
|
if key:
|
||||||
|
self.code_bytes = key.value
|
||||||
self.code_size = key.size
|
self.code_size = key.size
|
||||||
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
|
self.code_sha_256 = hashlib.sha256(key.value).hexdigest()
|
||||||
self.function_arn = 'arn:aws:lambda:123456789012:function:{0}'.format(
|
|
||||||
self.function_name)
|
self.function_arn = 'arn:aws:lambda:{}:123456789012:function:{}'.format(
|
||||||
|
self.region, self.function_name)
|
||||||
|
|
||||||
self.tags = dict()
|
self.tags = dict()
|
||||||
|
|
||||||
@ -94,7 +218,7 @@ class LambdaFunction(BaseModel):
|
|||||||
return json.dumps(self.get_configuration())
|
return json.dumps(self.get_configuration())
|
||||||
|
|
||||||
def get_configuration(self):
|
def get_configuration(self):
|
||||||
return {
|
config = {
|
||||||
"CodeSha256": self.code_sha_256,
|
"CodeSha256": self.code_sha_256,
|
||||||
"CodeSize": self.code_size,
|
"CodeSize": self.code_size,
|
||||||
"Description": self.description,
|
"Description": self.description,
|
||||||
@ -110,70 +234,105 @@ class LambdaFunction(BaseModel):
|
|||||||
"VpcConfig": self.vpc_config,
|
"VpcConfig": self.vpc_config,
|
||||||
}
|
}
|
||||||
|
|
||||||
def get_code(self):
|
if self.environment_vars:
|
||||||
if isinstance(self.code, dict):
|
config['Environment'] = {
|
||||||
return {
|
'Variables': self.environment_vars
|
||||||
"Code": {
|
|
||||||
"Location": "s3://lambda-functions.aws.amazon.com/{0}".format(self.code['S3Key']),
|
|
||||||
"RepositoryType": "S3"
|
|
||||||
},
|
|
||||||
"Configuration": self.get_configuration(),
|
|
||||||
}
|
|
||||||
else:
|
|
||||||
return {
|
|
||||||
"Configuration": self.get_configuration(),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def convert(self, s):
|
return config
|
||||||
|
|
||||||
|
def get_code(self):
|
||||||
|
return {
|
||||||
|
"Code": {
|
||||||
|
"Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format(self.region, self.code['S3Key']),
|
||||||
|
"RepositoryType": "S3"
|
||||||
|
},
|
||||||
|
"Configuration": self.get_configuration(),
|
||||||
|
}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def convert(s):
|
||||||
try:
|
try:
|
||||||
return str(s, encoding='utf-8')
|
return str(s, encoding='utf-8')
|
||||||
except:
|
except:
|
||||||
return s
|
return s
|
||||||
|
|
||||||
def is_json(self, test_str):
|
@staticmethod
|
||||||
|
def is_json(test_str):
|
||||||
try:
|
try:
|
||||||
response = json.loads(test_str)
|
response = json.loads(test_str)
|
||||||
except:
|
except:
|
||||||
response = test_str
|
response = test_str
|
||||||
return response
|
return response
|
||||||
|
|
||||||
def _invoke_lambda(self, code, event={}, context={}):
|
def _invoke_lambda(self, code, event=None, context=None):
|
||||||
# TO DO: context not yet implemented
|
# TODO: context not yet implemented
|
||||||
try:
|
if event is None:
|
||||||
mycode = "\n".join(['import json',
|
event = dict()
|
||||||
self.convert(self.code),
|
if context is None:
|
||||||
self.convert('print(json.dumps(lambda_handler(%s, %s)))' % (self.is_json(self.convert(event)), context))])
|
context = {}
|
||||||
|
|
||||||
except Exception as ex:
|
|
||||||
print("Exception %s", ex)
|
|
||||||
|
|
||||||
errored = False
|
|
||||||
try:
|
try:
|
||||||
original_stdout = sys.stdout
|
# TODO: I believe we can keep the container running and feed events as needed
|
||||||
original_stderr = sys.stderr
|
# also need to hook it up to the other services so it can make kws/s3 etc calls
|
||||||
codeOut = StringIO()
|
# Should get invoke_id /RequestId from invovation
|
||||||
codeErr = StringIO()
|
env_vars = {
|
||||||
sys.stdout = codeOut
|
"AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
|
||||||
sys.stderr = codeErr
|
"AWS_LAMBDA_FUNCTION_NAME": self.function_name,
|
||||||
exec(mycode)
|
"AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
|
||||||
exec_err = codeErr.getvalue()
|
"AWS_LAMBDA_FUNCTION_VERSION": self.version,
|
||||||
exec_out = codeOut.getvalue()
|
"AWS_REGION": self.region,
|
||||||
result = self.convert(exec_out.strip())
|
}
|
||||||
if exec_err:
|
|
||||||
result = "\n".join([exec_out.strip(), self.convert(exec_err)])
|
env_vars.update(self.environment_vars)
|
||||||
except Exception as ex:
|
|
||||||
errored = True
|
container = output = exit_code = None
|
||||||
result = '%s\n\n\nException %s' % (mycode, ex)
|
with _DockerDataVolumeContext(self) as data_vol:
|
||||||
finally:
|
try:
|
||||||
codeErr.close()
|
run_kwargs = dict(links={'motoserver': 'motoserver'}) if settings.TEST_SERVER_MODE else {}
|
||||||
codeOut.close()
|
container = self.docker_client.containers.run(
|
||||||
sys.stdout = original_stdout
|
"lambci/lambda:{}".format(self.run_time),
|
||||||
sys.stderr = original_stderr
|
[self.handler, json.dumps(event)], remove=False,
|
||||||
return self.convert(result), errored
|
mem_limit="{}m".format(self.memory_size),
|
||||||
|
volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs)
|
||||||
|
finally:
|
||||||
|
if container:
|
||||||
|
exit_code = container.wait()
|
||||||
|
output = container.logs(stdout=False, stderr=True)
|
||||||
|
output += container.logs(stdout=True, stderr=False)
|
||||||
|
container.remove()
|
||||||
|
|
||||||
|
output = output.decode('utf-8')
|
||||||
|
|
||||||
|
# Send output to "logs" backend
|
||||||
|
invoke_id = uuid.uuid4().hex
|
||||||
|
log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
|
||||||
|
date=datetime.datetime.utcnow(), version=self.version, invoke_id=invoke_id
|
||||||
|
)
|
||||||
|
|
||||||
|
self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)
|
||||||
|
|
||||||
|
log_events = [{'timestamp': unix_time_millis(), "message": line}
|
||||||
|
for line in output.splitlines()]
|
||||||
|
self.logs_backend.put_log_events(self.logs_group_name, log_stream_name, log_events, None)
|
||||||
|
|
||||||
|
if exit_code != 0:
|
||||||
|
raise Exception(
|
||||||
|
'lambda invoke failed output: {}'.format(output))
|
||||||
|
|
||||||
|
# strip out RequestId lines
|
||||||
|
output = os.linesep.join([line for line in self.convert(output).splitlines() if not _stderr_regex.match(line)])
|
||||||
|
return output, False
|
||||||
|
except BaseException as e:
|
||||||
|
traceback.print_exc()
|
||||||
|
return "error running lambda: {}".format(e), True
|
||||||
|
|
||||||
def invoke(self, body, request_headers, response_headers):
|
def invoke(self, body, request_headers, response_headers):
|
||||||
payload = dict()
|
payload = dict()
|
||||||
|
|
||||||
|
if body:
|
||||||
|
body = json.loads(body)
|
||||||
|
|
||||||
# Get the invocation type:
|
# Get the invocation type:
|
||||||
res, errored = self._invoke_lambda(code=self.code, event=body)
|
res, errored = self._invoke_lambda(code=self.code, event=body)
|
||||||
if request_headers.get("x-amz-invocation-type") == "RequestResponse":
|
if request_headers.get("x-amz-invocation-type") == "RequestResponse":
|
||||||
@ -189,7 +348,8 @@ class LambdaFunction(BaseModel):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
|
def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
|
||||||
|
region_name):
|
||||||
properties = cloudformation_json['Properties']
|
properties = cloudformation_json['Properties']
|
||||||
|
|
||||||
# required
|
# required
|
||||||
@ -212,17 +372,19 @@ class LambdaFunction(BaseModel):
|
|||||||
# this snippet converts this plaintext code to a proper base64-encoded ZIP file.
|
# this snippet converts this plaintext code to a proper base64-encoded ZIP file.
|
||||||
if 'ZipFile' in properties['Code']:
|
if 'ZipFile' in properties['Code']:
|
||||||
spec['Code']['ZipFile'] = base64.b64encode(
|
spec['Code']['ZipFile'] = base64.b64encode(
|
||||||
cls._create_zipfile_from_plaintext_code(spec['Code']['ZipFile']))
|
cls._create_zipfile_from_plaintext_code(
|
||||||
|
spec['Code']['ZipFile']))
|
||||||
|
|
||||||
backend = lambda_backends[region_name]
|
backend = lambda_backends[region_name]
|
||||||
fn = backend.create_function(spec)
|
fn = backend.create_function(spec)
|
||||||
return fn
|
return fn
|
||||||
|
|
||||||
def get_cfn_attribute(self, attribute_name):
|
def get_cfn_attribute(self, attribute_name):
|
||||||
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
|
from moto.cloudformation.exceptions import \
|
||||||
|
UnformattedGetAttTemplateException
|
||||||
if attribute_name == 'Arn':
|
if attribute_name == 'Arn':
|
||||||
region = 'us-east-1'
|
return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format(
|
||||||
return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format(region, self.function_name)
|
self.region, self.function_name)
|
||||||
raise UnformattedGetAttTemplateException()
|
raise UnformattedGetAttTemplateException()
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -236,7 +398,6 @@ class LambdaFunction(BaseModel):
|
|||||||
|
|
||||||
|
|
||||||
class EventSourceMapping(BaseModel):
|
class EventSourceMapping(BaseModel):
|
||||||
|
|
||||||
def __init__(self, spec):
|
def __init__(self, spec):
|
||||||
# required
|
# required
|
||||||
self.function_name = spec['FunctionName']
|
self.function_name = spec['FunctionName']
|
||||||
@ -246,10 +407,12 @@ class EventSourceMapping(BaseModel):
|
|||||||
# optional
|
# optional
|
||||||
self.batch_size = spec.get('BatchSize', 100)
|
self.batch_size = spec.get('BatchSize', 100)
|
||||||
self.enabled = spec.get('Enabled', True)
|
self.enabled = spec.get('Enabled', True)
|
||||||
self.starting_position_timestamp = spec.get('StartingPositionTimestamp', None)
|
self.starting_position_timestamp = spec.get('StartingPositionTimestamp',
|
||||||
|
None)
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
|
def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
|
||||||
|
region_name):
|
||||||
properties = cloudformation_json['Properties']
|
properties = cloudformation_json['Properties']
|
||||||
spec = {
|
spec = {
|
||||||
'FunctionName': properties['FunctionName'],
|
'FunctionName': properties['FunctionName'],
|
||||||
@ -264,12 +427,12 @@ class EventSourceMapping(BaseModel):
|
|||||||
|
|
||||||
|
|
||||||
class LambdaVersion(BaseModel):
|
class LambdaVersion(BaseModel):
|
||||||
|
|
||||||
def __init__(self, spec):
|
def __init__(self, spec):
|
||||||
self.version = spec['Version']
|
self.version = spec['Version']
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
|
def create_from_cloudformation_json(cls, resource_name, cloudformation_json,
|
||||||
|
region_name):
|
||||||
properties = cloudformation_json['Properties']
|
properties = cloudformation_json['Properties']
|
||||||
spec = {
|
spec = {
|
||||||
'Version': properties.get('Version')
|
'Version': properties.get('Version')
|
||||||
@ -278,9 +441,14 @@ class LambdaVersion(BaseModel):
|
|||||||
|
|
||||||
|
|
||||||
class LambdaBackend(BaseBackend):
|
class LambdaBackend(BaseBackend):
|
||||||
|
def __init__(self, region_name):
|
||||||
def __init__(self):
|
|
||||||
self._functions = {}
|
self._functions = {}
|
||||||
|
self.region_name = region_name
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
region_name = self.region_name
|
||||||
|
self.__dict__ = {}
|
||||||
|
self.__init__(region_name)
|
||||||
|
|
||||||
def has_function(self, function_name):
|
def has_function(self, function_name):
|
||||||
return function_name in self._functions
|
return function_name in self._functions
|
||||||
@ -289,7 +457,7 @@ class LambdaBackend(BaseBackend):
|
|||||||
return self.get_function_by_arn(function_arn) is not None
|
return self.get_function_by_arn(function_arn) is not None
|
||||||
|
|
||||||
def create_function(self, spec):
|
def create_function(self, spec):
|
||||||
fn = LambdaFunction(spec)
|
fn = LambdaFunction(spec, self.region_name)
|
||||||
self._functions[fn.function_name] = fn
|
self._functions[fn.function_name] = fn
|
||||||
return fn
|
return fn
|
||||||
|
|
||||||
@ -308,6 +476,42 @@ class LambdaBackend(BaseBackend):
|
|||||||
def list_functions(self):
|
def list_functions(self):
|
||||||
return self._functions.values()
|
return self._functions.values()
|
||||||
|
|
||||||
|
def send_message(self, function_name, message):
|
||||||
|
event = {
|
||||||
|
"Records": [
|
||||||
|
{
|
||||||
|
"EventVersion": "1.0",
|
||||||
|
"EventSubscriptionArn": "arn:aws:sns:EXAMPLE",
|
||||||
|
"EventSource": "aws:sns",
|
||||||
|
"Sns": {
|
||||||
|
"SignatureVersion": "1",
|
||||||
|
"Timestamp": "1970-01-01T00:00:00.000Z",
|
||||||
|
"Signature": "EXAMPLE",
|
||||||
|
"SigningCertUrl": "EXAMPLE",
|
||||||
|
"MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e",
|
||||||
|
"Message": message,
|
||||||
|
"MessageAttributes": {
|
||||||
|
"Test": {
|
||||||
|
"Type": "String",
|
||||||
|
"Value": "TestString"
|
||||||
|
},
|
||||||
|
"TestBinary": {
|
||||||
|
"Type": "Binary",
|
||||||
|
"Value": "TestBinary"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Type": "Notification",
|
||||||
|
"UnsubscribeUrl": "EXAMPLE",
|
||||||
|
"TopicArn": "arn:aws:sns:EXAMPLE",
|
||||||
|
"Subject": "TestInvoke"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
}
|
||||||
|
self._functions[function_name].invoke(json.dumps(event), {}, {})
|
||||||
|
pass
|
||||||
|
|
||||||
def list_tags(self, resource):
|
def list_tags(self, resource):
|
||||||
return self.get_function_by_arn(resource).tags
|
return self.get_function_by_arn(resource).tags
|
||||||
|
|
||||||
@ -328,10 +532,8 @@ def do_validate_s3():
|
|||||||
return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true']
|
return os.environ.get('VALIDATE_LAMBDA_S3', '') in ['', '1', 'true']
|
||||||
|
|
||||||
|
|
||||||
lambda_backends = {}
|
|
||||||
for region in boto.awslambda.regions():
|
|
||||||
lambda_backends[region.name] = LambdaBackend()
|
|
||||||
|
|
||||||
# Handle us forgotten regions, unless Lambda truly only runs out of US and
|
# Handle us forgotten regions, unless Lambda truly only runs out of US and
|
||||||
for region in ['ap-southeast-2']:
|
lambda_backends = {_region.name: LambdaBackend(_region.name)
|
||||||
lambda_backends[region] = LambdaBackend()
|
for _region in boto.awslambda.regions()}
|
||||||
|
|
||||||
|
lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2')
|
||||||
|
@ -9,8 +9,8 @@ response = LambdaResponse()
|
|||||||
|
|
||||||
url_paths = {
|
url_paths = {
|
||||||
'{0}/(?P<api_version>[^/]+)/functions/?$': response.root,
|
'{0}/(?P<api_version>[^/]+)/functions/?$': response.root,
|
||||||
'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/?$': response.function,
|
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/?$': response.function,
|
||||||
'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke,
|
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke,
|
||||||
'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async,
|
r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async,
|
||||||
'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag
|
r'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag
|
||||||
}
|
}
|
||||||
|
@ -22,6 +22,7 @@ from moto.iam import iam_backends
|
|||||||
from moto.instance_metadata import instance_metadata_backends
|
from moto.instance_metadata import instance_metadata_backends
|
||||||
from moto.kinesis import kinesis_backends
|
from moto.kinesis import kinesis_backends
|
||||||
from moto.kms import kms_backends
|
from moto.kms import kms_backends
|
||||||
|
from moto.logs import logs_backends
|
||||||
from moto.opsworks import opsworks_backends
|
from moto.opsworks import opsworks_backends
|
||||||
from moto.polly import polly_backends
|
from moto.polly import polly_backends
|
||||||
from moto.rds2 import rds2_backends
|
from moto.rds2 import rds2_backends
|
||||||
@ -55,6 +56,7 @@ BACKENDS = {
|
|||||||
'iam': iam_backends,
|
'iam': iam_backends,
|
||||||
'moto_api': moto_api_backends,
|
'moto_api': moto_api_backends,
|
||||||
'instance_metadata': instance_metadata_backends,
|
'instance_metadata': instance_metadata_backends,
|
||||||
|
'logs': logs_backends,
|
||||||
'kinesis': kinesis_backends,
|
'kinesis': kinesis_backends,
|
||||||
'kms': kms_backends,
|
'kms': kms_backends,
|
||||||
'opsworks': opsworks_backends,
|
'opsworks': opsworks_backends,
|
||||||
|
@ -3667,6 +3667,5 @@ class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend,
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
ec2_backends = {}
|
ec2_backends = {region.name: EC2Backend(region.name)
|
||||||
for region in RegionsAndZonesBackend.regions:
|
for region in RegionsAndZonesBackend.regions}
|
||||||
ec2_backends[region.name] = EC2Backend(region.name)
|
|
||||||
|
@ -18,8 +18,8 @@ class EC2ContainerServiceResponse(BaseResponse):
|
|||||||
except ValueError:
|
except ValueError:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
def _get_param(self, param):
|
def _get_param(self, param, if_none=None):
|
||||||
return self.request_params.get(param, None)
|
return self.request_params.get(param, if_none)
|
||||||
|
|
||||||
def create_cluster(self):
|
def create_cluster(self):
|
||||||
cluster_name = self._get_param('clusterName')
|
cluster_name = self._get_param('clusterName')
|
||||||
|
5
moto/logs/__init__.py
Normal file
5
moto/logs/__init__.py
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
from .models import logs_backends
|
||||||
|
from ..core.models import base_decorator, deprecated_base_decorator
|
||||||
|
|
||||||
|
mock_logs = base_decorator(logs_backends)
|
||||||
|
mock_logs_deprecated = deprecated_base_decorator(logs_backends)
|
228
moto/logs/models.py
Normal file
228
moto/logs/models.py
Normal file
@ -0,0 +1,228 @@
|
|||||||
|
from moto.core import BaseBackend
|
||||||
|
import boto.logs
|
||||||
|
from moto.core.utils import unix_time_millis
|
||||||
|
|
||||||
|
|
||||||
|
class LogEvent:
|
||||||
|
_event_id = 0
|
||||||
|
|
||||||
|
def __init__(self, ingestion_time, log_event):
|
||||||
|
self.ingestionTime = ingestion_time
|
||||||
|
self.timestamp = log_event["timestamp"]
|
||||||
|
self.message = log_event['message']
|
||||||
|
self.eventId = self.__class__._event_id
|
||||||
|
self.__class__._event_id += 1
|
||||||
|
|
||||||
|
def to_filter_dict(self):
|
||||||
|
return {
|
||||||
|
"eventId": self.eventId,
|
||||||
|
"ingestionTime": self.ingestionTime,
|
||||||
|
# "logStreamName":
|
||||||
|
"message": self.message,
|
||||||
|
"timestamp": self.timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class LogStream:
|
||||||
|
_log_ids = 0
|
||||||
|
|
||||||
|
def __init__(self, region, log_group, name):
|
||||||
|
self.region = region
|
||||||
|
self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
|
||||||
|
region=region, id=self.__class__._log_ids, log_group=log_group, log_stream=name)
|
||||||
|
self.creationTime = unix_time_millis()
|
||||||
|
self.firstEventTimestamp = None
|
||||||
|
self.lastEventTimestamp = None
|
||||||
|
self.lastIngestionTime = None
|
||||||
|
self.logStreamName = name
|
||||||
|
self.storedBytes = 0
|
||||||
|
self.uploadSequenceToken = 0 # I'm guessing this is token needed for sequenceToken by put_events
|
||||||
|
self.events = []
|
||||||
|
|
||||||
|
self.__class__._log_ids += 1
|
||||||
|
|
||||||
|
def to_describe_dict(self):
|
||||||
|
return {
|
||||||
|
"arn": self.arn,
|
||||||
|
"creationTime": self.creationTime,
|
||||||
|
"firstEventTimestamp": self.firstEventTimestamp,
|
||||||
|
"lastEventTimestamp": self.lastEventTimestamp,
|
||||||
|
"lastIngestionTime": self.lastIngestionTime,
|
||||||
|
"logStreamName": self.logStreamName,
|
||||||
|
"storedBytes": self.storedBytes,
|
||||||
|
"uploadSequenceToken": str(self.uploadSequenceToken),
|
||||||
|
}
|
||||||
|
|
||||||
|
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
|
||||||
|
# TODO: ensure sequence_token
|
||||||
|
# TODO: to be thread safe this would need a lock
|
||||||
|
self.lastIngestionTime = unix_time_millis()
|
||||||
|
# TODO: make this match AWS if possible
|
||||||
|
self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
|
||||||
|
self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events]
|
||||||
|
self.uploadSequenceToken += 1
|
||||||
|
|
||||||
|
return self.uploadSequenceToken
|
||||||
|
|
||||||
|
def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
|
||||||
|
def filter_func(event):
|
||||||
|
if start_time and event.timestamp < start_time:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if end_time and event.timestamp > end_time:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
events = sorted(filter(filter_func, self.events), key=lambda event: event.timestamp, reverse=start_from_head)
|
||||||
|
back_token = next_token
|
||||||
|
if next_token is None:
|
||||||
|
next_token = 0
|
||||||
|
|
||||||
|
events_page = events[next_token: next_token + limit]
|
||||||
|
next_token += limit
|
||||||
|
if next_token >= len(self.events):
|
||||||
|
next_token = None
|
||||||
|
|
||||||
|
return events_page, back_token, next_token
|
||||||
|
|
||||||
|
def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
|
||||||
|
def filter_func(event):
|
||||||
|
if start_time and event.timestamp < start_time:
|
||||||
|
return False
|
||||||
|
|
||||||
|
if end_time and event.timestamp > end_time:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
events = []
|
||||||
|
for event in sorted(filter(filter_func, self.events), key=lambda x: x.timestamp):
|
||||||
|
event_obj = event.to_filter_dict()
|
||||||
|
event_obj['logStreamName'] = self.logStreamName
|
||||||
|
events.append(event_obj)
|
||||||
|
return events
|
||||||
|
|
||||||
|
|
||||||
|
class LogGroup:
|
||||||
|
def __init__(self, region, name, tags):
|
||||||
|
self.name = name
|
||||||
|
self.region = region
|
||||||
|
self.tags = tags
|
||||||
|
self.streams = dict() # {name: LogStream}
|
||||||
|
|
||||||
|
def create_log_stream(self, log_stream_name):
|
||||||
|
assert log_stream_name not in self.streams
|
||||||
|
self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name)
|
||||||
|
|
||||||
|
def delete_log_stream(self, log_stream_name):
|
||||||
|
assert log_stream_name in self.streams
|
||||||
|
del self.streams[log_stream_name]
|
||||||
|
|
||||||
|
def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
|
||||||
|
log_streams = [stream.to_describe_dict() for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)]
|
||||||
|
|
||||||
|
def sorter(stream):
|
||||||
|
return stream.name if order_by == 'logStreamName' else stream.lastEventTimestamp
|
||||||
|
|
||||||
|
if next_token is None:
|
||||||
|
next_token = 0
|
||||||
|
|
||||||
|
log_streams = sorted(log_streams, key=sorter, reverse=descending)
|
||||||
|
new_token = next_token + limit
|
||||||
|
log_streams_page = log_streams[next_token: new_token]
|
||||||
|
if new_token >= len(log_streams):
|
||||||
|
new_token = None
|
||||||
|
|
||||||
|
return log_streams_page, new_token
|
||||||
|
|
||||||
|
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
|
||||||
|
assert log_stream_name in self.streams
|
||||||
|
stream = self.streams[log_stream_name]
|
||||||
|
return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)
|
||||||
|
|
||||||
|
def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
|
||||||
|
assert log_stream_name in self.streams
|
||||||
|
stream = self.streams[log_stream_name]
|
||||||
|
return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)
|
||||||
|
|
||||||
|
def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
|
||||||
|
assert not filter_pattern # TODO: impl
|
||||||
|
|
||||||
|
streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names]
|
||||||
|
|
||||||
|
events = []
|
||||||
|
for stream in streams:
|
||||||
|
events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
|
||||||
|
|
||||||
|
if interleaved:
|
||||||
|
events = sorted(events, key=lambda event: event.timestamp)
|
||||||
|
|
||||||
|
if next_token is None:
|
||||||
|
next_token = 0
|
||||||
|
|
||||||
|
events_page = events[next_token: next_token + limit]
|
||||||
|
next_token += limit
|
||||||
|
if next_token >= len(events):
|
||||||
|
next_token = None
|
||||||
|
|
||||||
|
searched_streams = [{"logStreamName": stream.logStreamName, "searchedCompletely": True} for stream in streams]
|
||||||
|
return events_page, next_token, searched_streams
|
||||||
|
|
||||||
|
|
||||||
|
class LogsBackend(BaseBackend):
|
||||||
|
def __init__(self, region_name):
|
||||||
|
self.region_name = region_name
|
||||||
|
self.groups = dict() # { logGroupName: LogGroup}
|
||||||
|
|
||||||
|
def reset(self):
|
||||||
|
region_name = self.region_name
|
||||||
|
self.__dict__ = {}
|
||||||
|
self.__init__(region_name)
|
||||||
|
|
||||||
|
def create_log_group(self, log_group_name, tags):
|
||||||
|
assert log_group_name not in self.groups
|
||||||
|
self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
|
||||||
|
|
||||||
|
def ensure_log_group(self, log_group_name, tags):
|
||||||
|
if log_group_name in self.groups:
|
||||||
|
return
|
||||||
|
self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)
|
||||||
|
|
||||||
|
def delete_log_group(self, log_group_name):
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
del self.groups[log_group_name]
|
||||||
|
|
||||||
|
def create_log_stream(self, log_group_name, log_stream_name):
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
log_group = self.groups[log_group_name]
|
||||||
|
return log_group.create_log_stream(log_stream_name)
|
||||||
|
|
||||||
|
def delete_log_stream(self, log_group_name, log_stream_name):
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
log_group = self.groups[log_group_name]
|
||||||
|
return log_group.delete_log_stream(log_stream_name)
|
||||||
|
|
||||||
|
def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
log_group = self.groups[log_group_name]
|
||||||
|
return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by)
|
||||||
|
|
||||||
|
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
|
||||||
|
# TODO: add support for sequence_tokens
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
log_group = self.groups[log_group_name]
|
||||||
|
return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)
|
||||||
|
|
||||||
|
def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
log_group = self.groups[log_group_name]
|
||||||
|
return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)
|
||||||
|
|
||||||
|
def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
|
||||||
|
assert log_group_name in self.groups
|
||||||
|
log_group = self.groups[log_group_name]
|
||||||
|
return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
|
||||||
|
|
||||||
|
|
||||||
|
logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()}
|
114
moto/logs/responses.py
Normal file
114
moto/logs/responses.py
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
from moto.core.responses import BaseResponse
|
||||||
|
from .models import logs_backends
|
||||||
|
import json
|
||||||
|
|
||||||
|
|
||||||
|
# See http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/Welcome.html
|
||||||
|
|
||||||
|
class LogsResponse(BaseResponse):
|
||||||
|
@property
|
||||||
|
def logs_backend(self):
|
||||||
|
return logs_backends[self.region]
|
||||||
|
|
||||||
|
@property
|
||||||
|
def request_params(self):
|
||||||
|
try:
|
||||||
|
return json.loads(self.body)
|
||||||
|
except ValueError:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def _get_param(self, param, if_none=None):
|
||||||
|
return self.request_params.get(param, if_none)
|
||||||
|
|
||||||
|
def create_log_group(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
tags = self._get_param('tags')
|
||||||
|
assert 1 <= len(log_group_name) <= 512 # TODO: assert pattern
|
||||||
|
|
||||||
|
self.logs_backend.create_log_group(log_group_name, tags)
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def delete_log_group(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
self.logs_backend.delete_log_group(log_group_name)
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def create_log_stream(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
log_stream_name = self._get_param('logStreamName')
|
||||||
|
self.logs_backend.create_log_stream(log_group_name, log_stream_name)
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def delete_log_stream(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
log_stream_name = self._get_param('logStreamName')
|
||||||
|
self.logs_backend.delete_log_stream(log_group_name, log_stream_name)
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def describe_log_streams(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
log_stream_name_prefix = self._get_param('logStreamNamePrefix')
|
||||||
|
descending = self._get_param('descending', False)
|
||||||
|
limit = self._get_param('limit', 50)
|
||||||
|
assert limit <= 50
|
||||||
|
next_token = self._get_param('nextToken')
|
||||||
|
order_by = self._get_param('orderBy', 'LogStreamName')
|
||||||
|
assert order_by in {'LogStreamName', 'LastEventTime'}
|
||||||
|
|
||||||
|
if order_by == 'LastEventTime':
|
||||||
|
assert not log_stream_name_prefix
|
||||||
|
|
||||||
|
streams, next_token = self.logs_backend.describe_log_streams(
|
||||||
|
descending, limit, log_group_name, log_stream_name_prefix,
|
||||||
|
next_token, order_by)
|
||||||
|
return json.dumps({
|
||||||
|
"logStreams": streams,
|
||||||
|
"nextToken": next_token
|
||||||
|
})
|
||||||
|
|
||||||
|
def put_log_events(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
log_stream_name = self._get_param('logStreamName')
|
||||||
|
log_events = self._get_param('logEvents')
|
||||||
|
sequence_token = self._get_param('sequenceToken')
|
||||||
|
|
||||||
|
next_sequence_token = self.logs_backend.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)
|
||||||
|
return json.dumps({'nextSequenceToken': next_sequence_token})
|
||||||
|
|
||||||
|
def get_log_events(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
log_stream_name = self._get_param('logStreamName')
|
||||||
|
start_time = self._get_param('startTime')
|
||||||
|
end_time = self._get_param("endTime")
|
||||||
|
limit = self._get_param('limit', 10000)
|
||||||
|
assert limit <= 10000
|
||||||
|
next_token = self._get_param('nextToken')
|
||||||
|
start_from_head = self._get_param('startFromHead')
|
||||||
|
|
||||||
|
events, next_backward_token, next_foward_token = \
|
||||||
|
self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)
|
||||||
|
|
||||||
|
return json.dumps({
|
||||||
|
"events": events,
|
||||||
|
"nextBackwardToken": next_backward_token,
|
||||||
|
"nextForwardToken": next_foward_token
|
||||||
|
})
|
||||||
|
|
||||||
|
def filter_log_events(self):
|
||||||
|
log_group_name = self._get_param('logGroupName')
|
||||||
|
log_stream_names = self._get_param('logStreamNames', [])
|
||||||
|
start_time = self._get_param('startTime')
|
||||||
|
# impl, see: http://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
|
||||||
|
filter_pattern = self._get_param('filterPattern')
|
||||||
|
interleaved = self._get_param('interleaved', False)
|
||||||
|
end_time = self._get_param("endTime")
|
||||||
|
limit = self._get_param('limit', 10000)
|
||||||
|
assert limit <= 10000
|
||||||
|
next_token = self._get_param('nextToken')
|
||||||
|
|
||||||
|
events, next_token, searched_streams = self.logs_backend.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
|
||||||
|
return json.dumps({
|
||||||
|
"events": events,
|
||||||
|
"nextToken": next_token,
|
||||||
|
"searchedLogStreams": searched_streams
|
||||||
|
})
|
9
moto/logs/urls.py
Normal file
9
moto/logs/urls.py
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
from .responses import LogsResponse
|
||||||
|
|
||||||
|
url_bases = [
|
||||||
|
"https?://logs.(.+).amazonaws.com",
|
||||||
|
]
|
||||||
|
|
||||||
|
url_paths = {
|
||||||
|
'{0}/$': LogsResponse.dispatch,
|
||||||
|
}
|
@ -4,7 +4,7 @@ from .responses import S3ResponseInstance
|
|||||||
|
|
||||||
url_bases = [
|
url_bases = [
|
||||||
"https?://s3(.*).amazonaws.com",
|
"https?://s3(.*).amazonaws.com",
|
||||||
"https?://(?P<bucket_name>[a-zA-Z0-9\-_.]*)\.?s3(.*).amazonaws.com"
|
r"https?://(?P<bucket_name>[a-zA-Z0-9\-_.]*)\.?s3(.*).amazonaws.com"
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@ -1,22 +1,23 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
import argparse
|
||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
import sys
|
import sys
|
||||||
import argparse
|
|
||||||
import six
|
|
||||||
|
|
||||||
from six.moves.urllib.parse import urlencode
|
|
||||||
|
|
||||||
from threading import Lock
|
from threading import Lock
|
||||||
|
|
||||||
|
import six
|
||||||
from flask import Flask
|
from flask import Flask
|
||||||
from flask.testing import FlaskClient
|
from flask.testing import FlaskClient
|
||||||
|
|
||||||
|
from six.moves.urllib.parse import urlencode
|
||||||
from werkzeug.routing import BaseConverter
|
from werkzeug.routing import BaseConverter
|
||||||
from werkzeug.serving import run_simple
|
from werkzeug.serving import run_simple
|
||||||
|
|
||||||
from moto.backends import BACKENDS
|
from moto.backends import BACKENDS
|
||||||
from moto.core.utils import convert_flask_to_httpretty_response
|
from moto.core.utils import convert_flask_to_httpretty_response
|
||||||
|
|
||||||
|
|
||||||
HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"]
|
HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"]
|
||||||
|
|
||||||
|
|
||||||
@ -61,7 +62,7 @@ class DomainDispatcherApplication(object):
|
|||||||
host = "instance_metadata"
|
host = "instance_metadata"
|
||||||
else:
|
else:
|
||||||
host = environ['HTTP_HOST'].split(':')[0]
|
host = environ['HTTP_HOST'].split(':')[0]
|
||||||
if host == "localhost":
|
if host in {'localhost', 'motoserver'} or host.startswith("192.168."):
|
||||||
# Fall back to parsing auth header to find service
|
# Fall back to parsing auth header to find service
|
||||||
# ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request']
|
# ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request']
|
||||||
try:
|
try:
|
||||||
|
@ -12,6 +12,8 @@ from moto.compat import OrderedDict
|
|||||||
from moto.core import BaseBackend, BaseModel
|
from moto.core import BaseBackend, BaseModel
|
||||||
from moto.core.utils import iso_8601_datetime_with_milliseconds
|
from moto.core.utils import iso_8601_datetime_with_milliseconds
|
||||||
from moto.sqs import sqs_backends
|
from moto.sqs import sqs_backends
|
||||||
|
from moto.awslambda import lambda_backends
|
||||||
|
|
||||||
from .exceptions import (
|
from .exceptions import (
|
||||||
SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter
|
SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter
|
||||||
)
|
)
|
||||||
@ -88,6 +90,11 @@ class Subscription(BaseModel):
|
|||||||
elif self.protocol in ['http', 'https']:
|
elif self.protocol in ['http', 'https']:
|
||||||
post_data = self.get_post_data(message, message_id)
|
post_data = self.get_post_data(message, message_id)
|
||||||
requests.post(self.endpoint, json=post_data)
|
requests.post(self.endpoint, json=post_data)
|
||||||
|
elif self.protocol == 'lambda':
|
||||||
|
# TODO: support bad function name
|
||||||
|
function_name = self.endpoint.split(":")[-1]
|
||||||
|
region = self.arn.split(':')[3]
|
||||||
|
lambda_backends[region].send_message(function_name, message)
|
||||||
|
|
||||||
def get_post_data(self, message, message_id):
|
def get_post_data(self, message, message_id):
|
||||||
return {
|
return {
|
||||||
|
@ -6,6 +6,7 @@ coverage
|
|||||||
flake8
|
flake8
|
||||||
freezegun
|
freezegun
|
||||||
flask
|
flask
|
||||||
|
boto>=2.45.0
|
||||||
boto3>=1.4.4
|
boto3>=1.4.4
|
||||||
botocore>=1.5.77
|
botocore>=1.5.77
|
||||||
six>=1.9
|
six>=1.9
|
||||||
|
15
setup.py
15
setup.py
@ -1,6 +1,9 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
import setuptools
|
||||||
from setuptools import setup, find_packages
|
from setuptools import setup, find_packages
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
install_requires = [
|
install_requires = [
|
||||||
"Jinja2>=2.8",
|
"Jinja2>=2.8",
|
||||||
@ -17,12 +20,21 @@ install_requires = [
|
|||||||
"pytz",
|
"pytz",
|
||||||
"python-dateutil<3.0.0,>=2.1",
|
"python-dateutil<3.0.0,>=2.1",
|
||||||
"mock",
|
"mock",
|
||||||
|
"docker>=2.5.1"
|
||||||
]
|
]
|
||||||
|
|
||||||
extras_require = {
|
extras_require = {
|
||||||
'server': ['flask'],
|
'server': ['flask'],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# https://hynek.me/articles/conditional-python-dependencies/
|
||||||
|
if int(setuptools.__version__.split(".", 1)[0]) < 18:
|
||||||
|
if sys.version_info[0:2] < (3, 3):
|
||||||
|
install_requires.append("backports.tempfile")
|
||||||
|
else:
|
||||||
|
extras_require[":python_version<'3.3'"] = ["backports.tempfile"]
|
||||||
|
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name='moto',
|
name='moto',
|
||||||
version='1.1.19',
|
version='1.1.19',
|
||||||
@ -47,6 +59,9 @@ setup(
|
|||||||
"Programming Language :: Python :: 2.7",
|
"Programming Language :: Python :: 2.7",
|
||||||
"Programming Language :: Python :: 3",
|
"Programming Language :: Python :: 3",
|
||||||
"Programming Language :: Python :: 3.3",
|
"Programming Language :: Python :: 3.3",
|
||||||
|
"Programming Language :: Python :: 3.4",
|
||||||
|
"Programming Language :: Python :: 3.5",
|
||||||
|
"Programming Language :: Python :: 3.6",
|
||||||
"License :: OSI Approved :: Apache Software License",
|
"License :: OSI Approved :: Apache Software License",
|
||||||
"Topic :: Software Development :: Testing",
|
"Topic :: Software Development :: Testing",
|
||||||
],
|
],
|
||||||
|
@ -12,11 +12,13 @@ import sure # noqa
|
|||||||
from freezegun import freeze_time
|
from freezegun import freeze_time
|
||||||
from moto import mock_lambda, mock_s3, mock_ec2, settings
|
from moto import mock_lambda, mock_s3, mock_ec2, settings
|
||||||
|
|
||||||
|
_lambda_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
|
||||||
|
|
||||||
def _process_lamda(pfunc):
|
|
||||||
|
def _process_lambda(func_str):
|
||||||
zip_output = io.BytesIO()
|
zip_output = io.BytesIO()
|
||||||
zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
|
zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
|
||||||
zip_file.writestr('lambda_function.zip', pfunc)
|
zip_file.writestr('lambda_function.py', func_str)
|
||||||
zip_file.close()
|
zip_file.close()
|
||||||
zip_output.seek(0)
|
zip_output.seek(0)
|
||||||
return zip_output.read()
|
return zip_output.read()
|
||||||
@ -27,21 +29,23 @@ def get_test_zip_file1():
|
|||||||
def lambda_handler(event, context):
|
def lambda_handler(event, context):
|
||||||
return event
|
return event
|
||||||
"""
|
"""
|
||||||
return _process_lamda(pfunc)
|
return _process_lambda(pfunc)
|
||||||
|
|
||||||
|
|
||||||
def get_test_zip_file2():
|
def get_test_zip_file2():
|
||||||
pfunc = """
|
func_str = """
|
||||||
|
import boto3
|
||||||
|
|
||||||
def lambda_handler(event, context):
|
def lambda_handler(event, context):
|
||||||
|
ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url='http://{base_url}')
|
||||||
|
|
||||||
volume_id = event.get('volume_id')
|
volume_id = event.get('volume_id')
|
||||||
print('get volume details for %s' % volume_id)
|
|
||||||
import boto3
|
|
||||||
ec2 = boto3.resource('ec2', region_name='us-west-2', endpoint_url="http://{base_url}")
|
|
||||||
vol = ec2.Volume(volume_id)
|
vol = ec2.Volume(volume_id)
|
||||||
print('Volume - %s state=%s, size=%s' % (volume_id, vol.state, vol.size))
|
|
||||||
|
print('get volume details for %s\\nVolume - %s state=%s, size=%s' % (volume_id, volume_id, vol.state, vol.size))
|
||||||
return event
|
return event
|
||||||
""".format(base_url="localhost:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com")
|
""".format(base_url="motoserver:5000" if settings.TEST_SERVER_MODE else "ec2.us-west-2.amazonaws.com")
|
||||||
return _process_lamda(pfunc)
|
return _process_lambda(func_str)
|
||||||
|
|
||||||
|
|
||||||
@mock_lambda
|
@mock_lambda
|
||||||
@ -58,7 +62,7 @@ def test_invoke_requestresponse_function():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'ZipFile': get_test_zip_file1(),
|
'ZipFile': get_test_zip_file1(),
|
||||||
},
|
},
|
||||||
@ -73,10 +77,13 @@ def test_invoke_requestresponse_function():
|
|||||||
Payload=json.dumps(in_data))
|
Payload=json.dumps(in_data))
|
||||||
|
|
||||||
success_result["StatusCode"].should.equal(202)
|
success_result["StatusCode"].should.equal(202)
|
||||||
base64.b64decode(success_result["LogResult"]).decode(
|
result_obj = json.loads(
|
||||||
'utf-8').should.equal(json.dumps(in_data))
|
base64.b64decode(success_result["LogResult"]).decode('utf-8'))
|
||||||
json.loads(success_result["Payload"].read().decode(
|
|
||||||
'utf-8')).should.equal(in_data)
|
result_obj.should.equal(in_data)
|
||||||
|
|
||||||
|
payload = success_result["Payload"].read().decode('utf-8')
|
||||||
|
json.loads(payload).should.equal(in_data)
|
||||||
|
|
||||||
|
|
||||||
@mock_lambda
|
@mock_lambda
|
||||||
@ -86,7 +93,7 @@ def test_invoke_event_function():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'ZipFile': get_test_zip_file1(),
|
'ZipFile': get_test_zip_file1(),
|
||||||
},
|
},
|
||||||
@ -110,36 +117,47 @@ def test_invoke_event_function():
|
|||||||
'utf-8')).should.equal({})
|
'utf-8')).should.equal({})
|
||||||
|
|
||||||
|
|
||||||
@mock_ec2
|
if settings.TEST_SERVER_MODE:
|
||||||
@mock_lambda
|
@mock_ec2
|
||||||
def test_invoke_function_get_ec2_volume():
|
@mock_lambda
|
||||||
conn = boto3.resource("ec2", "us-west-2")
|
def test_invoke_function_get_ec2_volume():
|
||||||
vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2')
|
conn = boto3.resource("ec2", "us-west-2")
|
||||||
vol = conn.Volume(vol.id)
|
vol = conn.create_volume(Size=99, AvailabilityZone='us-west-2')
|
||||||
|
vol = conn.Volume(vol.id)
|
||||||
|
|
||||||
conn = boto3.client('lambda', 'us-west-2')
|
conn = boto3.client('lambda', 'us-west-2')
|
||||||
conn.create_function(
|
conn.create_function(
|
||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'ZipFile': get_test_zip_file2(),
|
'ZipFile': get_test_zip_file2(),
|
||||||
},
|
},
|
||||||
Description='test lambda function',
|
Description='test lambda function',
|
||||||
Timeout=3,
|
Timeout=3,
|
||||||
MemorySize=128,
|
MemorySize=128,
|
||||||
Publish=True,
|
Publish=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
in_data = {'volume_id': vol.id}
|
in_data = {'volume_id': vol.id}
|
||||||
result = conn.invoke(FunctionName='testFunction',
|
result = conn.invoke(FunctionName='testFunction',
|
||||||
InvocationType='RequestResponse', Payload=json.dumps(in_data))
|
InvocationType='RequestResponse', Payload=json.dumps(in_data))
|
||||||
result["StatusCode"].should.equal(202)
|
result["StatusCode"].should.equal(202)
|
||||||
msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % (
|
msg = 'get volume details for %s\nVolume - %s state=%s, size=%s\n%s' % (
|
||||||
vol.id, vol.id, vol.state, vol.size, json.dumps(in_data))
|
vol.id, vol.id, vol.state, vol.size, json.dumps(in_data))
|
||||||
base64.b64decode(result["LogResult"]).decode('utf-8').should.equal(msg)
|
|
||||||
result['Payload'].read().decode('utf-8').should.equal(msg)
|
log_result = base64.b64decode(result["LogResult"]).decode('utf-8')
|
||||||
|
|
||||||
|
# fix for running under travis (TODO: investigate why it has an extra newline)
|
||||||
|
log_result = log_result.replace('\n\n', '\n')
|
||||||
|
log_result.should.equal(msg)
|
||||||
|
|
||||||
|
payload = result['Payload'].read().decode('utf-8')
|
||||||
|
|
||||||
|
# fix for running under travis (TODO: investigate why it has an extra newline)
|
||||||
|
payload = payload.replace('\n\n', '\n')
|
||||||
|
payload.should.equal(msg)
|
||||||
|
|
||||||
|
|
||||||
@mock_lambda
|
@mock_lambda
|
||||||
@ -150,7 +168,7 @@ def test_create_based_on_s3_with_missing_bucket():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'S3Bucket': 'this-bucket-does-not-exist',
|
'S3Bucket': 'this-bucket-does-not-exist',
|
||||||
'S3Key': 'test.zip',
|
'S3Key': 'test.zip',
|
||||||
@ -181,7 +199,7 @@ def test_create_function_from_aws_bucket():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'S3Bucket': 'test-bucket',
|
'S3Bucket': 'test-bucket',
|
||||||
'S3Key': 'test.zip',
|
'S3Key': 'test.zip',
|
||||||
@ -202,10 +220,10 @@ def test_create_function_from_aws_bucket():
|
|||||||
result.pop('LastModified')
|
result.pop('LastModified')
|
||||||
result.should.equal({
|
result.should.equal({
|
||||||
'FunctionName': 'testFunction',
|
'FunctionName': 'testFunction',
|
||||||
'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction',
|
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
|
||||||
'Runtime': 'python2.7',
|
'Runtime': 'python2.7',
|
||||||
'Role': 'test-iam-role',
|
'Role': 'test-iam-role',
|
||||||
'Handler': 'lambda_function.handler',
|
'Handler': 'lambda_function.lambda_handler',
|
||||||
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
||||||
"CodeSize": len(zip_content),
|
"CodeSize": len(zip_content),
|
||||||
'Description': 'test lambda function',
|
'Description': 'test lambda function',
|
||||||
@ -230,7 +248,7 @@ def test_create_function_from_zipfile():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'ZipFile': zip_content,
|
'ZipFile': zip_content,
|
||||||
},
|
},
|
||||||
@ -247,10 +265,10 @@ def test_create_function_from_zipfile():
|
|||||||
|
|
||||||
result.should.equal({
|
result.should.equal({
|
||||||
'FunctionName': 'testFunction',
|
'FunctionName': 'testFunction',
|
||||||
'FunctionArn': 'arn:aws:lambda:123456789012:function:testFunction',
|
'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
|
||||||
'Runtime': 'python2.7',
|
'Runtime': 'python2.7',
|
||||||
'Role': 'test-iam-role',
|
'Role': 'test-iam-role',
|
||||||
'Handler': 'lambda_function.handler',
|
'Handler': 'lambda_function.lambda_handler',
|
||||||
'CodeSize': len(zip_content),
|
'CodeSize': len(zip_content),
|
||||||
'Description': 'test lambda function',
|
'Description': 'test lambda function',
|
||||||
'Timeout': 3,
|
'Timeout': 3,
|
||||||
@ -281,7 +299,7 @@ def test_get_function():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'S3Bucket': 'test-bucket',
|
'S3Bucket': 'test-bucket',
|
||||||
'S3Key': 'test.zip',
|
'S3Key': 'test.zip',
|
||||||
@ -301,16 +319,16 @@ def test_get_function():
|
|||||||
|
|
||||||
result.should.equal({
|
result.should.equal({
|
||||||
"Code": {
|
"Code": {
|
||||||
"Location": "s3://lambda-functions.aws.amazon.com/test.zip",
|
"Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region),
|
||||||
"RepositoryType": "S3"
|
"RepositoryType": "S3"
|
||||||
},
|
},
|
||||||
"Configuration": {
|
"Configuration": {
|
||||||
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
||||||
"CodeSize": len(zip_content),
|
"CodeSize": len(zip_content),
|
||||||
"Description": "test lambda function",
|
"Description": "test lambda function",
|
||||||
"FunctionArn": "arn:aws:lambda:123456789012:function:testFunction",
|
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
|
||||||
"FunctionName": "testFunction",
|
"FunctionName": "testFunction",
|
||||||
"Handler": "lambda_function.handler",
|
"Handler": "lambda_function.lambda_handler",
|
||||||
"MemorySize": 128,
|
"MemorySize": 128,
|
||||||
"Role": "test-iam-role",
|
"Role": "test-iam-role",
|
||||||
"Runtime": "python2.7",
|
"Runtime": "python2.7",
|
||||||
@ -339,7 +357,7 @@ def test_delete_function():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'S3Bucket': 'test-bucket',
|
'S3Bucket': 'test-bucket',
|
||||||
'S3Key': 'test.zip',
|
'S3Key': 'test.zip',
|
||||||
@ -383,7 +401,7 @@ def test_list_create_list_get_delete_list():
|
|||||||
FunctionName='testFunction',
|
FunctionName='testFunction',
|
||||||
Runtime='python2.7',
|
Runtime='python2.7',
|
||||||
Role='test-iam-role',
|
Role='test-iam-role',
|
||||||
Handler='lambda_function.handler',
|
Handler='lambda_function.lambda_handler',
|
||||||
Code={
|
Code={
|
||||||
'S3Bucket': 'test-bucket',
|
'S3Bucket': 'test-bucket',
|
||||||
'S3Key': 'test.zip',
|
'S3Key': 'test.zip',
|
||||||
@ -395,16 +413,16 @@ def test_list_create_list_get_delete_list():
|
|||||||
)
|
)
|
||||||
expected_function_result = {
|
expected_function_result = {
|
||||||
"Code": {
|
"Code": {
|
||||||
"Location": "s3://lambda-functions.aws.amazon.com/test.zip",
|
"Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region),
|
||||||
"RepositoryType": "S3"
|
"RepositoryType": "S3"
|
||||||
},
|
},
|
||||||
"Configuration": {
|
"Configuration": {
|
||||||
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
||||||
"CodeSize": len(zip_content),
|
"CodeSize": len(zip_content),
|
||||||
"Description": "test lambda function",
|
"Description": "test lambda function",
|
||||||
"FunctionArn": "arn:aws:lambda:123456789012:function:testFunction",
|
"FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
|
||||||
"FunctionName": "testFunction",
|
"FunctionName": "testFunction",
|
||||||
"Handler": "lambda_function.handler",
|
"Handler": "lambda_function.lambda_handler",
|
||||||
"MemorySize": 128,
|
"MemorySize": 128,
|
||||||
"Role": "test-iam-role",
|
"Role": "test-iam-role",
|
||||||
"Runtime": "python2.7",
|
"Runtime": "python2.7",
|
||||||
@ -437,12 +455,12 @@ def test_list_create_list_get_delete_list():
|
|||||||
@mock_lambda
|
@mock_lambda
|
||||||
def test_invoke_lambda_error():
|
def test_invoke_lambda_error():
|
||||||
lambda_fx = """
|
lambda_fx = """
|
||||||
def lambda_handler(event, context):
|
def lambda_handler(event, context):
|
||||||
raise Exception('failsauce')
|
raise Exception('failsauce')
|
||||||
"""
|
"""
|
||||||
zip_output = io.BytesIO()
|
zip_output = io.BytesIO()
|
||||||
zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
|
zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
|
||||||
zip_file.writestr('lambda_function.zip', lambda_fx)
|
zip_file.writestr('lambda_function.py', lambda_fx)
|
||||||
zip_file.close()
|
zip_file.close()
|
||||||
zip_output.seek(0)
|
zip_output.seek(0)
|
||||||
|
|
||||||
@ -605,13 +623,15 @@ def test_get_function_created_with_zipfile():
|
|||||||
response['Configuration'].pop('LastModified')
|
response['Configuration'].pop('LastModified')
|
||||||
|
|
||||||
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
|
response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
|
||||||
assert 'Code' not in response
|
assert len(response['Code']) == 2
|
||||||
|
assert response['Code']['RepositoryType'] == 'S3'
|
||||||
|
assert response['Code']['Location'].startswith('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com'.format(_lambda_region))
|
||||||
response['Configuration'].should.equal(
|
response['Configuration'].should.equal(
|
||||||
{
|
{
|
||||||
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
"CodeSha256": hashlib.sha256(zip_content).hexdigest(),
|
||||||
"CodeSize": len(zip_content),
|
"CodeSize": len(zip_content),
|
||||||
"Description": "test lambda function",
|
"Description": "test lambda function",
|
||||||
"FunctionArn": "arn:aws:lambda:123456789012:function:testFunction",
|
"FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region),
|
||||||
"FunctionName": "testFunction",
|
"FunctionName": "testFunction",
|
||||||
"Handler": "lambda_function.handler",
|
"Handler": "lambda_function.handler",
|
||||||
"MemorySize": 128,
|
"MemorySize": 128,
|
||||||
|
14
tests/test_logs/test_logs.py
Normal file
14
tests/test_logs/test_logs.py
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
import boto3
|
||||||
|
import sure # noqa
|
||||||
|
|
||||||
|
from moto import mock_logs, settings
|
||||||
|
|
||||||
|
_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'
|
||||||
|
|
||||||
|
|
||||||
|
@mock_logs
|
||||||
|
def test_log_group_create():
|
||||||
|
conn = boto3.client('logs', 'us-west-2')
|
||||||
|
log_group_name = 'dummy'
|
||||||
|
response = conn.create_log_group(logGroupName=log_group_name)
|
||||||
|
response = conn.delete_log_group(logGroupName=log_group_name)
|
5
travis_moto_server.sh
Executable file
5
travis_moto_server.sh
Executable file
@ -0,0 +1,5 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
pip install flask
|
||||||
|
pip install /moto/dist/moto*.gz
|
||||||
|
moto_server -H 0.0.0.0 -p 5000
|
31
wait_for.py
Executable file
31
wait_for.py
Executable file
@ -0,0 +1,31 @@
|
|||||||
|
import time
|
||||||
|
|
||||||
|
try:
|
||||||
|
# py2
|
||||||
|
import urllib2 as urllib
|
||||||
|
from urllib2 import URLError
|
||||||
|
import socket
|
||||||
|
import httplib
|
||||||
|
|
||||||
|
EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine)
|
||||||
|
except ImportError:
|
||||||
|
# py3
|
||||||
|
import urllib.request as urllib
|
||||||
|
from urllib.error import URLError
|
||||||
|
|
||||||
|
EXCEPTIONS = (URLError, ConnectionResetError)
|
||||||
|
|
||||||
|
|
||||||
|
start_ts = time.time()
|
||||||
|
print("Waiting for service to come up")
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
urllib.urlopen('http://localhost:5000/', timeout=1)
|
||||||
|
break
|
||||||
|
except EXCEPTIONS:
|
||||||
|
elapsed_s = time.time() - start_ts
|
||||||
|
if elapsed_s > 30:
|
||||||
|
raise
|
||||||
|
|
||||||
|
print('.')
|
||||||
|
time.sleep(1)
|
Loading…
x
Reference in New Issue
Block a user