# moto/tests/test_logs/test_logs.py
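"""Unit tests for moto's CloudWatch Logs mock (mock_logs)."""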

import boto3
import sure # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings
from nose.tools import assert_raises
_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2'


@mock_logs
def test_log_group_create():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    conn.create_log_group(logGroupName=log_group_name)

    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 1

    conn.delete_log_group(logGroupName=log_group_name)
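    # Follow-up check, not in the original test (assumption: the mock drops the
    # group on delete_log_group): once deleted, the group should no longer be
    # returned by describe_log_groups.
    response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
    assert len(response['logGroups']) == 0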


@mock_logs
def test_exceptions():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'dummy-stream'
    conn.create_log_group(logGroupName=log_group_name)

    # Creating a log group that already exists should raise a ClientError.
    with assert_raises(ClientError):
        conn.create_log_group(logGroupName=log_group_name)

    # describe_log_groups is not implemented yet
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    # Creating a log stream that already exists should raise a ClientError.
    with assert_raises(ClientError):
        conn.create_log_stream(
            logGroupName=log_group_name,
            logStreamName=log_stream_name
        )

    conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=[
            {
                'timestamp': 0,
                'message': 'line'
            },
        ],
    )

    # Putting events to a stream that does not exist should raise a ClientError.
    with assert_raises(ClientError):
        conn.put_log_events(
            logGroupName=log_group_name,
            logStreamName="invalid-stream",
            logEvents=[
                {
                    'timestamp': 0,
                    'message': 'line'
                },
            ],
        )


@mock_logs
def test_put_logs():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'stream'
    conn.create_log_group(logGroupName=log_group_name)
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    messages = [
        {'timestamp': 0, 'message': 'hello'},
        {'timestamp': 0, 'message': 'world'}
    ]
    putRes = conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=messages
    )
    res = conn.get_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    events = res['events']
    nextSequenceToken = putRes['nextSequenceToken']
    assert isinstance(nextSequenceToken, six.string_types)
    assert len(nextSequenceToken) == 56
    events.should.have.length_of(2)


@mock_logs
def test_filter_logs_interleaved():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'stream'
    conn.create_log_group(logGroupName=log_group_name)
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    messages = [
        {'timestamp': 0, 'message': 'hello'},
        {'timestamp': 0, 'message': 'world'}
    ]
    conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=messages
    )
    res = conn.filter_log_events(
        logGroupName=log_group_name,
        logStreamNames=[log_stream_name],
        interleaved=True,
    )
    events = res['events']
    for original_message, resulting_event in zip(messages, events):
        # eventId is generated by the backend; it should come back as a string.
        resulting_event['eventId'].should.equal(str(resulting_event['eventId']))
        resulting_event['timestamp'].should.equal(original_message['timestamp'])
        resulting_event['message'].should.equal(original_message['message'])
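

# A minimal follow-up sketch, not part of the original suite: it assumes the
# mocked backend also honours filter_log_events' filterPattern parameter
# (a real boto3 parameter; support in the mock is an assumption here).
# The test name and the 'hello' pattern are illustrative only.
@mock_logs
def test_filter_logs_with_pattern_sketch():
    conn = boto3.client('logs', 'us-west-2')
    log_group_name = 'dummy'
    log_stream_name = 'stream'
    conn.create_log_group(logGroupName=log_group_name)
    conn.create_log_stream(
        logGroupName=log_group_name,
        logStreamName=log_stream_name
    )
    conn.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=[
            {'timestamp': 0, 'message': 'hello'},
            {'timestamp': 0, 'message': 'world'},
        ],
    )
    res = conn.filter_log_events(
        logGroupName=log_group_name,
        logStreamNames=[log_stream_name],
        filterPattern='hello',  # assumption: pattern filtering is applied by the mock
        interleaved=True,
    )
    # If the pattern is applied, only the matching event should come back.
    assert [event['message'] for event in res['events']] == ['hello']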