Merge branch 'master' into bugfix/1615-cloudwatch-statistics

Steve Pulec 2018-05-29 22:19:25 -04:00 committed by GitHub
commit 69587c151c
22 changed files with 310 additions and 36 deletions

View File

@@ -675,3 +675,4 @@ lambda_backends = {_region.name: LambdaBackend(_region.name)
for _region in boto.awslambda.regions()}
lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2')
lambda_backends['us-gov-west-1'] = LambdaBackend('us-gov-west-1')

View File

@@ -295,6 +295,14 @@ class Job(threading.Thread, BaseModel):
}
if self.job_stopped:
result['stoppedAt'] = datetime2int(self.job_stopped_at)
result['container'] = {}
result['container']['command'] = ['/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"']
result['container']['privileged'] = False
result['container']['readonlyRootFilesystem'] = False
result['container']['ulimits'] = {}
result['container']['vcpus'] = 1
result['container']['volumes'] = ''
result['container']['logStreamName'] = self.log_stream_name
if self.job_stopped_reason is not None:
result['statusReason'] = self.job_stopped_reason
return result
@@ -378,6 +386,7 @@ class Job(threading.Thread, BaseModel):
# Send to cloudwatch
log_group = '/aws/batch/job'
stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id)
self.log_stream_name = stream_name
self._log_backend.ensure_log_group(log_group, None)
self._log_backend.create_log_stream(log_group, stream_name)
self._log_backend.put_log_events(log_group, stream_name, logs, None)
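For reference, a rough sketch of how the new container block and log stream name could be read back through boto3 against a mocked Batch backend (the job id is a placeholder, and the job is assumed to have already run to completion):

import boto3

def fetch_job_output(job_id):
    # Assumes mock_batch/mock_logs are already active and the job with this
    # (hypothetical) id has finished, so describe() populated logStreamName.
    batch = boto3.client('batch', region_name='us-east-1')
    logs = boto3.client('logs', region_name='us-east-1')

    job = batch.describe_jobs(jobs=[job_id])['jobs'][0]
    stream_name = job['container']['logStreamName']

    # The stream lives in the '/aws/batch/job' group created by the job thread
    events = logs.get_log_events(logGroupName='/aws/batch/job',
                                 logStreamName=stream_name)
    return [event['message'] for event in events['events']]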

View File

@@ -229,8 +229,13 @@ class CloudWatchBackend(BaseBackend):
def put_metric_data(self, namespace, metric_data):
for metric_member in metric_data:
# Preserve "datetime" for get_metric_statistics comparisons
timestamp = metric_member.get('Timestamp')
if timestamp is not None and type(timestamp) != datetime:
timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
timestamp = timestamp.replace(tzinfo=tzutc())
self.metric_data.append(MetricDatum(
namespace, metric_member['MetricName'], float(metric_member.get('Value', 0)), metric_member.get('Dimensions.member', _EMPTY_LIST), metric_member.get('Timestamp')))
namespace, metric_member['MetricName'], float(metric_member.get('Value', 0)), metric_member.get('Dimensions.member', _EMPTY_LIST), timestamp))
def get_metric_statistics(self, namespace, metric_name, start_time, end_time, period, stats):
period_delta = timedelta(seconds=period)
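The effect of the change above is that a Timestamp arriving as an ISO-8601 string is normalized to a timezone-aware datetime before the MetricDatum is stored, so the start/end window comparisons in get_metric_statistics behave. A minimal sketch of that normalization in isolation (hypothetical helper, same format string as the code above):

from datetime import datetime
from dateutil.tz import tzutc

def normalize_timestamp(timestamp):
    # Querystring values arrive as strings like '2018-05-29T22:19:25.000Z';
    # values that are already datetimes pass through unchanged.
    if timestamp is not None and not isinstance(timestamp, datetime):
        timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
        timestamp = timestamp.replace(tzinfo=tzutc())
    return timestamp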

View File

@@ -272,7 +272,7 @@ GET_METRIC_STATISTICS_TEMPLATE = """<GetMetricStatisticsResponse xmlns="http://m
</ResponseMetadata>
<GetMetricStatisticsResult>
<Label> {{ label }} </Label>
<Label>{{ label }}</Label>
<Datapoints>
{% for datapoint in datapoints %}
<Datapoint>

View File

@@ -132,6 +132,9 @@ class LogGroup:
def __init__(self, region, name, tags):
self.name = name
self.region = region
self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
region=region, log_group=name)
self.creationTime = unix_time_millis()
self.tags = tags
self.streams = dict() # {name: LogStream}
@@ -197,6 +200,16 @@ class LogGroup:
searched_streams = [{"logStreamName": stream.logStreamName, "searchedCompletely": True} for stream in streams]
return events_page, next_token, searched_streams
def to_describe_dict(self):
return {
"arn": self.arn,
"creationTime": self.creationTime,
"logGroupName": self.name,
"metricFilterCount": 0,
"retentionInDays": 30,
"storedBytes": sum(s.storedBytes for s in self.streams.values()),
}
class LogsBackend(BaseBackend):
def __init__(self, region_name):
@@ -223,6 +236,21 @@ class LogsBackend(BaseBackend):
raise ResourceNotFoundException()
del self.groups[log_group_name]
def describe_log_groups(self, limit, log_group_name_prefix, next_token):
if log_group_name_prefix is None:
log_group_name_prefix = ''
if next_token is None:
next_token = 0
groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix))
groups_page = groups[next_token:next_token + limit]
next_token += limit
if next_token >= len(groups):
next_token = None
return groups_page, next_token
def create_log_stream(self, log_group_name, log_stream_name):
if log_group_name not in self.groups:
raise ResourceNotFoundException()
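The nextToken handled by describe_log_groups above is simply an integer offset into the prefix-filtered, sorted list of groups. A small sketch of that slicing contract, using a hypothetical helper:

def paginate(items, limit, next_token=None):
    # next_token is an offset into the already-sorted list; None means "start"
    start = next_token or 0
    page = items[start:start + limit]
    next_token = start + limit
    if next_token >= len(items):
        next_token = None  # no more pages; callers stop when they see this
    return page, next_token

# e.g. paginate(['a', 'b', 'c'], limit=2)                -> (['a', 'b'], 2)
#      paginate(['a', 'b', 'c'], limit=2, next_token=2)  -> (['c'], None)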

View File

@@ -33,6 +33,18 @@ class LogsResponse(BaseResponse):
self.logs_backend.delete_log_group(log_group_name)
return ''
def describe_log_groups(self):
log_group_name_prefix = self._get_param('logGroupNamePrefix')
next_token = self._get_param('nextToken')
limit = self._get_param('limit', 50)
assert limit <= 50
groups, next_token = self.logs_backend.describe_log_groups(
limit, log_group_name_prefix, next_token)
return json.dumps({
"logGroups": groups,
"nextToken": next_token
})
def create_log_stream(self):
log_group_name = self._get_param('logGroupName')
log_stream_name = self._get_param('logStreamName')

View File

@@ -73,7 +73,8 @@ class Cluster(TaggableResourceMixin, BaseModel):
preferred_maintenance_window, cluster_parameter_group_name,
automated_snapshot_retention_period, port, cluster_version,
allow_version_upgrade, number_of_nodes, publicly_accessible,
encrypted, region_name, tags=None, iam_roles_arn=None):
encrypted, region_name, tags=None, iam_roles_arn=None,
restored_from_snapshot=False):
super(Cluster, self).__init__(region_name, tags)
self.redshift_backend = redshift_backend
self.cluster_identifier = cluster_identifier
@@ -119,6 +120,7 @@ class Cluster(TaggableResourceMixin, BaseModel):
self.number_of_nodes = 1
self.iam_roles_arn = iam_roles_arn or []
self.restored_from_snapshot = restored_from_snapshot
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
@@ -242,7 +244,15 @@ class Cluster(TaggableResourceMixin, BaseModel):
"IamRoleArn": iam_role_arn
} for iam_role_arn in self.iam_roles_arn]
}
if self.restored_from_snapshot:
json_response['RestoreStatus'] = {
'Status': 'completed',
'CurrentRestoreRateInMegaBytesPerSecond': 123.0,
'SnapshotSizeInMegaBytes': 123,
'ProgressInMegaBytes': 123,
'ElapsedTimeInSeconds': 123,
'EstimatedTimeToCompletionInSeconds': 123
}
try:
json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status
except AttributeError:
@@ -639,7 +649,8 @@ class RedshiftBackend(BaseBackend):
"cluster_version": snapshot.cluster.cluster_version,
"number_of_nodes": snapshot.cluster.number_of_nodes,
"encrypted": snapshot.cluster.encrypted,
"tags": snapshot.cluster.tags
"tags": snapshot.cluster.tags,
"restored_from_snapshot": True
}
create_kwargs.update(kwargs)
return self.create_cluster(**create_kwargs)

View File

@@ -859,6 +859,9 @@ class S3Backend(BaseBackend):
if str(key.version_id) != str(version_id)
]
)
if not bucket.keys.getlist(key_name):
bucket.keys.pop(key_name)
return True
except KeyError:
return False

View File

@@ -706,8 +706,11 @@ class ResponseObject(_TemplateEnvironmentMixin):
if 'x-amz-copy-source' in request.headers:
# Copy key
src_key_parsed = urlparse(unquote(request.headers.get("x-amz-copy-source")))
src_bucket, src_key = src_key_parsed.path.lstrip("/").split("/", 1)
# the copy source can carry a quoted "?versionId=..." query string, so
# parse the raw header first and only unquote the path portion
src_key_parsed = urlparse(request.headers.get("x-amz-copy-source"))
src_bucket, src_key = unquote(src_key_parsed.path).\
lstrip("/").split("/", 1)
src_version_id = parse_qs(src_key_parsed.query).get(
'versionId', [None])[0]
self.backend.copy_key(src_bucket, src_key, bucket_name, key_name,
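A rough boto3-level sketch of the case the reparse is meant to handle: copying an older object version, where the client sends x-amz-copy-source as bucket/key?versionId=... (bucket and key names are placeholders, and the final assertion assumes the backend honours the requested version, as the change intends):

import boto3
from moto import mock_s3

@mock_s3
def copy_old_version():
    s3 = boto3.client('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='src-bucket')
    s3.create_bucket(Bucket='dst-bucket')
    s3.put_bucket_versioning(Bucket='src-bucket',
                             VersioningConfiguration={'Status': 'Enabled'})

    first = s3.put_object(Bucket='src-bucket', Key='key', Body=b'v1')
    s3.put_object(Bucket='src-bucket', Key='key', Body=b'v2')

    # boto3 serializes this as x-amz-copy-source: src-bucket/key?versionId=...
    s3.copy_object(Bucket='dst-bucket', Key='key',
                   CopySource={'Bucket': 'src-bucket', 'Key': 'key',
                               'VersionId': first['VersionId']})
    assert s3.get_object(Bucket='dst-bucket',
                         Key='key')['Body'].read() == b'v1'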

View File

@@ -186,9 +186,17 @@ def main(argv=sys.argv[1:]):
parser.add_argument(
'-s', '--ssl',
action='store_true',
help='Enable SSL encrypted connection (use https://... URL)',
help='Enable SSL encrypted connection with auto-generated certificate (use https://... URL)',
default=False
)
parser.add_argument(
'-c', '--ssl-cert', type=str,
help='Path to SSL certificate',
default=None)
parser.add_argument(
'-k', '--ssl-key', type=str,
help='Path to SSL private key',
default=None)
args = parser.parse_args(argv)
@@ -197,9 +205,15 @@ def main(argv=sys.argv[1:]):
create_backend_app, service=args.service)
main_app.debug = True
ssl_context = None
if args.ssl_key and args.ssl_cert:
ssl_context = (args.ssl_cert, args.ssl_key)
elif args.ssl:
ssl_context = 'adhoc'
run_simple(args.host, args.port, main_app,
threaded=True, use_reloader=args.reload,
ssl_context='adhoc' if args.ssl else None)
ssl_context=ssl_context)
if __name__ == '__main__':
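A small sketch of the intended precedence between the new flags (paths and service name are placeholders): an explicit certificate/key pair wins, otherwise --ssl falls back to werkzeug's auto-generated 'adhoc' certificate.

def resolve_ssl_context(ssl, ssl_cert, ssl_key):
    # Mirrors the argument handling above
    if ssl_cert and ssl_key:
        return (ssl_cert, ssl_key)   # user-supplied certificate
    if ssl:
        return 'adhoc'               # werkzeug generates a throwaway cert
    return None                      # plain HTTP

# moto_server s3 -c cert.pem -k key.pem  ->  ('cert.pem', 'key.pem')
# moto_server s3 --ssl                   ->  'adhoc'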

View File

@@ -1,6 +1,7 @@
from __future__ import unicode_literals
import email
from email.utils import parseaddr
from moto.core import BaseBackend, BaseModel
from .exceptions import MessageRejectedError
@@ -84,13 +85,27 @@ class SESBackend(BaseBackend):
return message
def send_raw_email(self, source, destinations, raw_data):
if source not in self.addresses:
raise MessageRejectedError(
"Did not have authority to send from email %s" % source
)
if source is not None:
_, source_email_address = parseaddr(source)
if source_email_address not in self.addresses:
raise MessageRejectedError(
"Did not have authority to send from email %s" % source_email_address
)
recipient_count = len(destinations)
message = email.message_from_string(raw_data)
if source is None:
if message['from'] is None:
raise MessageRejectedError(
"Source not specified"
)
_, source_email_address = parseaddr(message['from'])
if source_email_address not in self.addresses:
raise MessageRejectedError(
"Did not have authority to send from email %s" % source_email_address
)
for header in 'TO', 'CC', 'BCC':
recipient_count += sum(
d.strip() and 1 or 0

View File

@@ -75,7 +75,10 @@ class EmailResponse(BaseResponse):
return template.render(message=message)
def send_raw_email(self):
source = self.querystring.get('Source')[0]
source = self.querystring.get('Source')
if source is not None:
source, = source
raw_data = self.querystring.get('RawMessage.Data')[0]
raw_data = base64.b64decode(raw_data)
if six.PY3:

View File

@@ -240,7 +240,7 @@ class SNSBackend(BaseBackend):
self.sms_attributes.update(attrs)
def create_topic(self, name):
fails_constraints = not re.match(r'^[a-zA-Z0-9](?:[A-Za-z0-9_-]{0,253}[a-zA-Z0-9])?$', name)
fails_constraints = not re.match(r'^[a-zA-Z0-9_-]{1,256}$', name)
if fails_constraints:
raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.")
candidate_topic = Topic(name, self)
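The old pattern required the first and last characters to be alphanumeric, so otherwise-valid names such as '-some-topic-' were rejected; the relaxed pattern only enforces the documented character set and the 1-256 length. A quick sketch of what changes:

import re

new_pattern = re.compile(r'^[a-zA-Z0-9_-]{1,256}$')

# Previously rejected, now accepted:
for name in ('-some-topic-', '_some-topic_', 'a' * 256):
    assert new_pattern.match(name)

# Still rejected:
assert not new_pattern.match('bad topic name')   # spaces not allowed
assert not new_pattern.match('a' * 257)          # longer than 256 characters
assert not new_pattern.match('')                 # empty name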

View File

@@ -234,11 +234,17 @@ class Queue(BaseModel):
self.last_modified_timestamp = now
def _setup_dlq(self, policy_json):
try:
self.redrive_policy = json.loads(policy_json)
except ValueError:
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json')
def _setup_dlq(self, policy):
if isinstance(policy, six.text_type):
try:
self.redrive_policy = json.loads(policy)
except ValueError:
raise RESTError('InvalidParameterValue', 'Redrive policy is not a dict or valid json')
elif isinstance(policy, dict):
self.redrive_policy = policy
else:
raise RESTError('InvalidParameterValue', 'Redrive policy is not a dict or valid json')
if 'deadLetterTargetArn' not in self.redrive_policy:
raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn')
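The redrive policy now has two accepted shapes: the plain SQS API hands the backend a JSON string, while CloudFormation (after resolving Fn::GetAtt in templates like the one added below) hands it an already-parsed dict. A minimal sketch of the two inputs that should produce the same redrive_policy (the ARN is a placeholder):

import json

dlq_arn = 'arn:aws:sqs:us-east-1:123456789012:deadletterqueue.fifo'
policy = {'deadLetterTargetArn': dlq_arn, 'maxReceiveCount': 5}

as_json_string = json.dumps(policy)   # what the SQS API path passes in
as_dict = policy                      # what the CloudFormation path passes in
# _setup_dlq accepts either form; anything else, or a policy missing
# deadLetterTargetArn, raises InvalidParameterValue.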

View File

@@ -23,7 +23,7 @@ install_requires = [
"docker>=2.5.1",
"jsondiff==1.1.1",
"aws-xray-sdk<0.96,>=0.93",
"responses",
"responses>=0.9.0",
]
extras_require = {

View File

@@ -148,10 +148,41 @@ dummy_import_template = {
}
}
dummy_redrive_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"MainQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": "mainqueue.fifo",
"FifoQueue": True,
"ContentBasedDeduplication": False,
"RedrivePolicy": {
"deadLetterTargetArn": {
"Fn::GetAtt": [
"DeadLetterQueue",
"Arn"
]
},
"maxReceiveCount": 5
}
}
},
"DeadLetterQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"FifoQueue": True
}
},
}
}
dummy_template_json = json.dumps(dummy_template)
dummy_update_template_json = json.dumps(dummy_update_template)
dummy_output_template_json = json.dumps(dummy_output_template)
dummy_import_template_json = json.dumps(dummy_import_template)
dummy_redrive_template_json = json.dumps(dummy_redrive_template)
@mock_cloudformation
@@ -746,3 +777,19 @@ def test_stack_with_imports():
output = output_stack.outputs[0]['OutputValue']
queue = ec2_resource.get_queue_by_name(QueueName=output)
queue.should_not.be.none
@mock_sqs
@mock_cloudformation
def test_non_json_redrive_policy():
cf = boto3.resource('cloudformation', region_name='us-east-1')
stack = cf.create_stack(
StackName="test_stack1",
TemplateBody=dummy_redrive_template_json
)
stack.Resource('MainQueue').resource_status\
.should.equal("CREATE_COMPLETE")
stack.Resource('DeadLetterQueue').resource_status\
.should.equal("CREATE_COMPLETE")

View File

@@ -204,6 +204,7 @@ def test_get_metric_statistics():
dict(
MetricName='metric',
Value=1.5,
Timestamp=utc_now
)
]
)
@@ -211,7 +212,7 @@ def test_get_metric_statistics():
stats = conn.get_metric_statistics(
Namespace='tester',
MetricName='metric',
StartTime=utc_now,
StartTime=utc_now - timedelta(seconds=60),
EndTime=utc_now + timedelta(seconds=60),
Period=60,
Statistics=['SampleCount', 'Sum']

View File

@@ -13,6 +13,10 @@ def test_log_group_create():
conn = boto3.client('logs', 'us-west-2')
log_group_name = 'dummy'
response = conn.create_log_group(logGroupName=log_group_name)
response = conn.describe_log_groups(logGroupNamePrefix=log_group_name)
assert len(response['logGroups']) == 1
response = conn.delete_log_group(logGroupName=log_group_name)

View File

@@ -818,6 +818,48 @@ def test_create_cluster_from_snapshot():
new_cluster['Endpoint']['Port'].should.equal(1234)
@mock_redshift
def test_create_cluster_from_snapshot_with_waiter():
client = boto3.client('redshift', region_name='us-east-1')
original_cluster_identifier = 'original-cluster'
original_snapshot_identifier = 'original-snapshot'
new_cluster_identifier = 'new-cluster'
client.create_cluster(
ClusterIdentifier=original_cluster_identifier,
ClusterType='single-node',
NodeType='ds2.xlarge',
MasterUsername='username',
MasterUserPassword='password',
)
client.create_cluster_snapshot(
SnapshotIdentifier=original_snapshot_identifier,
ClusterIdentifier=original_cluster_identifier
)
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
Port=1234
)
response['Cluster']['ClusterStatus'].should.equal('creating')
client.get_waiter('cluster_restored').wait(
ClusterIdentifier=new_cluster_identifier,
WaiterConfig={
'Delay': 1,
'MaxAttempts': 2,
}
)
response = client.describe_clusters(
ClusterIdentifier=new_cluster_identifier
)
new_cluster = response['Clusters'][0]
new_cluster['NodeType'].should.equal('ds2.xlarge')
new_cluster['MasterUsername'].should.equal('username')
new_cluster['Endpoint']['Port'].should.equal(1234)
@mock_redshift
def test_create_cluster_from_non_existent_snapshot():
client = boto3.client('redshift', region_name='us-east-1')

View File

@@ -1405,6 +1405,19 @@ def test_boto3_deleted_versionings_list():
assert len(listed['Contents']) == 1
@mock_s3
def test_boto3_delete_versioned_bucket():
client = boto3.client('s3', region_name='us-east-1')
client.create_bucket(Bucket='blah')
client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'})
resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1')
client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"])
client.delete_bucket(Bucket='blah')
@mock_s3
def test_boto3_head_object_if_modified_since():
s3 = boto3.client('s3', region_name='us-east-1')

View File

@@ -136,3 +136,59 @@ def test_send_raw_email():
send_quota = conn.get_send_quota()
sent_count = int(send_quota['SentLast24Hours'])
sent_count.should.equal(2)
@mock_ses
def test_send_raw_email_without_source():
conn = boto3.client('ses', region_name='us-east-1')
message = MIMEMultipart()
message['Subject'] = 'Test'
message['From'] = 'test@example.com'
message['To'] = 'to@example.com, foo@example.com'
# Message body
part = MIMEText('test file attached')
message.attach(part)
# Attachment
part = MIMEText('contents of test file here')
part.add_header('Content-Disposition', 'attachment; filename=test.txt')
message.attach(part)
kwargs = dict(
RawMessage={'Data': message.as_string()},
)
conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
conn.verify_email_identity(EmailAddress="test@example.com")
conn.send_raw_email(**kwargs)
send_quota = conn.get_send_quota()
sent_count = int(send_quota['SentLast24Hours'])
sent_count.should.equal(2)
@mock_ses
def test_send_raw_email_without_source_or_from():
conn = boto3.client('ses', region_name='us-east-1')
message = MIMEMultipart()
message['Subject'] = 'Test'
message['To'] = 'to@example.com, foo@example.com'
# Message body
part = MIMEText('test file attached')
message.attach(part)
# Attachment
part = MIMEText('contents of test file here')
part.add_header('Content-Disposition', 'attachment; filename=test.txt')
message.attach(part)
kwargs = dict(
RawMessage={'Data': message.as_string()},
)
conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)

View File

@@ -13,23 +13,24 @@ from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POL
@mock_sns
def test_create_and_delete_topic():
conn = boto3.client("sns", region_name="us-east-1")
conn.create_topic(Name="some-topic")
for topic_name in ('some-topic', '-some-topic-', '_some-topic_', 'a' * 256):
conn.create_topic(Name=topic_name)
topics_json = conn.list_topics()
topics = topics_json["Topics"]
topics.should.have.length_of(1)
topics[0]['TopicArn'].should.equal(
"arn:aws:sns:{0}:123456789012:some-topic"
.format(conn._client_config.region_name)
)
topics_json = conn.list_topics()
topics = topics_json["Topics"]
topics.should.have.length_of(1)
topics[0]['TopicArn'].should.equal(
"arn:aws:sns:{0}:123456789012:{1}"
.format(conn._client_config.region_name, topic_name)
)
# Delete the topic
conn.delete_topic(TopicArn=topics[0]['TopicArn'])
# Delete the topic
conn.delete_topic(TopicArn=topics[0]['TopicArn'])
# And there should now be 0 topics
topics_json = conn.list_topics()
topics = topics_json["Topics"]
topics.should.have.length_of(0)
# And there should now be 0 topics
topics_json = conn.list_topics()
topics = topics_json["Topics"]
topics.should.have.length_of(0)
@mock_sns
def test_create_topic_should_be_indempodent():