Merge pull request #45 from spulec/master

Merge upstream
This commit is contained in:
Bert Blommers 2020-05-16 12:53:09 +01:00 committed by GitHub
commit 110987c228
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
61 changed files with 6510 additions and 596 deletions

File diff suppressed because it is too large

View File

@@ -49,9 +49,7 @@ mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams")
 mock_elasticbeanstalk = lazy_load(".elasticbeanstalk", "mock_elasticbeanstalk")
 mock_ec2 = lazy_load(".ec2", "mock_ec2")
 mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated")
-mock_ec2_instance_connect = lazy_load(
-    ".ec2_instance_connect", "mock_ec2_instance_connect"
-)
+mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect")
 mock_ecr = lazy_load(".ecr", "mock_ecr")
 mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated")
 mock_ecs = lazy_load(".ecs", "mock_ecs")
@@ -75,6 +73,7 @@ mock_kms = lazy_load(".kms", "mock_kms")
 mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated")
 mock_logs = lazy_load(".logs", "mock_logs")
 mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated")
+mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain")
 mock_opsworks = lazy_load(".opsworks", "mock_opsworks")
 mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated")
 mock_organizations = lazy_load(".organizations", "mock_organizations")

View File

@@ -56,8 +56,10 @@ class Deployment(BaseModel, dict):

 class IntegrationResponse(BaseModel, dict):
-    def __init__(self, status_code, selection_pattern=None):
-        self["responseTemplates"] = {"application/json": None}
+    def __init__(self, status_code, selection_pattern=None, response_templates=None):
+        if response_templates is None:
+            response_templates = {"application/json": None}
+        self["responseTemplates"] = response_templates
         self["statusCode"] = status_code
         if selection_pattern:
             self["selectionPattern"] = selection_pattern
@@ -72,8 +74,14 @@ class Integration(BaseModel, dict):
         self["requestTemplates"] = request_templates
         self["integrationResponses"] = {"200": IntegrationResponse(200)}

-    def create_integration_response(self, status_code, selection_pattern):
-        integration_response = IntegrationResponse(status_code, selection_pattern)
+    def create_integration_response(
+        self, status_code, selection_pattern, response_templates
+    ):
+        if response_templates == {}:
+            response_templates = None
+        integration_response = IntegrationResponse(
+            status_code, selection_pattern, response_templates
+        )
         self["integrationResponses"][status_code] = integration_response
         return integration_response
@@ -956,7 +964,7 @@ class APIGatewayBackend(BaseBackend):
             raise InvalidRequestInput()
         integration = self.get_integration(function_id, resource_id, method_type)
         integration_response = integration.create_integration_response(
-            status_code, selection_pattern
+            status_code, selection_pattern, response_templates
         )
         return integration_response
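The change above threads a response_templates argument through the backend's create_integration_response into the stored IntegrationResponse dict. A minimal sketch of what that means for the stored data, assuming the class is importable from moto.apigateway.models as shown in this file (the example values are made up):

# Hypothetical illustration of the new IntegrationResponse behaviour.
from moto.apigateway.models import IntegrationResponse

# Omitting response_templates keeps the old default of {"application/json": None}.
default_resp = IntegrationResponse(200, selection_pattern="2\\d{2}")
assert default_resp["responseTemplates"] == {"application/json": None}

# Passing templates now stores them instead of discarding them.
templated = IntegrationResponse(
    404, response_templates={"application/json": '{"message": "not found"}'}
)
assert templated["responseTemplates"]["application/json"] == '{"message": "not found"}'
assert templated["statusCode"] == 404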

View File

@@ -419,11 +419,8 @@ class FakeAutoScalingGroup(BaseModel):
         curr_instance_count = len(self.active_instances())

         if self.desired_capacity == curr_instance_count:
-            self.autoscaling_backend.update_attached_elbs(self.name)
-            self.autoscaling_backend.update_attached_target_groups(self.name)
-            return
-
-        if self.desired_capacity > curr_instance_count:
+            pass  # Nothing to do here
+        elif self.desired_capacity > curr_instance_count:
             # Need more instances
             count_needed = int(self.desired_capacity) - int(curr_instance_count)
@@ -447,6 +444,7 @@ class FakeAutoScalingGroup(BaseModel):
                 self.instance_states = list(
                     set(self.instance_states) - set(instances_to_remove)
                 )
+        if self.name in self.autoscaling_backend.autoscaling_groups:
             self.autoscaling_backend.update_attached_elbs(self.name)
             self.autoscaling_backend.update_attached_target_groups(self.name)
@@ -695,6 +693,7 @@ class AutoScalingBackend(BaseBackend):
         )
         group.instance_states.extend(new_instances)
         self.update_attached_elbs(group.name)
+        self.update_attached_target_groups(group.name)

     def set_instance_health(
         self, instance_id, health_status, should_respect_grace_period
@@ -938,7 +937,6 @@ class AutoScalingBackend(BaseBackend):
                 standby_instances.append(instance_state)
         if should_decrement:
             group.desired_capacity = group.desired_capacity - len(instance_ids)
-        else:
-            group.set_desired_capacity(group.desired_capacity)
+        group.set_desired_capacity(group.desired_capacity)
         return standby_instances, original_size, group.desired_capacity
@@ -951,6 +949,7 @@ class AutoScalingBackend(BaseBackend):
                 instance_state.lifecycle_state = "InService"
                 standby_instances.append(instance_state)
         group.desired_capacity = group.desired_capacity + len(instance_ids)
+        group.set_desired_capacity(group.desired_capacity)
         return standby_instances, original_size, group.desired_capacity

     def terminate_instance(self, instance_id, should_decrement):

View File

@@ -5,6 +5,8 @@ import time
 from collections import defaultdict
 import copy
 import datetime
+from gzip import GzipFile
 import docker
 import docker.errors
 import hashlib
@@ -988,6 +990,28 @@ class LambdaBackend(BaseBackend):
         func = self._lambdas.get_arn(function_arn)
         return func.invoke(json.dumps(event), {}, {})

+    def send_log_event(
+        self, function_arn, filter_name, log_group_name, log_stream_name, log_events
+    ):
+        data = {
+            "messageType": "DATA_MESSAGE",
+            "owner": ACCOUNT_ID,
+            "logGroup": log_group_name,
+            "logStream": log_stream_name,
+            "subscriptionFilters": [filter_name],
+            "logEvents": log_events,
+        }
+
+        output = io.BytesIO()
+        with GzipFile(fileobj=output, mode="w") as f:
+            f.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
+        payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8")
+
+        event = {"awslogs": {"data": payload_gz_encoded}}
+
+        func = self._lambdas.get_arn(function_arn)
+        return func.invoke(json.dumps(event), {}, {})
+
     def list_tags(self, resource):
         return self.get_function_by_arn(resource).tags
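send_log_event above packages the forwarded log events the same way CloudWatch Logs delivers them to Lambda: a gzipped, base64-encoded JSON document under event["awslogs"]["data"]. A small stand-alone sketch of unpacking such a payload on the receiving side (the helper name is illustrative, not part of the diff):

import base64
import gzip
import io
import json


def decode_awslogs_event(event):
    # Reverse the encoding used by send_log_event / the real CloudWatch Logs delivery.
    compressed = base64.b64decode(event["awslogs"]["data"])
    with gzip.GzipFile(fileobj=io.BytesIO(compressed)) as f:
        return json.loads(f.read().decode("utf-8"))


# Round-trip check mirroring the encoding side shown in the diff.
data = {"messageType": "DATA_MESSAGE", "logEvents": [{"id": "1", "message": "hello"}]}
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="w") as f:
    f.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
event = {"awslogs": {"data": base64.b64encode(buf.getvalue()).decode("utf-8")}}
assert decode_awslogs_event(event)["messageType"] == "DATA_MESSAGE"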

View File

@@ -21,7 +21,7 @@ BACKENDS = {
     "dynamodb2": ("dynamodb2", "dynamodb_backends2"),
     "dynamodbstreams": ("dynamodbstreams", "dynamodbstreams_backends"),
     "ec2": ("ec2", "ec2_backends"),
-    "ec2_instance_connect": ("ec2_instance_connect", "ec2_instance_connect_backends"),
+    "ec2instanceconnect": ("ec2instanceconnect", "ec2instanceconnect_backends"),
     "ecr": ("ecr", "ecr_backends"),
     "ecs": ("ecs", "ecs_backends"),
     "elasticbeanstalk": ("elasticbeanstalk", "eb_backends"),
@@ -39,6 +39,7 @@ BACKENDS = {
     "kms": ("kms", "kms_backends"),
     "lambda": ("awslambda", "lambda_backends"),
     "logs": ("logs", "logs_backends"),
+    "managedblockchain": ("managedblockchain", "managedblockchain_backends"),
     "moto_api": ("core", "moto_api_backends"),
     "opsworks": ("opsworks", "opsworks_backends"),
     "organizations": ("organizations", "organizations_backends"),

View File

@@ -149,3 +149,18 @@ class IncorrectDataType(MockValidationException):
     def __init__(self):
         super(IncorrectDataType, self).__init__(self.inc_data_type_msg)
+
+
+class ConditionalCheckFailed(ValueError):
+    msg = "The conditional request failed"
+
+    def __init__(self):
+        super(ConditionalCheckFailed, self).__init__(self.msg)
+
+
+class TransactionCanceledException(ValueError):
+    cancel_reason_msg = "Transaction cancelled, please refer cancellation reasons for specific reasons [{}]"
+
+    def __init__(self, errors):
+        msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors]))
+        super(TransactionCanceledException, self).__init__(msg)

View File

@@ -18,6 +18,8 @@ from moto.dynamodb2.exceptions import (
     InvalidIndexNameError,
     ItemSizeTooLarge,
     ItemSizeToUpdateTooLarge,
+    ConditionalCheckFailed,
+    TransactionCanceledException,
 )
 from moto.dynamodb2.models.utilities import bytesize
 from moto.dynamodb2.models.dynamo_type import DynamoType
@@ -316,6 +318,12 @@ class Table(BaseModel):
             }
         self.set_stream_specification(streams)
         self.lambda_event_source_mappings = {}
+        self.continuous_backups = {
+            "ContinuousBackupsStatus": "ENABLED",  # One of 'ENABLED'|'DISABLED', it's enabled by default
+            "PointInTimeRecoveryDescription": {
+                "PointInTimeRecoveryStatus": "DISABLED"  # One of 'ENABLED'|'DISABLED'
+            },
+        }

     @classmethod
     def create_from_cloudformation_json(
@@ -453,14 +461,14 @@ class Table(BaseModel):

         if not overwrite:
             if not get_expected(expected).expr(current):
-                raise ValueError("The conditional request failed")
+                raise ConditionalCheckFailed
             condition_op = get_filter_expression(
                 condition_expression,
                 expression_attribute_names,
                 expression_attribute_values,
             )
             if not condition_op.expr(current):
-                raise ValueError("The conditional request failed")
+                raise ConditionalCheckFailed

         if range_value:
             self.items[hash_value][range_value] = item
@@ -824,6 +832,42 @@ class DynamoDBBackend(BaseBackend):
         required_table = self.tables[table]
         return required_table.tags

+    def list_tables(self, limit, exclusive_start_table_name):
+        all_tables = list(self.tables.keys())
+
+        if exclusive_start_table_name:
+            try:
+                last_table_index = all_tables.index(exclusive_start_table_name)
+            except ValueError:
+                start = len(all_tables)
+            else:
+                start = last_table_index + 1
+        else:
+            start = 0
+
+        if limit:
+            tables = all_tables[start : start + limit]
+        else:
+            tables = all_tables[start:]
+
+        if limit and len(all_tables) > start + limit:
+            return tables, tables[-1]
+        return tables, None
+
+    def describe_table(self, name):
+        table = self.tables[name]
+        return table.describe(base_key="Table")
+
+    def update_table(self, name, global_index, throughput, stream_spec):
+        table = self.get_table(name)
+        if global_index:
+            table = self.update_table_global_indexes(name, global_index)
+        if throughput:
+            table = self.update_table_throughput(name, throughput)
+        if stream_spec:
+            table = self.update_table_streams(name, stream_spec)
+        return table
+
     def update_table_throughput(self, name, throughput):
         table = self.tables[name]
         table.throughput = throughput
@@ -1070,14 +1114,14 @@ class DynamoDBBackend(BaseBackend):
             expected = {}

         if not get_expected(expected).expr(item):
-            raise ValueError("The conditional request failed")
+            raise ConditionalCheckFailed
         condition_op = get_filter_expression(
             condition_expression,
             expression_attribute_names,
             expression_attribute_values,
         )
         if not condition_op.expr(item):
-            raise ValueError("The conditional request failed")
+            raise ConditionalCheckFailed

         # Update does not fail on new items, so create one
         if item is None:
@@ -1130,11 +1174,11 @@ class DynamoDBBackend(BaseBackend):
             expression_attribute_values,
         )
         if not condition_op.expr(item):
-            raise ValueError("The conditional request failed")
+            raise ConditionalCheckFailed

         return table.delete_item(hash_value, range_value)

-    def update_ttl(self, table_name, ttl_spec):
+    def update_time_to_live(self, table_name, ttl_spec):
         table = self.tables.get(table_name)
         if table is None:
             raise JsonRESTError("ResourceNotFound", "Table not found")
@@ -1151,7 +1195,7 @@ class DynamoDBBackend(BaseBackend):
             table.ttl["TimeToLiveStatus"] = "DISABLED"
         table.ttl["AttributeName"] = ttl_spec["AttributeName"]

-    def describe_ttl(self, table_name):
+    def describe_time_to_live(self, table_name):
         table = self.tables.get(table_name)
         if table is None:
             raise JsonRESTError("ResourceNotFound", "Table not found")
@@ -1161,8 +1205,9 @@ class DynamoDBBackend(BaseBackend):
     def transact_write_items(self, transact_items):
         # Create a backup in case any of the transactions fail
         original_table_state = copy.deepcopy(self.tables)
-        try:
-            for item in transact_items:
+        errors = []
+        for item in transact_items:
+            try:
                 if "ConditionCheck" in item:
                     item = item["ConditionCheck"]
                     key = item["Key"]
@@ -1182,7 +1227,7 @@ class DynamoDBBackend(BaseBackend):
                         expression_attribute_values,
                     )
                     if not condition_op.expr(current):
-                        raise ValueError("The conditional request failed")
+                        raise ConditionalCheckFailed()
                 elif "Put" in item:
                     item = item["Put"]
                     attrs = item["Item"]
@@ -1241,10 +1286,55 @@ class DynamoDBBackend(BaseBackend):
                     )
                 else:
                     raise ValueError
-        except:  # noqa: E722 Do not use bare except
-            # Rollback to the original state, and reraise the error
+                errors.append(None)
+            except Exception as e:  # noqa: E722 Do not use bare except
+                errors.append(type(e).__name__)
+        if any(errors):
+            # Rollback to the original state, and reraise the errors
             self.tables = original_table_state
-            raise
+            raise TransactionCanceledException(errors)
+
+    def describe_continuous_backups(self, table_name):
+        table = self.get_table(table_name)
+
+        return table.continuous_backups
+
+    def update_continuous_backups(self, table_name, point_in_time_spec):
+        table = self.get_table(table_name)
+
+        if (
+            point_in_time_spec["PointInTimeRecoveryEnabled"]
+            and table.continuous_backups["PointInTimeRecoveryDescription"][
+                "PointInTimeRecoveryStatus"
+            ]
+            == "DISABLED"
+        ):
+            table.continuous_backups["PointInTimeRecoveryDescription"] = {
+                "PointInTimeRecoveryStatus": "ENABLED",
+                "EarliestRestorableDateTime": unix_time(),
+                "LatestRestorableDateTime": unix_time(),
+            }
+        elif not point_in_time_spec["PointInTimeRecoveryEnabled"]:
+            table.continuous_backups["PointInTimeRecoveryDescription"] = {
+                "PointInTimeRecoveryStatus": "DISABLED"
+            }
+
+        return table.continuous_backups
+
+    ######################
+    # LIST of methods where the logic completely resides in responses.py
+    # Duplicated here so that the implementation coverage script is aware
+    # TODO: Move logic here
+    ######################
+    def batch_get_item(self):
+        pass
+
+    def batch_write_item(self):
+        pass
+
+    def transact_get_items(self):
+        pass
+

 dynamodb_backends = {}
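The new describe/update_continuous_backups plumbing above (wired to handlers in responses.py further down) maps onto the public DynamoDB continuous-backups API. A rough usage sketch against moto; the table name and region are made up:

import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def toggle_point_in_time_recovery():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="example-table",  # hypothetical table name
        KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )

    # Backups report as ENABLED by default, point-in-time recovery as DISABLED.
    before = client.describe_continuous_backups(TableName="example-table")

    client.update_continuous_backups(
        TableName="example-table",
        PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
    )
    after = client.describe_continuous_backups(TableName="example-table")
    return before, after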

View File

@@ -1,6 +1,10 @@
 from abc import abstractmethod

-from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType
+from moto.dynamodb2.exceptions import (
+    IncorrectOperandType,
+    IncorrectDataType,
+    ProvidedKeyDoesNotExist,
+)
 from moto.dynamodb2.models import DynamoType
 from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType
 from moto.dynamodb2.parsing.ast_nodes import (
@@ -193,7 +197,18 @@ class AddExecutor(NodeExecutor):
         value_to_add = self.get_action_value()
         if isinstance(value_to_add, DynamoType):
             if value_to_add.is_set():
-                current_string_set = self.get_item_at_end_of_path(item)
+                try:
+                    current_string_set = self.get_item_at_end_of_path(item)
+                except ProvidedKeyDoesNotExist:
+                    current_string_set = DynamoType({value_to_add.type: []})
+                    SetExecutor.set(
+                        item_part_to_modify_with_set=self.get_item_before_end_of_path(
+                            item
+                        ),
+                        element_to_set=self.get_element_to_action(),
+                        value_to_set=current_string_set,
+                        expression_attribute_names=self.expression_attribute_names,
+                    )
                 assert isinstance(current_string_set, DynamoType)
                 if not current_string_set.type == value_to_add.type:
                     raise IncorrectDataType()
@@ -204,7 +219,11 @@ class AddExecutor(NodeExecutor):
                 else:
                     current_string_set.value.append(value)
             elif value_to_add.type == DDBType.NUMBER:
-                existing_value = self.get_item_at_end_of_path(item)
+                try:
+                    existing_value = self.get_item_at_end_of_path(item)
+                except ProvidedKeyDoesNotExist:
+                    existing_value = DynamoType({DDBType.NUMBER: "0"})
                 assert isinstance(existing_value, DynamoType)
                 if not existing_value.type == DDBType.NUMBER:
                     raise IncorrectDataType()

View File

@@ -9,7 +9,12 @@ import six

 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores, amzn_request_id
-from .exceptions import InvalidIndexNameError, ItemSizeTooLarge, MockValidationException
+from .exceptions import (
+    InvalidIndexNameError,
+    ItemSizeTooLarge,
+    MockValidationException,
+    TransactionCanceledException,
+)
 from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump
@@ -92,27 +97,14 @@ class DynamoHandler(BaseResponse):
     def list_tables(self):
         body = self.body
         limit = body.get("Limit", 100)
-        all_tables = list(self.dynamodb_backend.tables.keys())
-
         exclusive_start_table_name = body.get("ExclusiveStartTableName")
-        if exclusive_start_table_name:
-            try:
-                last_table_index = all_tables.index(exclusive_start_table_name)
-            except ValueError:
-                start = len(all_tables)
-            else:
-                start = last_table_index + 1
-        else:
-            start = 0
-
-        if limit:
-            tables = all_tables[start : start + limit]
-        else:
-            tables = all_tables[start:]
+        tables, last_eval = self.dynamodb_backend.list_tables(
+            limit, exclusive_start_table_name
+        )

         response = {"TableNames": tables}
-        if limit and len(all_tables) > start + limit:
-            response["LastEvaluatedTableName"] = tables[-1]
+        if last_eval:
+            response["LastEvaluatedTableName"] = last_eval

         return dynamo_json_dump(response)
@@ -232,33 +224,29 @@ class DynamoHandler(BaseResponse):
     def update_table(self):
         name = self.body["TableName"]
-        table = self.dynamodb_backend.get_table(name)
-        if "GlobalSecondaryIndexUpdates" in self.body:
-            table = self.dynamodb_backend.update_table_global_indexes(
-                name, self.body["GlobalSecondaryIndexUpdates"]
-            )
-        if "ProvisionedThroughput" in self.body:
-            throughput = self.body["ProvisionedThroughput"]
-            table = self.dynamodb_backend.update_table_throughput(name, throughput)
-        if "StreamSpecification" in self.body:
-            try:
-                table = self.dynamodb_backend.update_table_streams(
-                    name, self.body["StreamSpecification"]
-                )
-            except ValueError:
-                er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException"
-                return self.error(er, "Cannot enable stream")
-
-        return dynamo_json_dump(table.describe())
+        global_index = self.body.get("GlobalSecondaryIndexUpdates", None)
+        throughput = self.body.get("ProvisionedThroughput", None)
+        stream_spec = self.body.get("StreamSpecification", None)
+        try:
+            table = self.dynamodb_backend.update_table(
+                name=name,
+                global_index=global_index,
+                throughput=throughput,
+                stream_spec=stream_spec,
+            )
+            return dynamo_json_dump(table.describe())
+        except ValueError:
+            er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException"
+            return self.error(er, "Cannot enable stream")

     def describe_table(self):
         name = self.body["TableName"]
         try:
-            table = self.dynamodb_backend.tables[name]
+            table = self.dynamodb_backend.describe_table(name)
+            return dynamo_json_dump(table)
         except KeyError:
             er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException"
             return self.error(er, "Requested resource not found")
-        return dynamo_json_dump(table.describe(base_key="Table"))
@@ -850,14 +838,14 @@ class DynamoHandler(BaseResponse):
         name = self.body["TableName"]
         ttl_spec = self.body["TimeToLiveSpecification"]

-        self.dynamodb_backend.update_ttl(name, ttl_spec)
+        self.dynamodb_backend.update_time_to_live(name, ttl_spec)

         return json.dumps({"TimeToLiveSpecification": ttl_spec})

     def describe_time_to_live(self):
         name = self.body["TableName"]

-        ttl_spec = self.dynamodb_backend.describe_ttl(name)
+        ttl_spec = self.dynamodb_backend.describe_time_to_live(name)

         return json.dumps({"TimeToLiveDescription": ttl_spec})
@@ -929,10 +917,37 @@ class DynamoHandler(BaseResponse):
         transact_items = self.body["TransactItems"]
         try:
             self.dynamodb_backend.transact_write_items(transact_items)
-        except ValueError:
-            er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException"
-            return self.error(
-                er, "A condition specified in the operation could not be evaluated."
-            )
+        except TransactionCanceledException as e:
+            er = "com.amazonaws.dynamodb.v20111205#TransactionCanceledException"
+            return self.error(er, str(e))
         response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}}
         return dynamo_json_dump(response)
+
+    def describe_continuous_backups(self):
+        name = self.body["TableName"]
+
+        if self.dynamodb_backend.get_table(name) is None:
+            return self.error(
+                "com.amazonaws.dynamodb.v20111205#TableNotFoundException",
+                "Table not found: {}".format(name),
+            )
+
+        response = self.dynamodb_backend.describe_continuous_backups(name)
+
+        return json.dumps({"ContinuousBackupsDescription": response})
+
+    def update_continuous_backups(self):
+        name = self.body["TableName"]
+        point_in_time_spec = self.body["PointInTimeRecoverySpecification"]
+
+        if self.dynamodb_backend.get_table(name) is None:
+            return self.error(
+                "com.amazonaws.dynamodb.v20111205#TableNotFoundException",
+                "Table not found: {}".format(name),
+            )
+
+        response = self.dynamodb_backend.update_continuous_backups(
+            name, point_in_time_spec
+        )
+
+        return json.dumps({"ContinuousBackupsDescription": response})

View File

@@ -560,8 +560,10 @@ class Instance(TaggedEC2Resource, BotoInstance):
             # worst case we'll get IP address exaustion... rarely
             pass

-    def add_block_device(self, size, device_path):
-        volume = self.ec2_backend.create_volume(size, self.region_name)
+    def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False):
+        volume = self.ec2_backend.create_volume(
+            size, self.region_name, snapshot_id, encrypted
+        )
         self.ec2_backend.attach_volume(volume.id, self.id, device_path)

     def setup_defaults(self):
@@ -891,8 +893,12 @@ class InstanceBackend(object):
             new_instance.add_tags(instance_tags)
             if "block_device_mappings" in kwargs:
                 for block_device in kwargs["block_device_mappings"]:
+                    device_name = block_device["DeviceName"]
+                    volume_size = block_device["Ebs"].get("VolumeSize")
+                    snapshot_id = block_device["Ebs"].get("SnapshotId")
+                    encrypted = block_device["Ebs"].get("Encrypted", False)
                     new_instance.add_block_device(
-                        block_device["Ebs"]["VolumeSize"], block_device["DeviceName"]
+                        volume_size, device_name, snapshot_id, encrypted
                     )
             else:
                 new_instance.setup_defaults()

View File

@@ -4,10 +4,16 @@ from boto.ec2.instancetype import InstanceType
 from moto.autoscaling import autoscaling_backends
 from moto.core.responses import BaseResponse
 from moto.core.utils import camelcase_to_underscores
-from moto.ec2.utils import filters_from_querystring, dict_from_querystring
+from moto.ec2.exceptions import MissingParameterError
+from moto.ec2.utils import (
+    filters_from_querystring,
+    dict_from_querystring,
+)
 from moto.elbv2 import elbv2_backends
 from moto.core import ACCOUNT_ID

+from copy import deepcopy
+

 class InstanceResponse(BaseResponse):
     def describe_instances(self):
@@ -44,40 +50,31 @@ class InstanceResponse(BaseResponse):
         owner_id = self._get_param("OwnerId")
         user_data = self._get_param("UserData")
         security_group_names = self._get_multi_param("SecurityGroup")
-        security_group_ids = self._get_multi_param("SecurityGroupId")
-        nics = dict_from_querystring("NetworkInterface", self.querystring)
-        instance_type = self._get_param("InstanceType", if_none="m1.small")
-        placement = self._get_param("Placement.AvailabilityZone")
-        subnet_id = self._get_param("SubnetId")
-        private_ip = self._get_param("PrivateIpAddress")
-        associate_public_ip = self._get_param("AssociatePublicIpAddress")
-        key_name = self._get_param("KeyName")
-        ebs_optimized = self._get_param("EbsOptimized") or False
-        instance_initiated_shutdown_behavior = self._get_param(
-            "InstanceInitiatedShutdownBehavior"
-        )
-        tags = self._parse_tag_specification("TagSpecification")
-        region_name = self.region
+        kwargs = {
+            "instance_type": self._get_param("InstanceType", if_none="m1.small"),
+            "placement": self._get_param("Placement.AvailabilityZone"),
+            "region_name": self.region,
+            "subnet_id": self._get_param("SubnetId"),
+            "owner_id": owner_id,
+            "key_name": self._get_param("KeyName"),
+            "security_group_ids": self._get_multi_param("SecurityGroupId"),
+            "nics": dict_from_querystring("NetworkInterface", self.querystring),
+            "private_ip": self._get_param("PrivateIpAddress"),
+            "associate_public_ip": self._get_param("AssociatePublicIpAddress"),
+            "tags": self._parse_tag_specification("TagSpecification"),
+            "ebs_optimized": self._get_param("EbsOptimized") or False,
+            "instance_initiated_shutdown_behavior": self._get_param(
+                "InstanceInitiatedShutdownBehavior"
+            ),
+        }
+
+        mappings = self._parse_block_device_mapping()
+        if mappings:
+            kwargs["block_device_mappings"] = mappings

         if self.is_not_dryrun("RunInstance"):
             new_reservation = self.ec2_backend.add_instances(
-                image_id,
-                min_count,
-                user_data,
-                security_group_names,
-                instance_type=instance_type,
-                placement=placement,
-                region_name=region_name,
-                subnet_id=subnet_id,
-                owner_id=owner_id,
-                key_name=key_name,
-                security_group_ids=security_group_ids,
-                nics=nics,
-                private_ip=private_ip,
-                associate_public_ip=associate_public_ip,
-                tags=tags,
-                ebs_optimized=ebs_optimized,
-                instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
+                image_id, min_count, user_data, security_group_names, **kwargs
             )

             template = self.response_template(EC2_RUN_INSTANCES)
@@ -272,6 +269,58 @@ class InstanceResponse(BaseResponse):
             )
         return EC2_MODIFY_INSTANCE_ATTRIBUTE

+    def _parse_block_device_mapping(self):
+        device_mappings = self._get_list_prefix("BlockDeviceMapping")
+        mappings = []
+        for device_mapping in device_mappings:
+            self._validate_block_device_mapping(device_mapping)
+            device_template = deepcopy(BLOCK_DEVICE_MAPPING_TEMPLATE)
+            device_template["VirtualName"] = device_mapping.get("virtual_name")
+            device_template["DeviceName"] = device_mapping.get("device_name")
+            device_template["Ebs"]["SnapshotId"] = device_mapping.get(
+                "ebs._snapshot_id"
+            )
+            device_template["Ebs"]["VolumeSize"] = device_mapping.get(
+                "ebs._volume_size"
+            )
+            device_template["Ebs"]["DeleteOnTermination"] = device_mapping.get(
+                "ebs._delete_on_termination", False
+            )
+            device_template["Ebs"]["VolumeType"] = device_mapping.get(
+                "ebs._volume_type"
+            )
+            device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops")
+            device_template["Ebs"]["Encrypted"] = device_mapping.get(
+                "ebs._encrypted", False
+            )
+            mappings.append(device_template)
+
+        return mappings
+
+    @staticmethod
+    def _validate_block_device_mapping(device_mapping):
+
+        if not any(mapping for mapping in device_mapping if mapping.startswith("ebs.")):
+            raise MissingParameterError("ebs")
+        if (
+            "ebs._volume_size" not in device_mapping
+            and "ebs._snapshot_id" not in device_mapping
+        ):
+            raise MissingParameterError("size or snapshotId")
+
+
+BLOCK_DEVICE_MAPPING_TEMPLATE = {
+    "VirtualName": None,
+    "DeviceName": None,
+    "Ebs": {
+        "SnapshotId": None,
+        "VolumeSize": None,
+        "DeleteOnTermination": None,
+        "VolumeType": None,
+        "Iops": None,
+        "Encrypted": None,
+    },
+}
+
+
 EC2_RUN_INSTANCES = (
     """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
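With _parse_block_device_mapping in place, RunInstances can carry per-device EBS settings through to the created volumes. A hedged boto3 sketch against moto; the AMI id is a placeholder and the returned shape is only what moto of this era reports:

import boto3
from moto import mock_ec2


@mock_ec2
def run_instance_with_encrypted_volume():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    reservation = ec2.run_instances(
        ImageId="ami-12345678",  # placeholder image id
        MinCount=1,
        MaxCount=1,
        BlockDeviceMappings=[
            {
                "DeviceName": "/dev/sda2",
                "Ebs": {"VolumeSize": 50, "Encrypted": True, "DeleteOnTermination": True},
            }
        ],
    )
    instance_id = reservation["Instances"][0]["InstanceId"]

    # The extra device should be visible on the created instance.
    desc = ec2.describe_instances(InstanceIds=[instance_id])
    return desc["Reservations"][0]["Instances"][0].get("BlockDeviceMappings", [])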
EC2_RUN_INSTANCES = ( EC2_RUN_INSTANCES = (
"""<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/"> """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">

View File

@@ -1,4 +0,0 @@
-from ..core.models import base_decorator
-from .models import ec2_instance_connect_backends
-
-mock_ec2_instance_connect = base_decorator(ec2_instance_connect_backends)

View File

@@ -1,11 +0,0 @@
-import boto.ec2
-from moto.core import BaseBackend
-
-
-class Ec2InstanceConnectBackend(BaseBackend):
-    pass
-
-
-ec2_instance_connect_backends = {}
-for region in boto.ec2.regions():
-    ec2_instance_connect_backends[region.name] = Ec2InstanceConnectBackend()

View File

@@ -1,9 +0,0 @@
-import json
-from moto.core.responses import BaseResponse
-
-
-class Ec2InstanceConnectResponse(BaseResponse):
-    def send_ssh_public_key(self):
-        return json.dumps(
-            {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True}
-        )

View File

@@ -0,0 +1,4 @@
+from ..core.models import base_decorator
+from .models import ec2instanceconnect_backends
+
+mock_ec2instanceconnect = base_decorator(ec2instanceconnect_backends)

View File

@@ -0,0 +1,15 @@
+import boto.ec2
+import json
+from moto.core import BaseBackend
+
+
+class Ec2InstanceConnectBackend(BaseBackend):
+    def send_ssh_public_key(self):
+        return json.dumps(
+            {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True}
+        )
+
+
+ec2instanceconnect_backends = {}
+for region in boto.ec2.regions():
+    ec2instanceconnect_backends[region.name] = Ec2InstanceConnectBackend()

View File

@@ -0,0 +1,11 @@
+from moto.core.responses import BaseResponse
+from .models import ec2instanceconnect_backends
+
+
+class Ec2InstanceConnectResponse(BaseResponse):
+    @property
+    def ec2instanceconnect_backend(self):
+        return ec2instanceconnect_backends[self.region]
+
+    def send_ssh_public_key(self):
+        return self.ec2instanceconnect_backend.send_ssh_public_key()
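The renamed ec2instanceconnect backend exposes the one call the real service offers. A minimal sketch of exercising it through boto3; the instance id and key material are placeholders, and the mock returns a canned response regardless of them:

import boto3
from moto import mock_ec2instanceconnect


@mock_ec2instanceconnect
def push_ssh_key():
    client = boto3.client("ec2-instance-connect", region_name="us-east-1")
    response = client.send_ssh_public_key(
        InstanceId="i-0123456789abcdef0",           # placeholder instance id
        InstanceOSUser="ec2-user",
        SSHPublicKey="ssh-rsa AAAA... example-key",  # placeholder key material
        AvailabilityZone="us-east-1a",
    )
    return response["Success"]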

View File

@@ -857,8 +857,30 @@ class IoTBackend(BaseBackend):
         del self.thing_groups[thing_group.arn]

     def list_thing_groups(self, parent_group, name_prefix_filter, recursive):
-        thing_groups = self.thing_groups.values()
-        return thing_groups
+        if recursive is None:
+            recursive = True
+        if name_prefix_filter is None:
+            name_prefix_filter = ""
+        if parent_group and parent_group not in [
+            _.thing_group_name for _ in self.thing_groups.values()
+        ]:
+            raise ResourceNotFoundException()
+        thing_groups = [
+            _ for _ in self.thing_groups.values() if _.parent_group_name == parent_group
+        ]
+        if recursive:
+            for g in thing_groups:
+                thing_groups.extend(
+                    self.list_thing_groups(
+                        parent_group=g.thing_group_name,
+                        name_prefix_filter=None,
+                        recursive=False,
+                    )
+                )
+        # thing_groups = groups_to_process.values()
+        return [
+            _ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter)
+        ]

     def update_thing_group(
         self, thing_group_name, thing_group_properties, expected_version

View File

@@ -535,7 +535,7 @@ class IoTResponse(BaseResponse):
         # max_results = self._get_int_param("maxResults")
         parent_group = self._get_param("parentGroup")
         name_prefix_filter = self._get_param("namePrefixFilter")
-        recursive = self._get_param("recursive")
+        recursive = self._get_bool_param("recursive")
         thing_groups = self.iot_backend.list_thing_groups(
             parent_group=parent_group,
             name_prefix_filter=name_prefix_filter,
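The rewritten list_thing_groups walks the parent/child hierarchy and applies the name prefix filter. A rough boto3 sketch against moto showing the recursive listing; the group names are made up:

import boto3
from moto import mock_iot


@mock_iot
def list_nested_groups():
    iot = boto3.client("iot", region_name="us-east-1")
    iot.create_thing_group(thingGroupName="parent")
    iot.create_thing_group(thingGroupName="child", parentGroupName="parent")
    iot.create_thing_group(thingGroupName="grandchild", parentGroupName="child")

    # recursive defaults to True, so children of children are included.
    everything_under_parent = iot.list_thing_groups(parentGroup="parent")

    # Only direct children when recursion is switched off.
    direct_children = iot.list_thing_groups(parentGroup="parent", recursive=False)
    return everything_under_parent["thingGroups"], direct_children["thingGroups"]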

View File

@@ -7,10 +7,10 @@ class LogsClientError(JsonRESTError):

 class ResourceNotFoundException(LogsClientError):
-    def __init__(self):
+    def __init__(self, msg=None):
         self.code = 400
         super(ResourceNotFoundException, self).__init__(
-            "ResourceNotFoundException", "The specified resource does not exist"
+            "ResourceNotFoundException", msg or "The specified log group does not exist"
         )
@@ -28,3 +28,11 @@ class ResourceAlreadyExistsException(LogsClientError):
         super(ResourceAlreadyExistsException, self).__init__(
             "ResourceAlreadyExistsException", "The specified log group already exists"
         )
+
+
+class LimitExceededException(LogsClientError):
+    def __init__(self):
+        self.code = 400
+        super(LimitExceededException, self).__init__(
+            "LimitExceededException", "Resource limit exceeded."
+        )

View File

@@ -6,6 +6,7 @@ from .exceptions import (
     ResourceNotFoundException,
     ResourceAlreadyExistsException,
     InvalidParameterException,
+    LimitExceededException,
 )
@@ -57,6 +58,8 @@ class LogStream:
             0  # I'm guessing this is token needed for sequenceToken by put_events
         )
         self.events = []
+        self.destination_arn = None
+        self.filter_name = None

         self.__class__._log_ids += 1
@@ -97,11 +100,32 @@ class LogStream:
         self.lastIngestionTime = int(unix_time_millis())
         # TODO: make this match AWS if possible
         self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
-        self.events += [
+        events = [
             LogEvent(self.lastIngestionTime, log_event) for log_event in log_events
         ]
+        self.events += events
         self.uploadSequenceToken += 1

+        if self.destination_arn and self.destination_arn.split(":")[2] == "lambda":
+            from moto.awslambda import lambda_backends  # due to circular dependency
+
+            lambda_log_events = [
+                {
+                    "id": event.eventId,
+                    "timestamp": event.timestamp,
+                    "message": event.message,
+                }
+                for event in events
+            ]
+
+            lambda_backends[self.region].send_log_event(
+                self.destination_arn,
+                self.filter_name,
+                log_group_name,
+                log_stream_name,
+                lambda_log_events,
+            )
+
         return "{:056d}".format(self.uploadSequenceToken)

     def get_log_events(
@@ -227,6 +251,7 @@ class LogGroup:
         self.retention_in_days = kwargs.get(
             "RetentionInDays"
         )  # AWS defaults to Never Expire for log group retention
+        self.subscription_filters = []

     def create_log_stream(self, log_stream_name):
         if log_stream_name in self.streams:
@@ -386,6 +411,48 @@ class LogGroup:
             k: v for (k, v) in self.tags.items() if k not in tags_to_remove
         }

+    def describe_subscription_filters(self):
+        return self.subscription_filters
+
+    def put_subscription_filter(
+        self, filter_name, filter_pattern, destination_arn, role_arn
+    ):
+        creation_time = int(unix_time_millis())
+
+        # only one subscription filter can be associated with a log group
+        if self.subscription_filters:
+            if self.subscription_filters[0]["filterName"] == filter_name:
+                creation_time = self.subscription_filters[0]["creationTime"]
+            else:
+                raise LimitExceededException
+
+        for stream in self.streams.values():
+            stream.destination_arn = destination_arn
+            stream.filter_name = filter_name
+
+        self.subscription_filters = [
+            {
+                "filterName": filter_name,
+                "logGroupName": self.name,
+                "filterPattern": filter_pattern,
+                "destinationArn": destination_arn,
+                "roleArn": role_arn,
+                "distribution": "ByLogStream",
+                "creationTime": creation_time,
+            }
+        ]
+
+    def delete_subscription_filter(self, filter_name):
+        if (
+            not self.subscription_filters
+            or self.subscription_filters[0]["filterName"] != filter_name
+        ):
+            raise ResourceNotFoundException(
+                "The specified subscription filter does not exist."
+            )
+
+        self.subscription_filters = []
+

 class LogsBackend(BaseBackend):
     def __init__(self, region_name):
@@ -557,6 +624,46 @@ class LogsBackend(BaseBackend):
         log_group = self.groups[log_group_name]
         log_group.untag(tags)

+    def describe_subscription_filters(self, log_group_name):
+        log_group = self.groups.get(log_group_name)
+        if not log_group:
+            raise ResourceNotFoundException()
+        return log_group.describe_subscription_filters()
+
+    def put_subscription_filter(
+        self, log_group_name, filter_name, filter_pattern, destination_arn, role_arn
+    ):
+        # TODO: support other destinations like Kinesis stream
+        from moto.awslambda import lambda_backends  # due to circular dependency
+
+        log_group = self.groups.get(log_group_name)
+        if not log_group:
+            raise ResourceNotFoundException()
+
+        lambda_func = lambda_backends[self.region_name].get_function(destination_arn)
+
+        # no specific permission check implemented
+        if not lambda_func:
+            raise InvalidParameterException(
+                "Could not execute the lambda function. "
+                "Make sure you have given CloudWatch Logs permission to execute your function."
+            )
+
+        log_group.put_subscription_filter(
+            filter_name, filter_pattern, destination_arn, role_arn
+        )
+
+    def delete_subscription_filter(self, log_group_name, filter_name):
+        log_group = self.groups.get(log_group_name)
+        if not log_group:
+            raise ResourceNotFoundException()
+
+        log_group.delete_subscription_filter(filter_name)
+

 logs_backends = {}
 for region in Session().get_available_regions("logs"):

View File

@@ -178,3 +178,33 @@ class LogsResponse(BaseResponse):
         tags = self._get_param("tags")
         self.logs_backend.untag_log_group(log_group_name, tags)
         return ""
+
+    def describe_subscription_filters(self):
+        log_group_name = self._get_param("logGroupName")
+
+        subscription_filters = self.logs_backend.describe_subscription_filters(
+            log_group_name
+        )
+
+        return json.dumps({"subscriptionFilters": subscription_filters})
+
+    def put_subscription_filter(self):
+        log_group_name = self._get_param("logGroupName")
+        filter_name = self._get_param("filterName")
+        filter_pattern = self._get_param("filterPattern")
+        destination_arn = self._get_param("destinationArn")
+        role_arn = self._get_param("roleArn")
+
+        self.logs_backend.put_subscription_filter(
+            log_group_name, filter_name, filter_pattern, destination_arn, role_arn
+        )
+
+        return ""
+
+    def delete_subscription_filter(self):
+        log_group_name = self._get_param("logGroupName")
+        filter_name = self._get_param("filterName")
+
+        self.logs_backend.delete_subscription_filter(log_group_name, filter_name)
+
+        return ""

View File

@@ -0,0 +1,9 @@
+from __future__ import unicode_literals
+from .models import managedblockchain_backends
+from ..core.models import base_decorator, deprecated_base_decorator
+
+managedblockchain_backend = managedblockchain_backends["us-east-1"]
+mock_managedblockchain = base_decorator(managedblockchain_backends)
+mock_managedblockchain_deprecated = deprecated_base_decorator(
+    managedblockchain_backends
+)

View File

@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+
+from moto.core.exceptions import RESTError
+
+
+class ManagedBlockchainClientError(RESTError):
+    code = 400
+
+
+class BadRequestException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        super(BadRequestException, self).__init__(
+            "BadRequestException",
+            "An error occurred (BadRequestException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class InvalidRequestException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        super(InvalidRequestException, self).__init__(
+            "InvalidRequestException",
+            "An error occurred (InvalidRequestException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class ResourceNotFoundException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        self.code = 404
+        super(ResourceNotFoundException, self).__init__(
+            "ResourceNotFoundException",
+            "An error occurred (BadRequestException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class ResourceLimitExceededException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        self.code = 429
+        super(ResourceLimitExceededException, self).__init__(
+            "ResourceLimitExceededException",
+            "An error occurred (ResourceLimitExceededException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )

View File

@@ -0,0 +1,811 @@
from __future__ import unicode_literals, division
import datetime
import re
from boto3 import Session
from moto.core import BaseBackend, BaseModel
from .exceptions import (
BadRequestException,
ResourceNotFoundException,
InvalidRequestException,
ResourceLimitExceededException,
)
from .utils import (
get_network_id,
get_member_id,
get_proposal_id,
get_invitation_id,
member_name_exist_in_network,
number_of_members_in_network,
admin_password_ok,
)
FRAMEWORKS = [
"HYPERLEDGER_FABRIC",
]
FRAMEWORKVERSIONS = [
"1.2",
]
EDITIONS = {
"STARTER": {
"MaxMembers": 5,
"MaxNodesPerMember": 2,
"AllowedNodeInstanceTypes": ["bc.t3.small", "bc.t3.medium"],
},
"STANDARD": {
"MaxMembers": 14,
"MaxNodesPerMember": 3,
"AllowedNodeInstanceTypes": ["bc.t3", "bc.m5", "bc.c5"],
},
}
VOTEVALUES = ["YES", "NO"]
class ManagedBlockchainNetwork(BaseModel):
def __init__(
self,
id,
name,
framework,
frameworkversion,
frameworkconfiguration,
voting_policy,
member_configuration,
region,
description=None,
):
self.creationdate = datetime.datetime.utcnow()
self.id = id
self.name = name
self.description = description
self.framework = framework
self.frameworkversion = frameworkversion
self.frameworkconfiguration = frameworkconfiguration
self.voting_policy = voting_policy
self.member_configuration = member_configuration
self.region = region
@property
def network_name(self):
return self.name
@property
def network_framework(self):
return self.framework
@property
def network_framework_version(self):
return self.frameworkversion
@property
def network_creationdate(self):
return self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
@property
def network_description(self):
return self.description
@property
def network_edition(self):
return self.frameworkconfiguration["Fabric"]["Edition"]
@property
def vote_pol_proposal_duration(self):
return self.voting_policy["ApprovalThresholdPolicy"]["ProposalDurationInHours"]
@property
def vote_pol_threshold_percentage(self):
return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdPercentage"]
@property
def vote_pol_threshold_comparator(self):
return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdComparator"]
def to_dict(self):
# Format for list_networks
d = {
"Id": self.id,
"Name": self.name,
"Framework": self.framework,
"FrameworkVersion": self.frameworkversion,
"Status": "AVAILABLE",
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
}
if self.description is not None:
d["Description"] = self.description
return d
def get_format(self):
# Format for get_network
frameworkattributes = {
"Fabric": {
"OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format(
self.id.lower(), self.region
),
"Edition": self.frameworkconfiguration["Fabric"]["Edition"],
}
}
vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format(
self.region, self.id.lower()
)
d = {
"Id": self.id,
"Name": self.name,
"Framework": self.framework,
"FrameworkVersion": self.frameworkversion,
"FrameworkAttributes": frameworkattributes,
"VpcEndpointServiceName": vpcendpointname,
"VotingPolicy": self.voting_policy,
"Status": "AVAILABLE",
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
}
if self.description is not None:
d["Description"] = self.description
return d
class ManagedBlockchainProposal(BaseModel):
def __init__(
self,
id,
networkid,
memberid,
membername,
numofmembers,
actions,
network_expirtation,
network_threshold,
network_threshold_comp,
description=None,
):
# In general, passing all values instead of creating
# an apparatus to look them up
self.id = id
self.networkid = networkid
self.memberid = memberid
self.membername = membername
self.numofmembers = numofmembers
self.actions = actions
self.network_expirtation = network_expirtation
self.network_threshold = network_threshold
self.network_threshold_comp = network_threshold_comp
self.description = description
self.creationdate = datetime.datetime.utcnow()
self.expirtationdate = self.creationdate + datetime.timedelta(
hours=network_expirtation
)
self.yes_vote_count = 0
self.no_vote_count = 0
self.outstanding_vote_count = self.numofmembers
self.status = "IN_PROGRESS"
self.votes = {}
@property
def network_id(self):
return self.networkid
@property
def proposal_status(self):
return self.status
@property
def proposal_votes(self):
return self.votes
def proposal_actions(self, action_type):
default_return = []
if action_type.lower() == "invitations":
if "Invitations" in self.actions:
return self.actions["Invitations"]
elif action_type.lower() == "removals":
if "Removals" in self.actions:
return self.actions["Removals"]
return default_return
def to_dict(self):
# Format for list_proposals
d = {
"ProposalId": self.id,
"ProposedByMemberId": self.memberid,
"ProposedByMemberName": self.membername,
"Status": self.status,
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
"ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
}
return d
def get_format(self):
# Format for get_proposal
d = {
"ProposalId": self.id,
"NetworkId": self.networkid,
"Actions": self.actions,
"ProposedByMemberId": self.memberid,
"ProposedByMemberName": self.membername,
"Status": self.status,
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
"ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
"YesVoteCount": self.yes_vote_count,
"NoVoteCount": self.no_vote_count,
"OutstandingVoteCount": self.outstanding_vote_count,
}
if self.description is not None:
d["Description"] = self.description
return d
def set_vote(self, votermemberid, votermembername, vote):
if datetime.datetime.utcnow() > self.expirtationdate:
self.status = "EXPIRED"
return False
if vote.upper() == "YES":
self.yes_vote_count += 1
else:
self.no_vote_count += 1
self.outstanding_vote_count -= 1
perct_yes = (self.yes_vote_count / self.numofmembers) * 100
perct_no = (self.no_vote_count / self.numofmembers) * 100
self.votes[votermemberid] = {
"MemberId": votermemberid,
"MemberName": votermembername,
"Vote": vote.upper(),
}
if self.network_threshold_comp == "GREATER_THAN_OR_EQUAL_TO":
if perct_yes >= self.network_threshold:
self.status = "APPROVED"
elif perct_no >= self.network_threshold:
self.status = "REJECTED"
else:
if perct_yes > self.network_threshold:
self.status = "APPROVED"
elif perct_no > self.network_threshold:
self.status = "REJECTED"
return True
class ManagedBlockchainInvitation(BaseModel):
def __init__(
self,
id,
networkid,
networkname,
networkframework,
networkframeworkversion,
networkcreationdate,
region,
networkdescription=None,
):
self.id = id
self.networkid = networkid
self.networkname = networkname
self.networkdescription = networkdescription
self.networkframework = networkframework
self.networkframeworkversion = networkframeworkversion
self.networkstatus = "AVAILABLE"
self.networkcreationdate = networkcreationdate
self.status = "PENDING"
self.region = region
self.creationdate = datetime.datetime.utcnow()
self.expirtationdate = self.creationdate + datetime.timedelta(days=7)
@property
def invitation_status(self):
return self.status
@property
def invitation_networkid(self):
return self.networkid
def to_dict(self):
d = {
"InvitationId": self.id,
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
"ExpirationDate": self.expirtationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
"Status": self.status,
"NetworkSummary": {
"Id": self.networkid,
"Name": self.networkname,
"Framework": self.networkframework,
"FrameworkVersion": self.networkframeworkversion,
"Status": self.networkstatus,
"CreationDate": self.networkcreationdate,
},
}
if self.networkdescription is not None:
d["NetworkSummary"]["Description"] = self.networkdescription
return d
def accept_invitation(self):
self.status = "ACCEPTED"
def reject_invitation(self):
self.status = "REJECTED"
def set_network_status(self, network_status):
self.networkstatus = network_status
class ManagedBlockchainMember(BaseModel):
def __init__(
self, id, networkid, member_configuration, region,
):
self.creationdate = datetime.datetime.utcnow()
self.id = id
self.networkid = networkid
self.member_configuration = member_configuration
self.status = "AVAILABLE"
self.region = region
self.description = None
@property
def network_id(self):
return self.networkid
@property
def name(self):
return self.member_configuration["Name"]
@property
def member_status(self):
return self.status
def to_dict(self):
# Format for list_members
d = {
"Id": self.id,
"Name": self.member_configuration["Name"],
"Status": self.status,
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
"IsOwned": True,
}
if "Description" in self.member_configuration:
self.description = self.member_configuration["Description"]
return d
def get_format(self):
# Format for get_member
frameworkattributes = {
"Fabric": {
"AdminUsername": self.member_configuration["FrameworkConfiguration"][
"Fabric"
]["AdminUsername"],
"CaEndpoint": "ca.{0}.{1}.managedblockchain.{2}.amazonaws.com:30002".format(
self.id.lower(), self.networkid.lower(), self.region
),
}
}
d = {
"NetworkId": self.networkid,
"Id": self.id,
"Name": self.name,
"FrameworkAttributes": frameworkattributes,
"LogPublishingConfiguration": self.member_configuration[
"LogPublishingConfiguration"
],
"Status": self.status,
"CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
}
if "Description" in self.member_configuration:
d["Description"] = self.description
return d
def delete(self):
self.status = "DELETED"
def update(self, logpublishingconfiguration):
self.member_configuration[
"LogPublishingConfiguration"
] = logpublishingconfiguration
class ManagedBlockchainBackend(BaseBackend):
def __init__(self, region_name):
self.networks = {}
self.members = {}
self.proposals = {}
self.invitations = {}
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_network(
self,
name,
framework,
frameworkversion,
frameworkconfiguration,
voting_policy,
member_configuration,
description=None,
):
# Check framework
if framework not in FRAMEWORKS:
raise BadRequestException("CreateNetwork", "Invalid request body")
# Check framework version
if frameworkversion not in FRAMEWORKVERSIONS:
raise BadRequestException(
"CreateNetwork",
"Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format(
frameworkversion
),
)
# Check edition
if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS:
raise BadRequestException("CreateNetwork", "Invalid request body")
## Generate network ID
network_id = get_network_id()
## Generate memberid ID and initial member
member_id = get_member_id()
self.members[member_id] = ManagedBlockchainMember(
id=member_id,
networkid=network_id,
member_configuration=member_configuration,
region=self.region_name,
)
self.networks[network_id] = ManagedBlockchainNetwork(
id=network_id,
name=name,
framework=framework,
frameworkversion=frameworkversion,
frameworkconfiguration=frameworkconfiguration,
voting_policy=voting_policy,
member_configuration=member_configuration,
region=self.region_name,
description=description,
)
# Return the network and member ID
d = {"NetworkId": network_id, "MemberId": member_id}
return d
def list_networks(self):
return self.networks.values()
def get_network(self, network_id):
if network_id not in self.networks:
raise ResourceNotFoundException(
"GetNetwork", "Network {0} not found.".format(network_id)
)
return self.networks.get(network_id)
def create_proposal(
self, networkid, memberid, actions, description=None,
):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"CreateProposal", "Network {0} not found.".format(networkid)
)
# Check if member exists
if memberid not in self.members:
raise ResourceNotFoundException(
"CreateProposal", "Member {0} not found.".format(memberid)
)
# The CLI docs say that Invitations and Removals cannot both be passed - but the API
# does not throw an error and the request is still performed
if "Invitations" in actions:
for propinvitation in actions["Invitations"]:
if re.match("[0-9]{12}", propinvitation["Principal"]) is None:
raise InvalidRequestException(
"CreateProposal",
"Account ID format specified in proposal is not valid.",
)
if "Removals" in actions:
for propmember in actions["Removals"]:
if propmember["MemberId"] not in self.members:
raise InvalidRequestException(
"CreateProposal",
"Member ID format specified in proposal is not valid.",
)
## Generate proposal ID
proposal_id = get_proposal_id()
self.proposals[proposal_id] = ManagedBlockchainProposal(
id=proposal_id,
networkid=networkid,
memberid=memberid,
membername=self.members.get(memberid).name,
numofmembers=number_of_members_in_network(self.members, networkid),
actions=actions,
network_expirtation=self.networks.get(networkid).vote_pol_proposal_duration,
network_threshold=self.networks.get(
networkid
).vote_pol_threshold_percentage,
network_threshold_comp=self.networks.get(
networkid
).vote_pol_threshold_comparator,
description=description,
)
# Return the proposal ID
d = {"ProposalId": proposal_id}
return d
def list_proposals(self, networkid):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"ListProposals", "Network {0} not found.".format(networkid)
)
proposalsfornetwork = []
for proposal_id in self.proposals:
if self.proposals.get(proposal_id).network_id == networkid:
proposalsfornetwork.append(self.proposals[proposal_id])
return proposalsfornetwork
def get_proposal(self, networkid, proposalid):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"GetProposal", "Network {0} not found.".format(networkid)
)
if proposalid not in self.proposals:
raise ResourceNotFoundException(
"GetProposal", "Proposal {0} not found.".format(proposalid)
)
return self.proposals.get(proposalid)
def vote_on_proposal(self, networkid, proposalid, votermemberid, vote):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"VoteOnProposal", "Network {0} not found.".format(networkid)
)
if proposalid not in self.proposals:
raise ResourceNotFoundException(
"VoteOnProposal", "Proposal {0} not found.".format(proposalid)
)
if votermemberid not in self.members:
raise ResourceNotFoundException(
"VoteOnProposal", "Member {0} not found.".format(votermemberid)
)
if vote.upper() not in VOTEVALUES:
raise BadRequestException("VoteOnProposal", "Invalid request body")
# Check to see if this member already voted
# TODO Verify exception
if votermemberid in self.proposals.get(proposalid).proposal_votes:
raise BadRequestException("VoteOnProposal", "Invalid request body")
# set_vote returns False if the vote was not cast (e.g., the proposal status is wrong)
if self.proposals.get(proposalid).set_vote(
votermemberid, self.members.get(votermemberid).name, vote.upper()
):
if self.proposals.get(proposalid).proposal_status == "APPROVED":
## Generate invitations
for propinvitation in self.proposals.get(proposalid).proposal_actions(
"Invitations"
):
invitation_id = get_invitation_id()
self.invitations[invitation_id] = ManagedBlockchainInvitation(
id=invitation_id,
networkid=networkid,
networkname=self.networks.get(networkid).network_name,
networkframework=self.networks.get(networkid).network_framework,
networkframeworkversion=self.networks.get(
networkid
).network_framework_version,
networkcreationdate=self.networks.get(
networkid
).network_creationdate,
region=self.region_name,
networkdescription=self.networks.get(
networkid
).network_description,
)
## Delete members
for propmember in self.proposals.get(proposalid).proposal_actions(
"Removals"
):
self.delete_member(networkid, propmember["MemberId"])
def list_proposal_votes(self, networkid, proposalid):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"ListProposalVotes", "Network {0} not found.".format(networkid)
)
if proposalid not in self.proposals:
raise ResourceNotFoundException(
"ListProposalVotes", "Proposal {0} not found.".format(proposalid)
)
# Output the vote summaries
proposalvotesfornetwork = []
for proposal_id in self.proposals:
if self.proposals.get(proposal_id).network_id == networkid:
for pvmemberid in self.proposals.get(proposal_id).proposal_votes:
proposalvotesfornetwork.append(
self.proposals.get(proposal_id).proposal_votes[pvmemberid]
)
return proposalvotesfornetwork
def list_invitations(self):
return self.invitations.values()
def reject_invitation(self, invitationid):
if invitationid not in self.invitations:
raise ResourceNotFoundException(
"RejectInvitation", "InvitationId {0} not found.".format(invitationid)
)
self.invitations.get(invitationid).reject_invitation()
def create_member(
self, invitationid, networkid, member_configuration,
):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"CreateMember", "Network {0} not found.".format(networkid)
)
if invitationid not in self.invitations:
raise InvalidRequestException(
"CreateMember", "Invitation {0} not valid".format(invitationid)
)
if self.invitations.get(invitationid).invitation_status != "PENDING":
raise InvalidRequestException(
"CreateMember", "Invitation {0} not valid".format(invitationid)
)
if (
member_name_exist_in_network(
self.members, networkid, member_configuration["Name"]
)
is True
):
raise InvalidRequestException(
"CreateMember",
"Member name {0} already exists in network {1}.".format(
member_configuration["Name"], networkid
),
)
networkedition = self.networks.get(networkid).network_edition
if (
number_of_members_in_network(self.members, networkid)
>= EDITIONS[networkedition]["MaxMembers"]
):
raise ResourceLimitExceededException(
"CreateMember",
"You cannot create a member in network {0}.{1} is the maximum number of members allowed in a {2} Edition network.".format(
networkid, EDITIONS[networkedition]["MaxMembers"], networkedition
),
)
memberadminpassword = member_configuration["FrameworkConfiguration"]["Fabric"][
"AdminPassword"
]
if admin_password_ok(memberadminpassword) is False:
raise BadRequestException("CreateMember", "Invalid request body")
member_id = get_member_id()
self.members[member_id] = ManagedBlockchainMember(
id=member_id,
networkid=networkid,
member_configuration=member_configuration,
region=self.region_name,
)
# Accept the invitation
self.invitations.get(invitationid).accept_invitation()
# Return the member ID
d = {"MemberId": member_id}
return d
def list_members(self, networkid):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"ListMembers", "Network {0} not found.".format(networkid)
)
membersfornetwork = []
for member_id in self.members:
if self.members.get(member_id).network_id == networkid:
membersfornetwork.append(self.members[member_id])
return membersfornetwork
def get_member(self, networkid, memberid):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"GetMember", "Network {0} not found.".format(networkid)
)
if memberid not in self.members:
raise ResourceNotFoundException(
"GetMember", "Member {0} not found.".format(memberid)
)
## Cannot get a member that has been deleted (it does still show up in list_members)
if self.members.get(memberid).member_status == "DELETED":
raise ResourceNotFoundException(
"GetMember", "Member {0} not found.".format(memberid)
)
return self.members.get(memberid)
def delete_member(self, networkid, memberid):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"DeleteMember", "Network {0} not found.".format(networkid)
)
if memberid not in self.members:
raise ResourceNotFoundException(
"DeleteMember", "Member {0} not found.".format(memberid)
)
self.members.get(memberid).delete()
# Is this the last member in the network? (all set to DELETED)
if number_of_members_in_network(
self.members, networkid, member_status="DELETED"
) == len(self.members):
# Set network status to DELETED for all invitations
for invitation_id in self.invitations:
if (
self.invitations.get(invitation_id).invitation_networkid
== networkid
):
self.invitations.get(invitation_id).set_network_status("DELETED")
# Remove network
del self.networks[networkid]
def update_member(self, networkid, memberid, logpublishingconfiguration):
# Check if network exists
if networkid not in self.networks:
raise ResourceNotFoundException(
"UpdateMember", "Network {0} not found.".format(networkid)
)
if memberid not in self.members:
raise ResourceNotFoundException(
"UpdateMember", "Member {0} not found.".format(memberid)
)
self.members.get(memberid).update(logpublishingconfiguration)
managedblockchain_backends = {}
for region in Session().get_available_regions("managedblockchain"):
managedblockchain_backends[region] = ManagedBlockchainBackend(region)
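A minimal usage sketch of the backend above, driven through boto3 and the mock_managedblockchain decorator registered in moto/__init__.py. The framework version, edition, voting policy and member values are illustrative and assume they pass the FRAMEWORKVERSIONS/EDITIONS checks:

import boto3
from moto import mock_managedblockchain

@mock_managedblockchain
def create_starter_network():
    client = boto3.client("managedblockchain", region_name="us-east-1")
    # create_network returns both the generated network ID and the initial member ID
    response = client.create_network(
        Name="testnetwork",
        Framework="HYPERLEDGER_FABRIC",
        FrameworkVersion="1.2",
        FrameworkConfiguration={"Fabric": {"Edition": "STARTER"}},
        VotingPolicy={
            "ApprovalThresholdPolicy": {
                "ThresholdPercentage": 50,
                "ProposalDurationInHours": 24,
                "ThresholdComparator": "GREATER_THAN",
            }
        },
        MemberConfiguration={
            "Name": "testmember",
            "FrameworkConfiguration": {
                "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
            },
            "LogPublishingConfiguration": {
                "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}}
            },
        },
    )
    return response["NetworkId"], response["MemberId"]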

View File

@ -0,0 +1,326 @@
from __future__ import unicode_literals
import json
from six.moves.urllib.parse import urlparse, parse_qs
from moto.core.responses import BaseResponse
from .models import managedblockchain_backends
from .utils import (
region_from_managedblckchain_url,
networkid_from_managedblockchain_url,
proposalid_from_managedblockchain_url,
invitationid_from_managedblockchain_url,
memberid_from_managedblockchain_url,
)
class ManagedBlockchainResponse(BaseResponse):
def __init__(self, backend):
super(ManagedBlockchainResponse, self).__init__()
self.backend = backend
@classmethod
def network_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._network_response(request, full_url, headers)
def _network_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
if method == "GET":
return self._all_networks_response(request, full_url, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._network_response_post(json_body, querystring, headers)
def _all_networks_response(self, request, full_url, headers):
mbcnetworks = self.backend.list_networks()
response = json.dumps(
{"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]}
)
headers["content-type"] = "application/json"
return 200, headers, response
def _network_response_post(self, json_body, querystring, headers):
name = json_body["Name"]
framework = json_body["Framework"]
frameworkversion = json_body["FrameworkVersion"]
frameworkconfiguration = json_body["FrameworkConfiguration"]
voting_policy = json_body["VotingPolicy"]
member_configuration = json_body["MemberConfiguration"]
# Optional
description = json_body.get("Description", None)
response = self.backend.create_network(
name,
framework,
frameworkversion,
frameworkconfiguration,
voting_policy,
member_configuration,
description,
)
return 200, headers, json.dumps(response)
@classmethod
def networkid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._networkid_response(request, full_url, headers)
def _networkid_response(self, request, full_url, headers):
method = request.method
if method == "GET":
network_id = networkid_from_managedblockchain_url(full_url)
return self._networkid_response_get(network_id, headers)
def _networkid_response_get(self, network_id, headers):
mbcnetwork = self.backend.get_network(network_id)
response = json.dumps({"Network": mbcnetwork.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
@classmethod
def proposal_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._proposal_response(request, full_url, headers)
def _proposal_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
if method == "GET":
return self._all_proposals_response(network_id, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._proposal_response_post(
network_id, json_body, querystring, headers
)
def _all_proposals_response(self, network_id, headers):
proposals = self.backend.list_proposals(network_id)
response = json.dumps(
{"Proposals": [proposal.to_dict() for proposal in proposals]}
)
headers["content-type"] = "application/json"
return 200, headers, response
def _proposal_response_post(self, network_id, json_body, querystring, headers):
memberid = json_body["MemberId"]
actions = json_body["Actions"]
# Optional
description = json_body.get("Description", None)
response = self.backend.create_proposal(
network_id, memberid, actions, description,
)
return 200, headers, json.dumps(response)
@classmethod
def proposalid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._proposalid_response(request, full_url, headers)
def _proposalid_response(self, request, full_url, headers):
method = request.method
network_id = networkid_from_managedblockchain_url(full_url)
if method == "GET":
proposal_id = proposalid_from_managedblockchain_url(full_url)
return self._proposalid_response_get(network_id, proposal_id, headers)
def _proposalid_response_get(self, network_id, proposal_id, headers):
proposal = self.backend.get_proposal(network_id, proposal_id)
response = json.dumps({"Proposal": proposal.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
@classmethod
def proposal_votes_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._proposal_votes_response(request, full_url, headers)
def _proposal_votes_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
proposal_id = proposalid_from_managedblockchain_url(full_url)
if method == "GET":
return self._all_proposal_votes_response(network_id, proposal_id, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._proposal_votes_response_post(
network_id, proposal_id, json_body, querystring, headers
)
def _all_proposal_votes_response(self, network_id, proposal_id, headers):
proposalvotes = self.backend.list_proposal_votes(network_id, proposal_id)
response = json.dumps({"ProposalVotes": proposalvotes})
headers["content-type"] = "application/json"
return 200, headers, response
def _proposal_votes_response_post(
self, network_id, proposal_id, json_body, querystring, headers
):
votermemberid = json_body["VoterMemberId"]
vote = json_body["Vote"]
self.backend.vote_on_proposal(
network_id, proposal_id, votermemberid, vote,
)
return 200, headers, ""
@classmethod
def invitation_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._invitation_response(request, full_url, headers)
def _invitation_response(self, request, full_url, headers):
method = request.method
if method == "GET":
return self._all_invitation_response(request, full_url, headers)
def _all_invitation_response(self, request, full_url, headers):
invitations = self.backend.list_invitations()
response = json.dumps(
{"Invitations": [invitation.to_dict() for invitation in invitations]}
)
headers["content-type"] = "application/json"
return 200, headers, response
@classmethod
def invitationid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._invitationid_response(request, full_url, headers)
def _invitationid_response(self, request, full_url, headers):
method = request.method
if method == "DELETE":
invitation_id = invitationid_from_managedblockchain_url(full_url)
return self._invitationid_response_delete(invitation_id, headers)
def _invitationid_response_delete(self, invitation_id, headers):
self.backend.reject_invitation(invitation_id)
headers["content-type"] = "application/json"
return 200, headers, ""
@classmethod
def member_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._member_response(request, full_url, headers)
def _member_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
network_id = networkid_from_managedblockchain_url(full_url)
if method == "GET":
return self._all_members_response(network_id, headers)
elif method == "POST":
json_body = json.loads(body.decode("utf-8"))
return self._member_response_post(
network_id, json_body, querystring, headers
)
def _all_members_response(self, network_id, headers):
members = self.backend.list_members(network_id)
response = json.dumps({"Members": [member.to_dict() for member in members]})
headers["content-type"] = "application/json"
return 200, headers, response
def _member_response_post(self, network_id, json_body, querystring, headers):
invitationid = json_body["InvitationId"]
member_configuration = json_body["MemberConfiguration"]
response = self.backend.create_member(
invitationid, network_id, member_configuration,
)
return 200, headers, json.dumps(response)
@classmethod
def memberid_response(clazz, request, full_url, headers):
region_name = region_from_managedblckchain_url(full_url)
response_instance = ManagedBlockchainResponse(
managedblockchain_backends[region_name]
)
return response_instance._memberid_response(request, full_url, headers)
def _memberid_response(self, request, full_url, headers):
method = request.method
if hasattr(request, "body"):
body = request.body
else:
body = request.data
network_id = networkid_from_managedblockchain_url(full_url)
member_id = memberid_from_managedblockchain_url(full_url)
if method == "GET":
return self._memberid_response_get(network_id, member_id, headers)
elif method == "PATCH":
json_body = json.loads(body.decode("utf-8"))
return self._memberid_response_patch(
network_id, member_id, json_body, headers
)
elif method == "DELETE":
return self._memberid_response_delete(network_id, member_id, headers)
def _memberid_response_get(self, network_id, member_id, headers):
member = self.backend.get_member(network_id, member_id)
response = json.dumps({"Member": member.get_format()})
headers["content-type"] = "application/json"
return 200, headers, response
def _memberid_response_patch(self, network_id, member_id, json_body, headers):
logpublishingconfiguration = json_body["LogPublishingConfiguration"]
self.backend.update_member(
network_id, member_id, logpublishingconfiguration,
)
return 200, headers, ""
def _memberid_response_delete(self, network_id, member_id, headers):
self.backend.delete_member(network_id, member_id)
headers["content-type"] = "application/json"
return 200, headers, ""

View File

@ -0,0 +1,16 @@
from __future__ import unicode_literals
from .responses import ManagedBlockchainResponse
url_bases = ["https?://managedblockchain.(.+).amazonaws.com"]
url_paths = {
"{0}/networks$": ManagedBlockchainResponse.network_response,
"{0}/networks/(?P<networkid>[^/.]+)$": ManagedBlockchainResponse.networkid_response,
"{0}/networks/(?P<networkid>[^/.]+)/proposals$": ManagedBlockchainResponse.proposal_response,
"{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)$": ManagedBlockchainResponse.proposalid_response,
"{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)/votes$": ManagedBlockchainResponse.proposal_votes_response,
"{0}/invitations$": ManagedBlockchainResponse.invitation_response,
"{0}/invitations/(?P<invitationid>[^/.]+)$": ManagedBlockchainResponse.invitationid_response,
"{0}/networks/(?P<networkid>[^/.]+)/members$": ManagedBlockchainResponse.member_response,
"{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)$": ManagedBlockchainResponse.memberid_response,
}

View File

@ -0,0 +1,106 @@
import random
import re
import string
from six.moves.urllib.parse import urlparse
def region_from_managedblckchain_url(url):
domain = urlparse(url).netloc
region = "us-east-1"
if "." in domain:
region = domain.split(".")[1]
return region
def networkid_from_managedblockchain_url(full_url):
id_search = re.search("\/n-[A-Z0-9]{26}", full_url, re.IGNORECASE)
return_id = None
if id_search:
return_id = id_search.group(0).replace("/", "")
return return_id
def get_network_id():
return "n-" + "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
)
def memberid_from_managedblockchain_url(full_url):
id_search = re.search("\/m-[A-Z0-9]{26}", full_url, re.IGNORECASE)
return_id = None
if id_search:
return_id = id_search.group(0).replace("/", "")
return return_id
def get_member_id():
return "m-" + "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
)
def proposalid_from_managedblockchain_url(full_url):
id_search = re.search("\/p-[A-Z0-9]{26}", full_url, re.IGNORECASE)
return_id = None
if id_search:
return_id = id_search.group(0).replace("/", "")
return return_id
def get_proposal_id():
return "p-" + "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
)
def invitationid_from_managedblockchain_url(full_url):
id_search = re.search("\/in-[A-Z0-9]{26}", full_url, re.IGNORECASE)
return_id = None
if id_search:
return_id = id_search.group(0).replace("/", "")
return return_id
def get_invitation_id():
return "in-" + "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
)
def member_name_exist_in_network(members, networkid, membername):
membernamexists = False
for member_id in members:
if members.get(member_id).network_id == networkid:
if members.get(member_id).name == membername:
membernamexists = True
break
return membernamexists
def number_of_members_in_network(members, networkid, member_status=None):
return len(
[
membid
for membid in members
if members.get(membid).network_id == networkid
and (
member_status is None
or members.get(membid).member_status == member_status
)
]
)
def admin_password_ok(password):
if not re.search("[a-z]", password):
return False
elif not re.search("[A-Z]", password):
return False
elif not re.search("[0-9]", password):
return False
elif re.search("['\"@\\/]", password):
return False
else:
return True
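A quick illustrative check of the helpers above (assuming they live at moto.managedblockchain.utils, as the relative imports in responses.py suggest; the URL is made up):

from moto.managedblockchain.utils import (
    admin_password_ok,
    get_network_id,
    networkid_from_managedblockchain_url,
    region_from_managedblckchain_url,
)

# Generated IDs are the resource prefix plus 26 upper-case alphanumerics
network_id = get_network_id()
assert network_id.startswith("n-") and len(network_id) == 28

# The *_from_managedblockchain_url helpers pull the ID (and region) back out of a request URL
url = "https://managedblockchain.us-east-1.amazonaws.com/networks/" + network_id
assert networkid_from_managedblockchain_url(url) == network_id
assert region_from_managedblckchain_url(url) == "us-east-1"

# Password policy: lower case, upper case and a digit required; ' " @ \ / forbidden
assert admin_password_ok("Admin12345") is True
assert admin_password_ok("admin12345") is False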

View File

@ -125,6 +125,9 @@ class OpsworkInstance(BaseModel):
def status(self): def status(self):
if self.instance is None: if self.instance is None:
return "stopped" return "stopped"
# OpsWorks reports the "running" state as "online"
elif self.instance._state.name == "running":
return "online"
return self.instance._state.name return self.instance._state.name
def to_dict(self): def to_dict(self):

View File

@ -136,3 +136,10 @@ class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError):
cluster_identifier cluster_identifier
), ),
) )
class ClusterAlreadyExistsFaultError(RedshiftClientError):
def __init__(self):
super(ClusterAlreadyExistsFaultError, self).__init__(
"ClusterAlreadyExists", "Cluster already exists"
)

View File

@ -10,6 +10,7 @@ from moto.core import BaseBackend, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds from moto.core.utils import iso_8601_datetime_with_milliseconds
from moto.ec2 import ec2_backends from moto.ec2 import ec2_backends
from .exceptions import ( from .exceptions import (
ClusterAlreadyExistsFaultError,
ClusterNotFoundError, ClusterNotFoundError,
ClusterParameterGroupNotFoundError, ClusterParameterGroupNotFoundError,
ClusterSecurityGroupNotFoundError, ClusterSecurityGroupNotFoundError,
@ -580,6 +581,8 @@ class RedshiftBackend(BaseBackend):
def create_cluster(self, **cluster_kwargs): def create_cluster(self, **cluster_kwargs):
cluster_identifier = cluster_kwargs["cluster_identifier"] cluster_identifier = cluster_kwargs["cluster_identifier"]
if cluster_identifier in self.clusters:
raise ClusterAlreadyExistsFaultError()
cluster = Cluster(self, **cluster_kwargs) cluster = Cluster(self, **cluster_kwargs)
self.clusters[cluster_identifier] = cluster self.clusters[cluster_identifier] = cluster
return cluster return cluster
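A hedged test sketch for the new duplicate-cluster guard (identifiers and credentials are illustrative):

import boto3
from botocore.exceptions import ClientError
from moto import mock_redshift

@mock_redshift
def creating_the_same_cluster_twice_fails():
    client = boto3.client("redshift", region_name="us-east-1")
    kwargs = dict(
        ClusterIdentifier="my-cluster",
        NodeType="dc2.large",
        MasterUsername="admin",
        MasterUserPassword="Password1",
    )
    client.create_cluster(**kwargs)
    try:
        client.create_cluster(**kwargs)
    except ClientError as err:
        # Raised by ClusterAlreadyExistsFaultError above
        assert err.response["Error"]["Code"] == "ClusterAlreadyExists"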

View File

@ -377,3 +377,12 @@ class NoSystemTags(S3ClientError):
super(NoSystemTags, self).__init__( super(NoSystemTags, self).__init__(
"InvalidTag", "System tags cannot be added/updated by requester" "InvalidTag", "System tags cannot be added/updated by requester"
) )
class NoSuchUpload(S3ClientError):
code = 404
def __init__(self):
super(NoSuchUpload, self).__init__(
"NoSuchUpload", "The specified multipart upload does not exist."
)

View File

@ -40,6 +40,7 @@ from .exceptions import (
NoSuchPublicAccessBlockConfiguration, NoSuchPublicAccessBlockConfiguration,
InvalidPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration,
WrongPublicAccessBlockAccountIdError, WrongPublicAccessBlockAccountIdError,
NoSuchUpload,
) )
from .utils import clean_key_name, _VersionedKeyStore from .utils import clean_key_name, _VersionedKeyStore
@ -1478,6 +1479,9 @@ class S3Backend(BaseBackend):
def cancel_multipart(self, bucket_name, multipart_id): def cancel_multipart(self, bucket_name, multipart_id):
bucket = self.get_bucket(bucket_name) bucket = self.get_bucket(bucket_name)
multipart_data = bucket.multiparts.get(multipart_id, None)
if not multipart_data:
raise NoSuchUpload()
del bucket.multiparts[multipart_id] del bucket.multiparts[multipart_id]
def list_multipart(self, bucket_name, multipart_id): def list_multipart(self, bucket_name, multipart_id):
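A hedged sketch of the behaviour this adds - aborting an unknown multipart upload should now surface NoSuchUpload instead of a KeyError (bucket, key and upload id are made up):

import boto3
from botocore.exceptions import ClientError
from moto import mock_s3

@mock_s3
def aborting_unknown_upload_raises():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="mybucket")
    try:
        client.abort_multipart_upload(
            Bucket="mybucket", Key="the-key", UploadId="does-not-exist"
        )
    except ClientError as err:
        assert err.response["Error"]["Code"] == "NoSuchUpload"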

View File

@ -7,3 +7,37 @@ class MessageRejectedError(RESTError):
def __init__(self, message): def __init__(self, message):
super(MessageRejectedError, self).__init__("MessageRejected", message) super(MessageRejectedError, self).__init__("MessageRejected", message)
class ConfigurationSetDoesNotExist(RESTError):
code = 400
def __init__(self, message):
super(ConfigurationSetDoesNotExist, self).__init__(
"ConfigurationSetDoesNotExist", message
)
class EventDestinationAlreadyExists(RESTError):
code = 400
def __init__(self, message):
super(EventDestinationAlreadyExists, self).__init__(
"EventDestinationAlreadyExists", message
)
class TemplateNameAlreadyExists(RESTError):
code = 400
def __init__(self, message):
super(TemplateNameAlreadyExists, self).__init__(
"TemplateNameAlreadyExists", message
)
class TemplateDoesNotExist(RESTError):
code = 400
def __init__(self, message):
super(TemplateDoesNotExist, self).__init__("TemplateDoesNotExist", message)

View File

@ -1,11 +1,18 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import datetime
import email import email
from email.utils import parseaddr from email.utils import parseaddr
from moto.core import BaseBackend, BaseModel from moto.core import BaseBackend, BaseModel
from moto.sns.models import sns_backends from moto.sns.models import sns_backends
from .exceptions import MessageRejectedError from .exceptions import (
MessageRejectedError,
ConfigurationSetDoesNotExist,
EventDestinationAlreadyExists,
TemplateNameAlreadyExists,
TemplateDoesNotExist,
)
from .utils import get_random_message_id from .utils import get_random_message_id
from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY
@ -81,7 +88,12 @@ class SESBackend(BaseBackend):
self.domains = [] self.domains = []
self.sent_messages = [] self.sent_messages = []
self.sent_message_count = 0 self.sent_message_count = 0
self.rejected_messages_count = 0
self.sns_topics = {} self.sns_topics = {}
self.config_set = {}
self.config_set_event_destination = {}
self.event_destinations = {}
self.templates = {}
def _is_verified_address(self, source): def _is_verified_address(self, source):
_, address = parseaddr(source) _, address = parseaddr(source)
@ -118,6 +130,7 @@ class SESBackend(BaseBackend):
if recipient_count > RECIPIENT_LIMIT: if recipient_count > RECIPIENT_LIMIT:
raise MessageRejectedError("Too many recipients.") raise MessageRejectedError("Too many recipients.")
if not self._is_verified_address(source): if not self._is_verified_address(source):
self.rejected_messages_count += 1
raise MessageRejectedError("Email address not verified %s" % source) raise MessageRejectedError("Email address not verified %s" % source)
self.__process_sns_feedback__(source, destinations, region) self.__process_sns_feedback__(source, destinations, region)
@ -135,6 +148,7 @@ class SESBackend(BaseBackend):
if recipient_count > RECIPIENT_LIMIT: if recipient_count > RECIPIENT_LIMIT:
raise MessageRejectedError("Too many recipients.") raise MessageRejectedError("Too many recipients.")
if not self._is_verified_address(source): if not self._is_verified_address(source):
self.rejected_messages_count += 1
raise MessageRejectedError("Email address not verified %s" % source) raise MessageRejectedError("Email address not verified %s" % source)
self.__process_sns_feedback__(source, destinations, region) self.__process_sns_feedback__(source, destinations, region)
@ -189,7 +203,7 @@ class SESBackend(BaseBackend):
def send_raw_email(self, source, destinations, raw_data, region): def send_raw_email(self, source, destinations, raw_data, region):
if source is not None: if source is not None:
_, source_email_address = parseaddr(source) _, source_email_address = parseaddr(source)
if source_email_address not in self.addresses: if not self._is_verified_address(source_email_address):
raise MessageRejectedError( raise MessageRejectedError(
"Did not have authority to send from email %s" "Did not have authority to send from email %s"
% source_email_address % source_email_address
@ -202,7 +216,7 @@ class SESBackend(BaseBackend):
raise MessageRejectedError("Source not specified") raise MessageRejectedError("Source not specified")
_, source_email_address = parseaddr(message["from"]) _, source_email_address = parseaddr(message["from"])
if source_email_address not in self.addresses: if not self._is_verified_address(source_email_address):
raise MessageRejectedError( raise MessageRejectedError(
"Did not have authority to send from email %s" "Did not have authority to send from email %s"
% source_email_address % source_email_address
@ -237,5 +251,48 @@ class SESBackend(BaseBackend):
return {} return {}
def create_configuration_set(self, configuration_set_name):
self.config_set[configuration_set_name] = 1
return {}
def create_configuration_set_event_destination(
self, configuration_set_name, event_destination
):
if self.config_set.get(configuration_set_name) is None:
raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.")
if self.event_destinations.get(event_destination["Name"]):
raise EventDestinationAlreadyExists("Duplicate Event destination Name.")
self.config_set_event_destination[configuration_set_name] = event_destination
self.event_destinations[event_destination["Name"]] = 1
return {}
def get_send_statistics(self):
statistics = {}
statistics["DeliveryAttempts"] = self.sent_message_count
statistics["Rejects"] = self.rejected_messages_count
statistics["Complaints"] = 0
statistics["Bounces"] = 0
statistics["Timestamp"] = datetime.datetime.utcnow()
return statistics
def add_template(self, template_info):
template_name = template_info["template_name"]
if self.templates.get(template_name, None):
raise TemplateNameAlreadyExists("Duplicate Template Name.")
self.templates[template_name] = template_info
def get_template(self, template_name):
if not self.templates.get(template_name, None):
raise TemplateDoesNotExist("Invalid Template Name.")
return self.templates[template_name]
def list_templates(self):
return list(self.templates.values())
ses_backend = SESBackend() ses_backend = SESBackend()
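A hedged sketch of the new rejected-message counter feeding get_send_statistics (addresses are illustrative; nothing is verified, so the send is rejected and counted):

import boto3
from botocore.exceptions import ClientError
from moto import mock_ses

@mock_ses
def rejected_sends_show_up_in_statistics():
    client = boto3.client("ses", region_name="us-east-1")
    try:
        client.send_email(
            Source="unverified@example.com",
            Destination={"ToAddresses": ["to@example.com"]},
            Message={
                "Subject": {"Data": "hi"},
                "Body": {"Text": {"Data": "hello"}},
            },
        )
    except ClientError as err:
        assert err.response["Error"]["Code"] == "MessageRejected"

    stats = client.get_send_statistics()["SendDataPoints"]
    assert stats[0]["Rejects"] == 1
    assert stats[0]["DeliveryAttempts"] == 0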

View File

@ -5,6 +5,7 @@ import six
from moto.core.responses import BaseResponse from moto.core.responses import BaseResponse
from .models import ses_backend from .models import ses_backend
from datetime import datetime
class EmailResponse(BaseResponse): class EmailResponse(BaseResponse):
@ -133,6 +134,71 @@ class EmailResponse(BaseResponse):
template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE) template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE)
return template.render() return template.render()
def get_send_statistics(self):
statistics = ses_backend.get_send_statistics()
template = self.response_template(GET_SEND_STATISTICS)
return template.render(all_statistics=[statistics])
def create_configuration_set(self):
configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0]
ses_backend.create_configuration_set(
configuration_set_name=configuration_set_name
)
template = self.response_template(CREATE_CONFIGURATION_SET)
return template.render()
def create_configuration_set_event_destination(self):
configuration_set_name = self._get_param("ConfigurationSetName")
is_configuration_event_enabled = self.querystring.get(
"EventDestination.Enabled"
)[0]
configuration_event_name = self.querystring.get("EventDestination.Name")[0]
event_topic_arn = self.querystring.get(
"EventDestination.SNSDestination.TopicARN"
)[0]
event_matching_types = self._get_multi_param(
"EventDestination.MatchingEventTypes.member"
)
event_destination = {
"Name": configuration_event_name,
"Enabled": is_configuration_event_enabled,
"EventMatchingTypes": event_matching_types,
"SNSDestination": event_topic_arn,
}
ses_backend.create_configuration_set_event_destination(
configuration_set_name=configuration_set_name,
event_destination=event_destination,
)
template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION)
return template.render()
def create_template(self):
template_data = self._get_dict_param("Template")
template_info = {}
template_info["text_part"] = template_data["._text_part"]
template_info["html_part"] = template_data["._html_part"]
template_info["template_name"] = template_data["._name"]
template_info["subject_part"] = template_data["._subject_part"]
template_info["Timestamp"] = datetime.utcnow()
ses_backend.add_template(template_info=template_info)
template = self.response_template(CREATE_TEMPLATE)
return template.render()
def get_template(self):
template_name = self._get_param("TemplateName")
template_data = ses_backend.get_template(template_name)
template = self.response_template(GET_TEMPLATE)
return template.render(template_data=template_data)
def list_templates(self):
email_templates = ses_backend.list_templates()
template = self.response_template(LIST_TEMPLATES)
return template.render(templates=email_templates)
VERIFY_EMAIL_IDENTITY = """<VerifyEmailIdentityResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/"> VERIFY_EMAIL_IDENTITY = """<VerifyEmailIdentityResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<VerifyEmailIdentityResult/> <VerifyEmailIdentityResult/>
@ -248,3 +314,74 @@ SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE = """<SetIdentityNotificationTopicRespo
<RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId> <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
</ResponseMetadata> </ResponseMetadata>
</SetIdentityNotificationTopicResponse>""" </SetIdentityNotificationTopicResponse>"""
GET_SEND_STATISTICS = """<GetSendStatisticsResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<GetSendStatisticsResult>
<SendDataPoints>
{% for statistics in all_statistics %}
<item>
<DeliveryAttempts>{{ statistics["DeliveryAttempts"] }}</DeliveryAttempts>
<Rejects>{{ statistics["Rejects"] }}</Rejects>
<Bounces>{{ statistics["Bounces"] }}</Bounces>
<Complaints>{{ statistics["Complaints"] }}</Complaints>
<Timestamp>{{ statistics["Timestamp"] }}</Timestamp>
</item>
{% endfor %}
</SendDataPoints>
<ResponseMetadata>
<RequestId>e0abcdfa-c866-11e0-b6d0-273d09173z49</RequestId>
</ResponseMetadata>
</GetSendStatisticsResult>
</GetSendStatisticsResponse>"""
CREATE_CONFIGURATION_SET = """<CreateConfigurationSetResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<CreateConfigurationSetResult/>
<ResponseMetadata>
<RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
</ResponseMetadata>
</CreateConfigurationSetResponse>"""
CREATE_CONFIGURATION_SET_EVENT_DESTINATION = """<CreateConfigurationSetEventDestinationResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<CreateConfigurationSetEventDestinationResult/>
<ResponseMetadata>
<RequestId>67e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
</ResponseMetadata>
</CreateConfigurationSetEventDestinationResponse>"""
CREATE_TEMPLATE = """<CreateTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<CreateTemplateResult/>
<ResponseMetadata>
<RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
</ResponseMetadata>
</CreateTemplateResponse>"""
GET_TEMPLATE = """<GetTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<GetTemplateResult>
<Template>
<TemplateName>{{ template_data["template_name"] }}</TemplateName>
<SubjectPart>{{ template_data["subject_part"] }}</SubjectPart>
<HtmlPart>{{ template_data["html_part"] }}</HtmlPart>
<TextPart>{{ template_data["text_part"] }}</TextPart>
</Template>
</GetTemplateResult>
<ResponseMetadata>
<RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
</ResponseMetadata>
</GetTemplateResponse>"""
LIST_TEMPLATES = """<ListTemplatesResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
<ListTemplatesResult>
<TemplatesMetadata>
{% for template in templates %}
<Item>
<Name>{{ template["template_name"] }}</Name>
<CreatedTimestamp>{{ template["Timestamp"] }}</CreatedTimestamp>
</Item>
{% endfor %}
</TemplatesMetadata>
</ListTemplatesResult>
<ResponseMetadata>
<RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
</ResponseMetadata>
</ListTemplatesResponse>"""
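A hedged sketch of the template round-trip these handlers enable (template content is illustrative and kept free of markup so the XML responses above render cleanly):

import boto3
from moto import mock_ses

@mock_ses
def template_roundtrip():
    client = boto3.client("ses", region_name="us-east-1")
    client.create_template(
        Template={
            "TemplateName": "MyTemplate",
            "SubjectPart": "Greetings",
            "TextPart": "Hello there",
            "HtmlPart": "Hello there",
        }
    )
    template = client.get_template(TemplateName="MyTemplate")["Template"]
    assert template["TemplateName"] == "MyTemplate"
    assert len(client.list_templates()["TemplatesMetadata"]) == 1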

View File

@ -514,6 +514,16 @@ class SimpleSystemManagerBackend(BaseBackend):
def get_parameters(self, names, with_decryption): def get_parameters(self, names, with_decryption):
result = [] result = []
if len(names) > 10:
raise ValidationException(
"1 validation error detected: "
"Value '[{}]' at 'names' failed to satisfy constraint: "
"Member must have length less than or equal to 10.".format(
", ".join(names)
)
)
for name in names: for name in names:
if name in self._parameters: if name in self._parameters:
result.append(self.get_parameter(name, with_decryption)) result.append(self.get_parameter(name, with_decryption))
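A hedged sketch of the new limit check (parameter names are made up; asking for more than ten names should now fail server-side validation):

import boto3
from botocore.exceptions import ClientError
from moto import mock_ssm

@mock_ssm
def more_than_ten_names_fails_validation():
    client = boto3.client("ssm", region_name="us-east-1")
    names = ["param-{}".format(i) for i in range(11)]
    try:
        client.get_parameters(Names=names, WithDecryption=False)
    except ClientError as err:
        assert err.response["Error"]["Code"] == "ValidationException"
        assert "length less than or equal to 10" in err.response["Error"]["Message"]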

View File

@ -7,18 +7,18 @@ import boto3
script_dir = os.path.dirname(os.path.abspath(__file__)) script_dir = os.path.dirname(os.path.abspath(__file__))
alternative_service_names = {'lambda': 'awslambda'} alternative_service_names = {'lambda': 'awslambda', 'dynamodb': 'dynamodb2'}
def get_moto_implementation(service_name): def get_moto_implementation(service_name):
service_name = service_name.replace("-", "") if "-" in service_name else service_name service_name = service_name.replace("-", "") if "-" in service_name else service_name
alt_service_name = alternative_service_names[service_name] if service_name in alternative_service_names else service_name alt_service_name = alternative_service_names[service_name] if service_name in alternative_service_names else service_name
if not hasattr(moto, alt_service_name): if hasattr(moto, "mock_{}".format(alt_service_name)):
return None mock = getattr(moto, "mock_{}".format(alt_service_name))
module = getattr(moto, alt_service_name) elif hasattr(moto, "mock_{}".format(service_name)):
if module is None: mock = getattr(moto, "mock_{}".format(service_name))
return None else:
mock = getattr(module, "mock_{}".format(service_name)) mock = None
if mock is None: if mock is None:
return None return None
backends = list(mock().backends.values()) backends = list(mock().backends.values())
@ -97,12 +97,14 @@ def write_implementation_coverage_to_file(coverage):
file.write("\n") file.write("\n")
file.write("## {}\n".format(service_name)) file.write("## {}\n".format(service_name))
file.write("{}% implemented\n".format(percentage_implemented)) file.write("<details>\n")
file.write("<summary>{}% implemented</summary>\n\n".format(percentage_implemented))
for op in operations: for op in operations:
if op in implemented: if op in implemented:
file.write("- [X] {}\n".format(op)) file.write("- [X] {}\n".format(op))
else: else:
file.write("- [ ] {}\n".format(op)) file.write("- [ ] {}\n".format(op))
file.write("</details>\n")
if __name__ == '__main__': if __name__ == '__main__':

View File

@ -1,5 +1,6 @@
from __future__ import unicode_literals from __future__ import unicode_literals
import json
import boto3 import boto3
from freezegun import freeze_time from freezegun import freeze_time
@ -1230,6 +1231,65 @@ def test_put_integration_response_requires_responseTemplate():
) )
@mock_apigateway
def test_put_integration_response_with_response_template():
client = boto3.client("apigateway", region_name="us-west-2")
response = client.create_rest_api(name="my_api", description="this is my api")
api_id = response["id"]
resources = client.get_resources(restApiId=api_id)
root_id = [resource for resource in resources["items"] if resource["path"] == "/"][
0
]["id"]
client.put_method(
restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE"
)
client.put_method_response(
restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
)
client.put_integration(
restApiId=api_id,
resourceId=root_id,
httpMethod="GET",
type="HTTP",
uri="http://httpbin.org/robots.txt",
integrationHttpMethod="POST",
)
with assert_raises(ClientError) as ex:
client.put_integration_response(
restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
)
ex.exception.response["Error"]["Code"].should.equal("BadRequestException")
ex.exception.response["Error"]["Message"].should.equal("Invalid request input")
client.put_integration_response(
restApiId=api_id,
resourceId=root_id,
httpMethod="GET",
statusCode="200",
selectionPattern="foobar",
responseTemplates={"application/json": json.dumps({"data": "test"})},
)
response = client.get_integration_response(
restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200"
)
# this is hard to match against, so remove it
response["ResponseMetadata"].pop("HTTPHeaders", None)
response["ResponseMetadata"].pop("RetryAttempts", None)
response.should.equal(
{
"statusCode": "200",
"selectionPattern": "foobar",
"ResponseMetadata": {"HTTPStatusCode": 200},
"responseTemplates": {"application/json": json.dumps({"data": "test"})},
}
)
@mock_apigateway @mock_apigateway
def test_put_integration_validation(): def test_put_integration_validation():
client = boto3.client("apigateway", region_name="us-west-2") client = boto3.client("apigateway", region_name="us-west-2")

View File

@ -1071,6 +1071,7 @@ def test_autoscaling_describe_policies_boto3():
response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down") response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down")
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_detach_one_instance_decrement(): def test_detach_one_instance_decrement():
@ -1096,6 +1097,19 @@ def test_detach_one_instance_decrement():
], ],
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1111,6 +1125,9 @@ def test_detach_one_instance_decrement():
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]]
)
# test to ensure tag has been removed # test to ensure tag has been removed
response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) response = ec2_client.describe_instances(InstanceIds=[instance_to_detach])
@ -1122,7 +1139,14 @@ def test_detach_one_instance_decrement():
tags = response["Reservations"][0]["Instances"][0]["Tags"] tags = response["Reservations"][0]["Instances"][0]["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_detach_one_instance(): def test_detach_one_instance():
@ -1148,6 +1172,19 @@ def test_detach_one_instance():
], ],
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1173,7 +1210,14 @@ def test_detach_one_instance():
tags = response["Reservations"][0]["Instances"][0]["Tags"] tags = response["Reservations"][0]["Instances"][0]["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_standby_one_instance_decrement(): def test_standby_one_instance_decrement():
@ -1199,6 +1243,19 @@ def test_standby_one_instance_decrement():
], ],
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1226,7 +1283,14 @@ def test_standby_one_instance_decrement():
tags = instance["Tags"] tags = instance["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_standby_one_instance(): def test_standby_one_instance():
@ -1252,6 +1316,19 @@ def test_standby_one_instance():
], ],
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"]
instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"]
@ -1279,6 +1356,12 @@ def test_standby_one_instance():
tags = instance["Tags"] tags = instance["Tags"]
tags.should.have.length_of(2) tags.should.have.length_of(2)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb @mock_elb
@mock_autoscaling @mock_autoscaling
@ -1338,8 +1421,12 @@ def test_standby_elb_update():
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_standby_terminate_instance_decrement(): def test_standby_terminate_instance_decrement():
@ -1366,6 +1453,18 @@ def test_standby_terminate_instance_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId" "InstanceId"
@ -1409,7 +1508,14 @@ def test_standby_terminate_instance_decrement():
"terminated" "terminated"
) )
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_standby_terminate_instance_no_decrement(): def test_standby_terminate_instance_no_decrement():
@ -1436,6 +1542,18 @@ def test_standby_terminate_instance_no_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId" "InstanceId"
@ -1479,7 +1597,14 @@ def test_standby_terminate_instance_no_decrement():
"terminated" "terminated"
) )
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby_terminate.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling @mock_autoscaling
@mock_ec2 @mock_ec2
def test_standby_detach_instance_decrement(): def test_standby_detach_instance_decrement():
@ -1506,6 +1631,18 @@ def test_standby_detach_instance_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"], VPCZoneIdentifier=mocked_networking["subnet1"],
) )
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1547,7 +1684,14 @@ def test_standby_detach_instance_decrement():
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_detach_instance_no_decrement():
@ -1574,6 +1718,18 @@ def test_standby_detach_instance_no_decrement():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1615,7 +1771,14 @@ def test_standby_detach_instance_no_decrement():
response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach])
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2)
instance_to_standby_detach.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_standby_exit_standby():
@ -1642,6 +1805,18 @@ def test_standby_exit_standby():
VPCZoneIdentifier=mocked_networking["subnet1"],
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][
"InstanceId"
@ -1683,7 +1858,14 @@ def test_standby_exit_standby():
)
response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running")
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3)
instance_to_standby_exit_standby.should.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_attach_one_instance():
@ -1711,6 +1893,18 @@ def test_attach_one_instance():
NewInstancesProtectedFromScaleIn=True,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
ec2 = boto3.resource("ec2", "us-east-1")
instances_to_add = [
x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1)
@ -1727,6 +1921,9 @@ def test_attach_one_instance():
for instance in instances:
instance["ProtectedFromScaleIn"].should.equal(True)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3)
@mock_autoscaling
@mock_ec2
@ -1948,6 +2145,7 @@ def test_terminate_instance_via_ec2_in_autoscaling_group():
replaced_instance_id.should_not.equal(original_instance_id)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_decrement():
@ -1966,6 +2164,18 @@ def test_terminate_instance_in_auto_scaling_group_decrement():
NewInstancesProtectedFromScaleIn=False,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
@ -1979,7 +2189,11 @@ def test_terminate_instance_in_auto_scaling_group_decrement():
response["AutoScalingGroups"][0]["Instances"].should.equal([])
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0)
@mock_elb
@mock_autoscaling
@mock_ec2
def test_terminate_instance_in_auto_scaling_group_no_decrement():
@ -1998,6 +2212,18 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement():
NewInstancesProtectedFromScaleIn=False,
)
elb_client = boto3.client("elb", region_name="us-east-1")
elb_client.create_load_balancer(
LoadBalancerName="my-lb",
Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}],
AvailabilityZones=["us-east-1a", "us-east-1b"],
)
response = client.attach_load_balancers(
AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"]
)
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"])
original_instance_id = next(
instance["InstanceId"]
@ -2014,3 +2240,9 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement():
)
replaced_instance_id.should_not.equal(original_instance_id)
response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1)
response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"])
list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1)
original_instance_id.shouldnt.be.within(
[x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]]
)

View File

@ -1,5 +1,6 @@
from __future__ import unicode_literals, print_function
from datetime import datetime
from decimal import Decimal
import boto
@ -2049,6 +2050,141 @@ def test_set_ttl():
resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED")
@mock_dynamodb2
def test_describe_continuous_backups():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
table_name = client.create_table(
TableName="test",
AttributeDefinitions=[
{"AttributeName": "client", "AttributeType": "S"},
{"AttributeName": "app", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "client", "KeyType": "HASH"},
{"AttributeName": "app", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
)["TableDescription"]["TableName"]
# when
response = client.describe_continuous_backups(TableName=table_name)
# then
response["ContinuousBackupsDescription"].should.equal(
{
"ContinuousBackupsStatus": "ENABLED",
"PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"},
}
)
@mock_dynamodb2
def test_describe_continuous_backups_errors():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
# when
with assert_raises(Exception) as e:
client.describe_continuous_backups(TableName="not-existing-table")
# then
ex = e.exception
ex.operation_name.should.equal("DescribeContinuousBackups")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("TableNotFoundException")
ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table")
@mock_dynamodb2
def test_update_continuous_backups():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
table_name = client.create_table(
TableName="test",
AttributeDefinitions=[
{"AttributeName": "client", "AttributeType": "S"},
{"AttributeName": "app", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "client", "KeyType": "HASH"},
{"AttributeName": "app", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
)["TableDescription"]["TableName"]
# when
response = client.update_continuous_backups(
TableName=table_name,
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
# then
response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal(
"ENABLED"
)
point_in_time = response["ContinuousBackupsDescription"][
"PointInTimeRecoveryDescription"
]
earliest_datetime = point_in_time["EarliestRestorableDateTime"]
earliest_datetime.should.be.a(datetime)
latest_datetime = point_in_time["LatestRestorableDateTime"]
latest_datetime.should.be.a(datetime)
point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED")
# when
# a second update should not change anything
response = client.update_continuous_backups(
TableName=table_name,
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
# then
response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal(
"ENABLED"
)
point_in_time = response["ContinuousBackupsDescription"][
"PointInTimeRecoveryDescription"
]
point_in_time["EarliestRestorableDateTime"].should.equal(earliest_datetime)
point_in_time["LatestRestorableDateTime"].should.equal(latest_datetime)
point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED")
# when
response = client.update_continuous_backups(
TableName=table_name,
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": False},
)
# then
response["ContinuousBackupsDescription"].should.equal(
{
"ContinuousBackupsStatus": "ENABLED",
"PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"},
}
)
@mock_dynamodb2
def test_update_continuous_backups_errors():
# given
client = boto3.client("dynamodb", region_name="us-east-1")
# when
with assert_raises(Exception) as e:
client.update_continuous_backups(
TableName="not-existing-table",
PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True},
)
# then
ex = e.exception
ex.operation_name.should.equal("UpdateContinuousBackups")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("TableNotFoundException")
ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table")
# https://github.com/spulec/moto/issues/1043
@mock_dynamodb2
def test_query_missing_expr_names():
@ -4298,13 +4434,8 @@ def test_transact_write_items_put_conditional_expressions():
]
)
# Assert the exception is correct
ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Assert all are present
items = dynamodb.scan(TableName="test-table")["Items"]
items.should.have.length_of(1)
@ -4393,13 +4524,8 @@ def test_transact_write_items_conditioncheck_fails():
]
)
# Assert the exception is correct
ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Assert the original email address is still present
items = dynamodb.scan(TableName="test-table")["Items"]
@ -4495,13 +4621,8 @@ def test_transact_write_items_delete_with_failed_condition_expression():
]
)
# Assert the exception is correct
ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Assert the original item is still present
items = dynamodb.scan(TableName="test-table")["Items"]
items.should.have.length_of(1)
@ -4573,13 +4694,8 @@ def test_transact_write_items_update_with_failed_condition_expression():
]
)
# Assert the exception is correct
ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Assert the original item is still present
items = dynamodb.scan(TableName="test-table")["Items"]
items.should.have.length_of(1)
@ -5029,3 +5145,126 @@ def test_update_item_atomic_counter_return_values():
"v" in response["Attributes"] "v" in response["Attributes"]
), "v has been updated, and should be returned here" ), "v has been updated, and should be returned here"
response["Attributes"]["v"]["N"].should.equal("8") response["Attributes"]["v"]["N"].should.equal("8")
@mock_dynamodb2
def test_update_item_atomic_counter_from_zero():
table = "table_t"
ddb_mock = boto3.client("dynamodb", region_name="eu-west-1")
ddb_mock.create_table(
TableName=table,
KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
key = {"t_id": {"S": "item1"}}
ddb_mock.put_item(
TableName=table, Item=key,
)
ddb_mock.update_item(
TableName=table,
Key=key,
UpdateExpression="add n_i :inc1, n_f :inc2",
ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "-0.5"}},
)
updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"]
assert updated_item["n_i"]["N"] == "1.2"
assert updated_item["n_f"]["N"] == "-0.5"
@mock_dynamodb2
def test_update_item_add_to_non_existent_set():
table = "table_t"
ddb_mock = boto3.client("dynamodb", region_name="eu-west-1")
ddb_mock.create_table(
TableName=table,
KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
key = {"t_id": {"S": "item1"}}
ddb_mock.put_item(
TableName=table, Item=key,
)
ddb_mock.update_item(
TableName=table,
Key=key,
UpdateExpression="add s_i :s1",
ExpressionAttributeValues={":s1": {"SS": ["hello"]}},
)
updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"]
assert updated_item["s_i"]["SS"] == ["hello"]
@mock_dynamodb2
def test_update_item_add_to_non_existent_number_set():
table = "table_t"
ddb_mock = boto3.client("dynamodb", region_name="eu-west-1")
ddb_mock.create_table(
TableName=table,
KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}],
BillingMode="PAY_PER_REQUEST",
)
key = {"t_id": {"S": "item1"}}
ddb_mock.put_item(
TableName=table, Item=key,
)
ddb_mock.update_item(
TableName=table,
Key=key,
UpdateExpression="add s_i :s1",
ExpressionAttributeValues={":s1": {"NS": ["3"]}},
)
updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"]
assert updated_item["s_i"]["NS"] == ["3"]
@mock_dynamodb2
def test_transact_write_items_fails_with_transaction_canceled_exception():
table_schema = {
"KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}],
"AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},],
}
dynamodb = boto3.client("dynamodb", region_name="us-east-1")
dynamodb.create_table(
TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema
)
# Insert one item
dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}})
# Update two items, the one that exists and another that doesn't
with assert_raises(ClientError) as ex:
dynamodb.transact_write_items(
TransactItems=[
{
"Update": {
"Key": {"id": {"S": "foo"}},
"TableName": "test-table",
"UpdateExpression": "SET #k = :v",
"ConditionExpression": "attribute_exists(id)",
"ExpressionAttributeNames": {"#k": "key"},
"ExpressionAttributeValues": {":v": {"S": "value"}},
}
},
{
"Update": {
"Key": {"id": {"S": "doesnotexist"}},
"TableName": "test-table",
"UpdateExpression": "SET #e = :v",
"ConditionExpression": "attribute_exists(id)",
"ExpressionAttributeNames": {"#e": "key"},
"ExpressionAttributeValues": {":v": {"S": "value"}},
}
},
]
)
ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.exception.response["Error"]["Message"].should.equal(
"Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]"
)

View File

@ -1307,16 +1307,16 @@ def test_update_item_add_with_expression():
ExpressionAttributeValues={":v": {"item4"}},
)
current_item["str_set"] = current_item["str_set"].union({"item4"})
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a string value to a non-existing set
table.update_item(
Key=item_key,
UpdateExpression="ADD non_existing_str_set :v",
ExpressionAttributeValues={":v": {"item4"}},
)
current_item["non_existing_str_set"] = {"item4"}
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a num value to a num set
table.update_item(
@ -1325,7 +1325,7 @@ def test_update_item_add_with_expression():
ExpressionAttributeValues={":v": {6}},
)
current_item["num_set"] = current_item["num_set"].union({6})
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a value to a number value
table.update_item(
@ -1334,7 +1334,7 @@ def test_update_item_add_with_expression():
ExpressionAttributeValues={":v": 20},
)
current_item["num_val"] = current_item["num_val"] + 20
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to add a number value to a string set, should raise Client Error
table.update_item.when.called_with(
@ -1342,7 +1342,7 @@ def test_update_item_add_with_expression():
UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": 20},
).should.have.raised(ClientError)
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to add a number set to the string set, should raise a ClientError
table.update_item.when.called_with(
@ -1350,7 +1350,7 @@ def test_update_item_add_with_expression():
UpdateExpression="ADD str_set :v",
ExpressionAttributeValues={":v": {20}},
).should.have.raised(ClientError)
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Attempt to update with a bad expression
table.update_item.when.called_with(
@ -1388,17 +1388,18 @@ def test_update_item_add_with_nested_sets():
current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union(
{"item4"}
)
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
# Update item to add a string value to a non-existing set
# Should raise
table.update_item(
Key=item_key,
UpdateExpression="ADD #ns.#ne :v",
ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"},
ExpressionAttributeValues={":v": {"new_item"}},
)
current_item["nested"]["non_existing_str_set"] = {"new_item"}
assert dict(table.get_item(Key=item_key)["Item"]) == current_item
@mock_dynamodb2

View File

@ -1126,6 +1126,111 @@ def test_run_instance_with_keypair():
instance.key_name.should.equal("keypair_name")
@mock_ec2
def test_run_instance_with_block_device_mappings():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_ebs():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}],
}
with assert_raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.exception.response["Error"]["Code"].should.equal("MissingParameter")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.exception.response["Error"]["Message"].should.equal(
"The request must contain the parameter ebs"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_missing_size():
ec2_client = boto3.client("ec2", region_name="us-east-1")
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}}
],
}
with assert_raises(ClientError) as ex:
ec2_client.run_instances(**kwargs)
ex.exception.response["Error"]["Code"].should.equal("MissingParameter")
ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.exception.response["Error"]["Message"].should.equal(
"The request must contain the parameter size or snapshotId"
)
@mock_ec2
def test_run_instance_with_block_device_mappings_from_snapshot():
ec2_client = boto3.client("ec2", region_name="us-east-1")
ec2_resource = boto3.resource("ec2", region_name="us-east-1")
volume_details = {
"AvailabilityZone": "1a",
"Size": 30,
}
volume = ec2_resource.create_volume(**volume_details)
snapshot = volume.create_snapshot()
kwargs = {
"MinCount": 1,
"MaxCount": 1,
"ImageId": "ami-d3adb33f",
"KeyName": "the_key",
"InstanceType": "t1.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": snapshot.snapshot_id}}
],
}
ec2_client.run_instances(**kwargs)
instances = ec2_client.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(30)
volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id)
@mock_ec2_deprecated
def test_describe_instance_status_no_instances():
conn = boto.connect_ec2("the_key", "the_secret")

View File

@ -1,6 +1,6 @@
import boto3
from moto import mock_ec2instanceconnect
pubkey = """ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABAQDV5+voluw2zmzqpqCAqtsyoP01TQ8Ydx1eS1yD6wUsHcPqMIqpo57YxiC8XPwrdeKQ6GG6MC3bHsgXoPypGP0LyixbiuLTU31DnnqorcHt4bWs6rQa7dK2pCCflz2fhYRt5ZjqSNsAKivIbqkH66JozN0SySIka3kEV79GdB0BicioKeEJlCwM9vvxafyzjWf/z8E0lh4ni3vkLpIVJ0t5l+Qd9QMJrT6Is0SCQPVagTYZoi8+fWDoGsBa8vyRwDjEzBl28ZplKh9tSyDkRIYszWTpmK8qHiqjLYZBfAxXjGJbEYL1iig4ZxvbYzKEiKSBi1ZMW9iWjHfZDZuxXAmB
@ -8,7 +8,7 @@ example
""" """
@mock_ec2_instance_connect @mock_ec2instanceconnect
def test_send_ssh_public_key(): def test_send_ssh_public_key():
client = boto3.client("ec2-instance-connect", region_name="us-east-1") client = boto3.client("ec2-instance-connect", region_name="us-east-1")
fake_request_id = "example-2a47-4c91-9700-e37e85162cb6" fake_request_id = "example-2a47-4c91-9700-e37e85162cb6"

View File

@ -9,6 +9,38 @@ from botocore.exceptions import ClientError
from nose.tools import assert_raises
def generate_thing_group_tree(iot_client, tree_dict, _parent=None):
"""
Generates a thing group tree from the given nested structure (see the usage sketch after this helper).
:param iot_client: the boto3 IoT client
:param tree_dict: dictionary keyed by group name, where each value is a nested sub-tree, e.g.
tree_dict = {
"group_name_1a":{
"group_name_2a":{
"group_name_3a":{} or None
},
},
"group_name_1b":{}
}
:return: a dictionary of created groups, keyed by group name
"""
if tree_dict is None:
tree_dict = {}
created_dict = {}
for group_name in tree_dict.keys():
params = {"thingGroupName": group_name}
if _parent:
params["parentGroupName"] = _parent
created_group = iot_client.create_thing_group(**params)
created_dict[group_name] = created_group
subtree_dict = generate_thing_group_tree(
iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name
)
created_dict.update(subtree_dict)
return created_dict
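A minimal usage sketch for this helper, assuming generate_thing_group_tree is in scope of a moto-mocked test module; the group names below are illustrative only and do not appear in the test suite:

# Hypothetical example: build a two-level tree and inspect the responses the helper collects.
import boto3
from moto import mock_iot

@mock_iot
def example_generate_thing_group_tree_usage():
    client = boto3.client("iot", region_name="ap-northeast-1")
    catalog = generate_thing_group_tree(client, {"root-a": {"leaf-a1": {}}, "root-b": {}})
    # Every created group is keyed by name; each value is the raw create_thing_group response.
    assert catalog["leaf-a1"]["thingGroupName"] == "leaf-a1"
    assert "thingGroupArn" in catalog["root-b"]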
@mock_iot
def test_attach_policy():
client = boto3.client("iot", region_name="ap-northeast-1")
@ -756,25 +788,143 @@ def test_delete_principal_thing():
client.delete_certificate(certificateId=cert_id)
class TestListThingGroup:
group_name_1a = "my-group-name-1a"
group_name_1b = "my-group-name-1b"
group_name_2a = "my-group-name-2a"
group_name_2b = "my-group-name-2b"
group_name_3a = "my-group-name-3a"
group_name_3b = "my-group-name-3b"
group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d"
tree_dict = {
group_name_1a: {
group_name_2a: {group_name_3a: {}, group_name_3b: {}},
group_name_2b: {group_name_3c: {}, group_name_3d: {}},
},
group_name_1b: {},
}
@mock_iot
def test_should_list_all_groups(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups()
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(8)
@mock_iot
def test_should_list_all_groups_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
@mock_iot
def test_should_list_all_groups_filtered_by_parent(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(parentGroup=self.group_name_1a)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(6)
resp = client.list_thing_groups(parentGroup=self.group_name_2a)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(parentGroup=self.group_name_1b)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
with assert_raises(ClientError) as e:
client.list_thing_groups(parentGroup="inexistant-group-name")
e.exception.response["Error"]["Code"].should.equal(
"ResourceNotFoundException"
)
@mock_iot
def test_should_list_all_groups_filtered_by_parent_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(namePrefixFilter="my-group-name-1")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(namePrefixFilter="my-group-name-3")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(4)
resp = client.list_thing_groups(namePrefixFilter="prefix-which-doesn-not-match")
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-1", recursive=False
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-3", recursive=False
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self):
# setup
client = boto3.client("iot", region_name="ap-northeast-1")
group_catalog = generate_thing_group_tree(client, self.tree_dict)
# test
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(2)
resp = client.list_thing_groups(
namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(4)
resp = client.list_thing_groups(
namePrefixFilter="prefix-which-doesn-not-match",
parentGroup=self.group_name_1a,
)
resp.should.have.key("thingGroups")
resp["thingGroups"].should.have.length_of(0)
@mock_iot
def test_delete_thing_group():
client = boto3.client("iot", region_name="ap-northeast-1")
group_name_1a = "my-group-name-1a"
group_name_2a = "my-group-name-2a"
tree_dict = {
group_name_1a: {group_name_2a: {},},
}
group_catalog = generate_thing_group_tree(client, tree_dict)
# delete group with child
try:
@ -809,56 +959,14 @@ def test_describe_thing_group_metadata_hierarchy():
group_name_3c = "my-group-name-3c" group_name_3c = "my-group-name-3c"
group_name_3d = "my-group-name-3d" group_name_3d = "my-group-name-3d"
# --1a tree_dict = {
# |--2a group_name_1a: {
# | |--3a group_name_2a: {group_name_3a: {}, group_name_3b: {}},
# | |--3b group_name_2b: {group_name_3c: {}, group_name_3d: {}},
# | },
# |--2b group_name_1b: {},
# |--3c }
# |--3d group_catalog = generate_thing_group_tree(client, tree_dict)
# --1b
# create thing groups tree
# 1
thing_group1a = client.create_thing_group(thingGroupName=group_name_1a)
thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a)
thing_group1a.should.have.key("thingGroupArn")
thing_group1b = client.create_thing_group(thingGroupName=group_name_1b)
thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b)
thing_group1b.should.have.key("thingGroupArn")
# 2
thing_group2a = client.create_thing_group(
thingGroupName=group_name_2a, parentGroupName=group_name_1a
)
thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a)
thing_group2a.should.have.key("thingGroupArn")
thing_group2b = client.create_thing_group(
thingGroupName=group_name_2b, parentGroupName=group_name_1a
)
thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b)
thing_group2b.should.have.key("thingGroupArn")
# 3
thing_group3a = client.create_thing_group(
thingGroupName=group_name_3a, parentGroupName=group_name_2a
)
thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a)
thing_group3a.should.have.key("thingGroupArn")
thing_group3b = client.create_thing_group(
thingGroupName=group_name_3b, parentGroupName=group_name_2a
)
thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b)
thing_group3b.should.have.key("thingGroupArn")
thing_group3c = client.create_thing_group(
thingGroupName=group_name_3c, parentGroupName=group_name_2b
)
thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c)
thing_group3c.should.have.key("thingGroupArn")
thing_group3d = client.create_thing_group(
thingGroupName=group_name_3d, parentGroupName=group_name_2b
)
thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d)
thing_group3d.should.have.key("thingGroupArn")
# describe groups # describe groups
# groups level 1 # groups level 1
@ -910,7 +1018,7 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description2a.should.have.key("version")
# 2b
thing_group_description2b = client.describe_thing_group(
@ -936,7 +1044,7 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description2b.should.have.key("version")
# groups level 3
# 3a
@ -963,13 +1071,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2a]["thingGroupArn"])
thing_group_description3a.should.have.key("version")
# 3b
thing_group_description3b = client.describe_thing_group(
@ -995,13 +1103,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2a)
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2a]["thingGroupArn"])
thing_group_description3b.should.have.key("version")
# 3c
thing_group_description3c = client.describe_thing_group(
@ -1027,13 +1135,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2b]["thingGroupArn"])
thing_group_description3c.should.have.key("version")
# 3d
thing_group_description3d = client.describe_thing_group(
@ -1059,13 +1167,13 @@ def test_describe_thing_group_metadata_hierarchy():
].should.match(group_name_1a)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][
"groupArn"
].should.match(group_catalog[group_name_1a]["thingGroupArn"])
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupName"
].should.match(group_name_2b)
thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][
"groupArn"
].should.match(group_catalog[group_name_2b]["thingGroupArn"])
thing_group_description3d.should.have.key("version")

View File

@ -1,10 +1,17 @@
import base64
import json
import time
import zlib
from io import BytesIO
from zipfile import ZipFile, ZIP_DEFLATED
import boto3
import os
import sure  # noqa
import six
from botocore.exceptions import ClientError
from moto import mock_logs, settings, mock_lambda, mock_iam
from nose.tools import assert_raises
from nose import SkipTest
@ -425,3 +432,408 @@ def test_untag_log_group():
assert response["tags"] == remaining_tags assert response["tags"] == remaining_tags
response = conn.delete_log_group(logGroupName=log_group_name) response = conn.delete_log_group(logGroupName=log_group_name)
@mock_logs
def test_describe_subscription_filters():
# given
client = boto3.client("logs", "us-east-1")
log_group_name = "/test"
client.create_log_group(logGroupName=log_group_name)
# when
response = client.describe_subscription_filters(logGroupName=log_group_name)
# then
response["subscriptionFilters"].should.have.length_of(0)
@mock_logs
def test_describe_subscription_filters_errors():
# given
client = boto3.client("logs", "us-east-1")
# when
with assert_raises(ClientError) as e:
client.describe_subscription_filters(logGroupName="not-existing-log-group",)
# then
ex = e.exception
ex.operation_name.should.equal("DescribeSubscriptionFilters")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
@mock_lambda
@mock_logs
def test_put_subscription_filter_update():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
log_stream_name = "stream"
client_logs.create_log_group(logGroupName=log_group_name)
client_logs.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
# when
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(1)
filter = response["subscriptionFilters"][0]
creation_time = filter["creationTime"]
creation_time.should.be.a(int)
filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
filter["distribution"] = "ByLogStream"
filter["logGroupName"] = "/test"
filter["filterName"] = "test"
filter["filterPattern"] = ""
# when
# to update an existing subscription filter the 'filterName' must be identical
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="[]",
destinationArn=function_arn,
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(1)
filter = response["subscriptionFilters"][0]
filter["creationTime"].should.equal(creation_time)
filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
filter["distribution"] = "ByLogStream"
filter["logGroupName"] = "/test"
filter["filterName"] = "test"
filter["filterPattern"] = "[]"
# when
# only one subscription filter can be associated with a log group
with assert_raises(ClientError) as e:
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test-2",
filterPattern="",
destinationArn=function_arn,
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("LimitExceededException")
ex.response["Error"]["Message"].should.equal("Resource limit exceeded.")
@mock_lambda
@mock_logs
def test_put_subscription_filter_with_lambda():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
log_stream_name = "stream"
client_logs.create_log_group(logGroupName=log_group_name)
client_logs.create_log_stream(
logGroupName=log_group_name, logStreamName=log_stream_name
)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
# when
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(1)
filter = response["subscriptionFilters"][0]
filter["creationTime"].should.be.a(int)
filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test"
filter["distribution"] = "ByLogStream"
filter["logGroupName"] = "/test"
filter["filterName"] = "test"
filter["filterPattern"] = ""
# when
client_logs.put_log_events(
logGroupName=log_group_name,
logStreamName=log_stream_name,
logEvents=[
{"timestamp": 0, "message": "test"},
{"timestamp": 0, "message": "test 2"},
],
)
# then
msg_showed_up, received_message = _wait_for_log_msg(
client_logs, "/aws/lambda/test", "awslogs"
)
assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format(
received_message
)
data = json.loads(received_message)["awslogs"]["data"]
response = json.loads(
zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8")
)
response["messageType"].should.equal("DATA_MESSAGE")
response["owner"].should.equal("123456789012")
response["logGroup"].should.equal("/test")
response["logStream"].should.equal("stream")
response["subscriptionFilters"].should.equal(["test"])
log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"])
log_events.should.have.length_of(2)
log_events[0]["id"].should.be.a(int)
log_events[0]["message"].should.equal("test")
log_events[0]["timestamp"].should.equal(0)
log_events[1]["id"].should.be.a(int)
log_events[1]["message"].should.equal("test 2")
log_events[1]["timestamp"].should.equal(0)
@mock_logs
def test_put_subscription_filter_errors():
# given
client = boto3.client("logs", "us-east-1")
log_group_name = "/test"
client.create_log_group(logGroupName=log_group_name)
# when
with assert_raises(ClientError) as e:
client.put_subscription_filter(
logGroupName="not-existing-log-group",
filterName="test",
filterPattern="",
destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test",
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
# when
with assert_raises(ClientError) as e:
client.put_subscription_filter(
logGroupName="/test",
filterName="test",
filterPattern="",
destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing",
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("InvalidParameterException")
ex.response["Error"]["Message"].should.equal(
"Could not execute the lambda function. "
"Make sure you have given CloudWatch Logs permission to execute your function."
)
# when
with assert_raises(ClientError) as e:
client.put_subscription_filter(
logGroupName="/test",
filterName="test",
filterPattern="",
destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing",
)
# then
ex = e.exception
ex.operation_name.should.equal("PutSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("InvalidParameterException")
ex.response["Error"]["Message"].should.equal(
"Could not execute the lambda function. "
"Make sure you have given CloudWatch Logs permission to execute your function."
)
@mock_lambda
@mock_logs
def test_delete_subscription_filter():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
client_logs.create_log_group(logGroupName=log_group_name)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# when
client_logs.delete_subscription_filter(
logGroupName="/test", filterName="test",
)
# then
response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
response["subscriptionFilters"].should.have.length_of(0)
@mock_lambda
@mock_logs
def test_delete_subscription_filter_errors():
# given
region_name = "us-east-1"
client_lambda = boto3.client("lambda", region_name)
client_logs = boto3.client("logs", region_name)
log_group_name = "/test"
client_logs.create_log_group(logGroupName=log_group_name)
function_arn = client_lambda.create_function(
FunctionName="test",
Runtime="python3.8",
Role=_get_role_name(region_name),
Handler="lambda_function.lambda_handler",
Code={"ZipFile": _get_test_zip_file()},
Description="test lambda function",
Timeout=3,
MemorySize=128,
Publish=True,
)["FunctionArn"]
client_logs.put_subscription_filter(
logGroupName=log_group_name,
filterName="test",
filterPattern="",
destinationArn=function_arn,
)
# when
with assert_raises(ClientError) as e:
client_logs.delete_subscription_filter(
logGroupName="not-existing-log-group", filterName="test",
)
# then
ex = e.exception
ex.operation_name.should.equal("DeleteSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified log group does not exist"
)
# when
with assert_raises(ClientError) as e:
client_logs.delete_subscription_filter(
logGroupName="/test", filterName="wrong-filter-name",
)
# then
ex = e.exception
ex.operation_name.should.equal("DeleteSubscriptionFilter")
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
ex.response["Error"]["Message"].should.equal(
"The specified subscription filter does not exist."
)
def _get_role_name(region_name):
with mock_iam():
iam = boto3.client("iam", region_name=region_name)
try:
return iam.get_role(RoleName="test-role")["Role"]["Arn"]
except ClientError:
return iam.create_role(
RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/",
)["Role"]["Arn"]
def _get_test_zip_file():
func_str = """
def lambda_handler(event, context):
return event
"""
zip_output = BytesIO()
zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED)
zip_file.writestr("lambda_function.py", func_str)
zip_file.close()
zip_output.seek(0)
return zip_output.read()
def _wait_for_log_msg(client, log_group_name, expected_msg_part):
received_messages = []
start = time.time()
while (time.time() - start) < 10:
result = client.describe_log_streams(logGroupName=log_group_name)
log_streams = result.get("logStreams")
if not log_streams:
time.sleep(1)
continue
for log_stream in log_streams:
result = client.get_log_events(
logGroupName=log_group_name, logStreamName=log_stream["logStreamName"],
)
received_messages.extend(
[event["message"] for event in result.get("events")]
)
for message in received_messages:
if expected_msg_part in message:
return True, message
time.sleep(1)
return False, received_messages
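# Illustrative sketch only (not part of this change), showing one way the
# polling helper above could be used from a test: after a subscription filter
# forwards events to a Lambda function, wait for the forwarded payload to show
# up in that function's log group. The log group name "/aws/lambda/test" and
# the "awslogs" message fragment are assumptions for illustration.
def _example_wait_for_lambda_invocation(client_logs):
    msg_showed_up, received = _wait_for_log_msg(
        client_logs, "/aws/lambda/test", "awslogs"
    )
    assert msg_showed_up, "Expected message not found, got: {0}".format(received)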

View File

@ -0,0 +1 @@
from __future__ import unicode_literals

View File

@ -0,0 +1,67 @@
from __future__ import unicode_literals
default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}}
default_votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
}
}
default_memberconfiguration = {
"Name": "testmember1",
"Description": "Test Member 1",
"FrameworkConfiguration": {
"Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
},
"LogPublishingConfiguration": {
"Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}}
},
}
default_policy_actions = {"Invitations": [{"Principal": "123456789012"}]}
multiple_policy_actions = {
"Invitations": [{"Principal": "123456789012"}, {"Principal": "123456789013"}]
}
def member_id_exist_in_list(members, memberid):
memberidexists = False
for member in members:
if member["Id"] == memberid:
memberidexists = True
break
return memberidexists
def create_member_configuration(
name, adminuser, adminpass, cloudwatchenabled, description=None
):
d = {
"Name": name,
"FrameworkConfiguration": {
"Fabric": {"AdminUsername": adminuser, "AdminPassword": adminpass}
},
"LogPublishingConfiguration": {
"Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": cloudwatchenabled}}}
},
}
if description is not None:
d["Description"] = description
return d
def select_invitation_id_for_network(invitations, networkid, status=None):
# Return the invitation IDs for the given network, optionally filtered by status
invitationsfornetwork = []
for invitation in invitations:
if invitation["NetworkSummary"]["Id"] == networkid:
if status is None or invitation["Status"] == status:
invitationsfornetwork.append(invitation["InvitationId"])
return invitationsfornetwork
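# Illustrative sketch only (not part of this change), showing how the helpers
# above are typically combined in a test: list all invitations, narrow them to
# the PENDING ones for one network, and check whether a member id appears in
# the network's member list. The conn, network_id and member_id arguments are
# assumptions for illustration.
def _example_helper_usage(conn, network_id, member_id):
    pending = select_invitation_id_for_network(
        conn.list_invitations()["Invitations"], network_id, "PENDING"
    )
    members = conn.list_members(NetworkId=network_id)["Members"]
    return pending, member_id_exist_in_list(members, member_id)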

View File

@ -0,0 +1,142 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_2_invitations():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.multiple_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
response["Invitations"].should.have.length_of(2)
response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id)
response["Invitations"][0]["Status"].should.equal("PENDING")
response["Invitations"][1]["NetworkSummary"]["Id"].should.equal(network_id)
response["Invitations"][1]["Status"].should.equal("PENDING")
@mock_managedblockchain
def test_reject_invitation():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id)
response["Invitations"][0]["Status"].should.equal("PENDING")
invitation_id = response["Invitations"][0]["InvitationId"]
# Reject - thanks but no thanks
response = conn.reject_invitation(InvitationId=invitation_id)
# Check the invitation status
response = conn.list_invitations()
response["Invitations"][0]["InvitationId"].should.equal(invitation_id)
response["Invitations"][0]["Status"].should.equal("REJECTED")
@mock_managedblockchain
def test_reject_invitation_badinvitation():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
response = conn.reject_invitation.when.called_with(
InvitationId="in-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "InvitationId in-ABCDEFGHIJKLMNOP0123456789 not found.")

View File

@ -0,0 +1,669 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_another_member():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id)
response["Invitations"][0]["Status"].should.equal("PENDING")
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False
),
)
member_id2 = response["MemberId"]
# Check the invitation status
response = conn.list_invitations()
response["Invitations"][0]["InvitationId"].should.equal(invitation_id)
response["Invitations"][0]["Status"].should.equal("ACCEPTED")
# Find member in full list
response = conn.list_members(NetworkId=network_id)
members = response["Members"]
members.should.have.length_of(2)
helpers.member_id_exist_in_list(members, member_id2).should.equal(True)
# Get member 2 details
response = conn.get_member(NetworkId=network_id, MemberId=member_id2)
response["Member"]["Name"].should.equal("testmember2")
# Update member
logconfignewenabled = not helpers.default_memberconfiguration[
"LogPublishingConfiguration"
]["Fabric"]["CaLogs"]["Cloudwatch"]["Enabled"]
logconfignew = {
"Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}}
}
conn.update_member(
NetworkId=network_id,
MemberId=member_id2,
LogPublishingConfiguration=logconfignew,
)
# Get member 2 details
response = conn.get_member(NetworkId=network_id, MemberId=member_id2)
response["Member"]["LogPublishingConfiguration"]["Fabric"]["CaLogs"]["Cloudwatch"][
"Enabled"
].should.equal(logconfignewenabled)
@mock_managedblockchain
def test_create_another_member_withopts():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id)
response["Invitations"][0]["Status"].should.equal("PENDING")
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Check the invitation status
response = conn.list_invitations()
response["Invitations"][0]["InvitationId"].should.equal(invitation_id)
response["Invitations"][0]["Status"].should.equal("ACCEPTED")
# Find member in full list
response = conn.list_members(NetworkId=network_id)
members = response["Members"]
members.should.have.length_of(2)
helpers.member_id_exist_in_list(members, member_id2).should.equal(True)
# Get member 2 details
response = conn.get_member(NetworkId=network_id, MemberId=member_id2)
response["Member"]["Description"].should.equal("Test Member 2")
# Try to create member with already used invitation
response = conn.create_member.when.called_with(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2 Duplicate"
),
).should.throw(Exception, "Invitation {0} not valid".format(invitation_id))
# Delete member 2
conn.delete_member(NetworkId=network_id, MemberId=member_id2)
# Member is still in the list
response = conn.list_members(NetworkId=network_id)
members = response["Members"]
members.should.have.length_of(2)
# But cannot get
response = conn.get_member.when.called_with(
NetworkId=network_id, MemberId=member_id2,
).should.throw(Exception, "Member {0} not found".format(member_id2))
# Delete member 1
conn.delete_member(NetworkId=network_id, MemberId=member_id)
# Network should be gone
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(0)
# Verify the invitation network status is DELETED
# Get the invitation
response = conn.list_invitations()
response["Invitations"].should.have.length_of(1)
response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id)
response["Invitations"][0]["NetworkSummary"]["Status"].should.equal("DELETED")
@mock_managedblockchain
def test_create_and_delete_member():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal (create additional member)
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
both_policy_actions = {
"Invitations": [{"Principal": "123456789012"}],
"Removals": [{"MemberId": member_id2}],
}
# Create proposal (invite and remove member)
response = conn.create_proposal(
NetworkId=network_id, MemberId=member_id, Actions=both_policy_actions,
)
proposal_id2 = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id2)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id2,
VoterMemberId=member_id,
Vote="YES",
)
# Check the invitation status
response = conn.list_invitations()
invitations = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)
invitations.should.have.length_of(1)
# Member is still in the list
response = conn.list_members(NetworkId=network_id)
members = response["Members"]
members.should.have.length_of(2)
foundmember2 = False
for member in members:
if member["Id"] == member_id2 and member["Status"] == "DELETED":
foundmember2 = True
foundmember2.should.equal(True)
@mock_managedblockchain
def test_create_too_many_members():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create 5 invitations; only 4 of them will be used to add members below
for counter in range(2, 7):
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
for counter in range(2, 6):
# Get the invitation
response = conn.list_invitations()
invitation_id = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)[0]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember" + str(counter),
"admin",
"Admin12345",
False,
"Test Member " + str(counter),
),
)
member_id = response["MemberId"]
# Find member in full list
response = conn.list_members(NetworkId=network_id)
members = response["Members"]
members.should.have.length_of(counter)
helpers.member_id_exist_in_list(members, member_id).should.equal(True)
# Get member details
response = conn.get_member(NetworkId=network_id, MemberId=member_id)
response["Member"]["Description"].should.equal("Test Member " + str(counter))
# Try to create the sixth
response = conn.list_invitations()
invitation_id = helpers.select_invitation_id_for_network(
response["Invitations"], network_id, "PENDING"
)[0]
# Try to create member with already used invitation
response = conn.create_member.when.called_with(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember6", "admin", "Admin12345", False, "Test Member 6"
),
).should.throw(
Exception,
"5 is the maximum number of members allowed in a STARTER Edition network",
)
@mock_managedblockchain
def test_create_another_member_alreadyhave():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Should fail trying to create with same name
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId=invitation_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember1", "admin", "Admin12345", False
),
).should.throw(
Exception,
"Member name {0} already exists in network {1}".format(
"testmember1", network_id
),
)
@mock_managedblockchain
def test_create_another_member_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_member.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
InvitationId="id-ABCDEFGHIJKLMNOP0123456789",
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False
),
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_another_member_badinvitation():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId="in-ABCDEFGHIJKLMNOP0123456789",
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False
),
).should.throw(Exception, "Invitation in-ABCDEFGHIJKLMNOP0123456789 not valid")
@mock_managedblockchain
def test_create_another_member_adminpassword():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
badadminpassmemberconf = helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False
)
# Too short
badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][
"AdminPassword"
] = "badap"
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId=invitation_id,
MemberConfiguration=badadminpassmemberconf,
).should.throw(
Exception,
"Invalid length for parameter MemberConfiguration.FrameworkConfiguration.Fabric.AdminPassword",
)
# No uppercase or numbers
badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][
"AdminPassword"
] = "badadminpwd"
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId=invitation_id,
MemberConfiguration=badadminpassmemberconf,
).should.throw(Exception, "Invalid request body")
# No lowercase or numbers
badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][
"AdminPassword"
] = "BADADMINPWD"
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId=invitation_id,
MemberConfiguration=badadminpassmemberconf,
).should.throw(Exception, "Invalid request body")
# No numbers
badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][
"AdminPassword"
] = "badAdminpwd"
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId=invitation_id,
MemberConfiguration=badadminpassmemberconf,
).should.throw(Exception, "Invalid request body")
# Invalid character
badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][
"AdminPassword"
] = "badAdmin@pwd1"
response = conn.create_member.when.called_with(
NetworkId=network_id,
InvitationId=invitation_id,
MemberConfiguration=badadminpassmemberconf,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_list_members_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.list_members.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_member_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.get_member.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
MemberId="m-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_member_badmember():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.get_member.when.called_with(
NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_member_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.delete_member.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
MemberId="m-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_delete_member_badmember():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.delete_member.when.called_with(
NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_member_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.update_member.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
MemberId="m-ABCDEFGHIJKLMNOP0123456789",
LogPublishingConfiguration=helpers.default_memberconfiguration[
"LogPublishingConfiguration"
],
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_update_member_badmember():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.update_member.when.called_with(
NetworkId=network_id,
MemberId="m-ABCDEFGHIJKLMNOP0123456789",
LogPublishingConfiguration=helpers.default_memberconfiguration[
"LogPublishingConfiguration"
],
).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")

View File

@ -0,0 +1,123 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_network():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
network_id.should.match("n-[A-Z0-9]{26}")
member_id.should.match("m-[A-Z0-9]{26}")
# Find in full list
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(1)
mbcnetworks[0]["Name"].should.equal("testnetwork1")
# Get network details
response = conn.get_network(NetworkId=network_id)
response["Network"]["Name"].should.equal("testnetwork1")
@mock_managedblockchain
def test_create_network_withopts():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
network_id.should.match("n-[A-Z0-9]{26}")
member_id.should.match("m-[A-Z0-9]{26}")
# Find in full list
response = conn.list_networks()
mbcnetworks = response["Networks"]
mbcnetworks.should.have.length_of(1)
mbcnetworks[0]["Description"].should.equal("Test Network 1")
# Get network details
response = conn.get_network(NetworkId=network_id)
response["Network"]["Description"].should.equal("Test Network 1")
@mock_managedblockchain
def test_create_network_noframework():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_VINYL",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_create_network_badframeworkver():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.X",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
).should.throw(
Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC"
)
@mock_managedblockchain
def test_create_network_badedition():
conn = boto3.client("managedblockchain", region_name="us-east-1")
frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}}
response = conn.create_network.when.called_with(
Name="testnetwork1",
Description="Test Network 1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_get_network_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.get_network.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")

View File

@ -0,0 +1,199 @@
from __future__ import unicode_literals
import boto3
import sure # noqa
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain
from . import helpers
@mock_managedblockchain
def test_create_proposal():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
network_id.should.match("n-[A-Z0-9]{26}")
member_id.should.match("m-[A-Z0-9]{26}")
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
proposal_id.should.match("p-[A-Z0-9]{26}")
# Find in full list
response = conn.list_proposals(NetworkId=network_id)
proposals = response["Proposals"]
proposals.should.have.length_of(1)
proposals[0]["ProposalId"].should.equal(proposal_id)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
@mock_managedblockchain
def test_create_proposal_withopts():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
network_id.should.match("n-[A-Z0-9]{26}")
member_id.should.match("m-[A-Z0-9]{26}")
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
Description="Adding a new member",
)
proposal_id = response["ProposalId"]
proposal_id.should.match("p-[A-Z0-9]{26}")
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Description"].should.equal("Adding a new member")
@mock_managedblockchain
def test_create_proposal_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.create_proposal.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
MemberId="m-ABCDEFGHIJKLMNOP0123456789",
Actions=helpers.default_policy_actions,
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_proposal_badmember():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.create_proposal.when.called_with(
NetworkId=network_id,
MemberId="m-ABCDEFGHIJKLMNOP0123456789",
Actions=helpers.default_policy_actions,
).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_create_proposal_badinvitationacctid():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Account ID must be 12 digits; this one has only 10
actions = {"Invitations": [{"Principal": "1234567890"}]}
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal.when.called_with(
NetworkId=network_id, MemberId=member_id, Actions=actions,
).should.throw(Exception, "Account ID format specified in proposal is not valid")
@mock_managedblockchain
def test_create_proposal_badremovalmemid():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Member ID is not valid for this network
actions = {"Removals": [{"MemberId": "m-ABCDEFGHIJKLMNOP0123456789"}]}
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal.when.called_with(
NetworkId=network_id, MemberId=member_id, Actions=actions,
).should.throw(Exception, "Member ID format specified in proposal is not valid")
@mock_managedblockchain
def test_list_proposal_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.list_proposals.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_proposal_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.get_proposal.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_get_proposal_badproposal():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.get_proposal.when.called_with(
NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")

View File

@ -0,0 +1,529 @@
from __future__ import unicode_literals
import os
import boto3
import sure # noqa
from freezegun import freeze_time
from nose import SkipTest
from moto.managedblockchain.exceptions import BadRequestException
from moto import mock_managedblockchain, settings
from . import helpers
@mock_managedblockchain
def test_vote_on_proposal_one_member_total_yes():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# List proposal votes
response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id)
response["ProposalVotes"][0]["MemberId"].should.equal(member_id)
# Get proposal details - should be APPROVED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("APPROVED")
response["Proposal"]["YesVoteCount"].should.equal(1)
response["Proposal"]["NoVoteCount"].should.equal(0)
response["Proposal"]["OutstandingVoteCount"].should.equal(0)
@mock_managedblockchain
def test_vote_on_proposal_one_member_total_no():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
# Create proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote no
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="NO",
)
# List proposal votes
response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id)
response["ProposalVotes"][0]["MemberId"].should.equal(member_id)
# Get proposal details - should be REJECTED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("REJECTED")
response["Proposal"]["YesVoteCount"].should.equal(0)
response["Proposal"]["NoVoteCount"].should.equal(1)
response["Proposal"]["OutstandingVoteCount"].should.equal(0)
@mock_managedblockchain
def test_vote_on_proposal_yes_greater_than():
conn = boto3.client("managedblockchain", region_name="us-east-1")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN",
}
}
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
@mock_managedblockchain
def test_vote_on_proposal_no_greater_than():
conn = boto3.client("managedblockchain", region_name="us-east-1")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 24,
"ThresholdComparator": "GREATER_THAN",
}
}
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote no with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="NO",
)
# Vote no with member 2
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id2,
Vote="NO",
)
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("REJECTED")
@mock_managedblockchain
def test_vote_on_proposal_expiredproposal():
if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true":
raise SkipTest("Cant manipulate time in server mode")
votingpolicy = {
"ApprovalThresholdPolicy": {
"ThresholdPercentage": 50,
"ProposalDurationInHours": 1,
"ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO",
}
}
conn = boto3.client("managedblockchain", region_name="us-east-1")
with freeze_time("2015-01-01 12:00:00"):
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
with freeze_time("2015-02-01 12:00:00"):
# Vote yes - should set status to expired
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get proposal details - should be EXPIRED
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["Status"].should.equal("EXPIRED")
@mock_managedblockchain
def test_vote_on_proposal_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.vote_on_proposal.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789",
Vote="YES",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_vote_on_proposal_badproposal():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789",
Vote="YES",
).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_vote_on_proposal_badmember():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789",
Vote="YES",
).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_vote_on_proposal_badvote():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="FOO",
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_vote_on_proposal_alreadyvoted():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network - need a good network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Vote yes
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Get the invitation
response = conn.list_invitations()
invitation_id = response["Invitations"][0]["InvitationId"]
# Create the member
response = conn.create_member(
InvitationId=invitation_id,
NetworkId=network_id,
MemberConfiguration=helpers.create_member_configuration(
"testmember2", "admin", "Admin12345", False, "Test Member 2"
),
)
member_id2 = response["MemberId"]
# Create another proposal
response = conn.create_proposal(
NetworkId=network_id,
MemberId=member_id,
Actions=helpers.default_policy_actions,
)
proposal_id = response["ProposalId"]
# Get proposal details
response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id)
response["Proposal"]["NetworkId"].should.equal(network_id)
response["Proposal"]["Status"].should.equal("IN_PROGRESS")
# Vote yes with member 1
response = conn.vote_on_proposal(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
)
# Vote yes with member 1 again
response = conn.vote_on_proposal.when.called_with(
NetworkId=network_id,
ProposalId=proposal_id,
VoterMemberId=member_id,
Vote="YES",
).should.throw(Exception, "Invalid request body")
@mock_managedblockchain
def test_list_proposal_votes_badnetwork():
conn = boto3.client("managedblockchain", region_name="us-east-1")
response = conn.list_proposal_votes.when.called_with(
NetworkId="n-ABCDEFGHIJKLMNOP0123456789",
ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found")
@mock_managedblockchain
def test_list_proposal_votes_badproposal():
conn = boto3.client("managedblockchain", region_name="us-east-1")
# Create network
response = conn.create_network(
Name="testnetwork1",
Framework="HYPERLEDGER_FABRIC",
FrameworkVersion="1.2",
FrameworkConfiguration=helpers.default_frameworkconfiguration,
VotingPolicy=helpers.default_votingpolicy,
MemberConfiguration=helpers.default_memberconfiguration,
)
network_id = response["NetworkId"]
member_id = response["MemberId"]
response = conn.list_proposal_votes.when.called_with(
NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789",
).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found")

View File

@ -195,6 +195,10 @@ def test_ec2_integration():
reservations = ec2.describe_instances()["Reservations"]
assert reservations.should.be.empty
# Before starting the instance, its status should be "stopped"
opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0]
opsworks_instance["Status"].should.equal("stopped")
# After starting the instance, it should be discoverable via ec2
opsworks.start_instance(InstanceId=instance_id)
reservations = ec2.describe_instances()["Reservations"]
@ -204,3 +208,5 @@ def test_ec2_integration():
instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"]) instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"])
instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"])
# After starting the instance, its status should be "online"
opsworks_instance["Status"].should.equal("online")

View File

@ -915,6 +915,11 @@ def test_create_cluster_from_snapshot():
ClusterIdentifier=original_cluster_identifier,
)
client.restore_from_cluster_snapshot.when.called_with(
ClusterIdentifier=original_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
).should.throw(ClientError, "ClusterAlreadyExists")
response = client.restore_from_cluster_snapshot(
ClusterIdentifier=new_cluster_identifier,
SnapshotIdentifier=original_snapshot_identifier,
@ -1333,3 +1338,20 @@ def test_modify_snapshot_copy_retention_period():
response = client.describe_clusters(ClusterIdentifier="test") response = client.describe_clusters(ClusterIdentifier="test")
cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"] cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"]
cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5) cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5)
@mock_redshift
def test_create_duplicate_cluster_fails():
kwargs = {
"ClusterIdentifier": "test",
"ClusterType": "single-node",
"DBName": "test",
"MasterUsername": "user",
"MasterUserPassword": "password",
"NodeType": "ds2.xlarge",
}
client = boto3.client("redshift", region_name="us-east-1")
client.create_cluster(**kwargs)
client.create_cluster.when.called_with(**kwargs).should.throw(
ClientError, "ClusterAlreadyExists"
)

View File

@ -2149,6 +2149,19 @@ def test_boto3_copy_object_with_versioning():
data.should.equal(b"test2") data.should.equal(b"test2")
@mock_s3
def test_s3_abort_multipart_data_with_invalid_upload_and_key():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
client.create_bucket(Bucket="blah")
with assert_raises(Exception) as err:
client.abort_multipart_upload(
Bucket="blah", Key="foobar", UploadId="dummy_upload_id"
)
err.exception.response["Error"]["Code"].should.equal("NoSuchUpload")
@mock_s3
def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)

View File

@ -127,3 +127,53 @@ def test_send_raw_email():
send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"] send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"]
) )
sent_count.should.equal(1) sent_count.should.equal(1)
@mock_ses_deprecated
def test_get_send_statistics():
conn = boto.connect_ses("the_key", "the_secret")
conn.send_email.when.called_with(
"test@example.com",
"test subject",
"<span>test body</span>",
"test_to@example.com",
format="html",
).should.throw(BotoServerError)
# verify that the rejected send is reflected in get_send_statistics
result = conn.get_send_statistics()
reject_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["Rejects"]
)
delivery_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["DeliveryAttempts"]
)
reject_count.should.equal(1)
delivery_count.should.equal(0)
conn.verify_email_identity("test@example.com")
conn.send_email(
"test@example.com", "test subject", "test body", "test_to@example.com"
)
# verify that the delivery attempt is reflected in get_send_statistics
result = conn.get_send_statistics()
reject_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["Rejects"]
)
delivery_count = int(
result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][
"SendDataPoints"
][0]["DeliveryAttempts"]
)
reject_count.should.equal(1)
delivery_count.should.equal(1)

View File

@ -4,6 +4,8 @@ import boto3
from botocore.exceptions import ClientError
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from nose.tools import assert_raises
import sure # noqa
@ -139,19 +141,7 @@ def test_send_html_email():
def test_send_raw_email():
conn = boto3.client("ses", region_name="us-east-1")
message = MIMEMultipart() message = get_raw_email()
message["Subject"] = "Test"
message["From"] = "test@example.com"
message["To"] = "to@example.com, foo@example.com"
# Message body
part = MIMEText("test file attached")
message.attach(part)
# Attachment
part = MIMEText("contents of test file here")
part.add_header("Content-Disposition", "attachment; filename=test.txt")
message.attach(part)
kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()}) kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()})
@ -165,6 +155,39 @@ def test_send_raw_email():
    sent_count.should.equal(2)

@mock_ses
def test_send_raw_email_validate_domain():
    conn = boto3.client("ses", region_name="us-east-1")
    message = get_raw_email()

    kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()})

    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)

    conn.verify_domain_identity(Domain="example.com")
    conn.send_raw_email(**kwargs)

    send_quota = conn.get_send_quota()
    sent_count = int(send_quota["SentLast24Hours"])
    sent_count.should.equal(2)


def get_raw_email():
    message = MIMEMultipart()
    message["Subject"] = "Test"
    message["From"] = "test@example.com"
    message["To"] = "to@example.com, foo@example.com"
    # Message body
    part = MIMEText("test file attached")
    message.attach(part)
    # Attachment
    part = MIMEText("contents of test file here")
    part.add_header("Content-Disposition", "attachment; filename=test.txt")
    message.attach(part)
    return message

@mock_ses
def test_send_raw_email_without_source():
    conn = boto3.client("ses", region_name="us-east-1")
@ -227,3 +250,94 @@ def test_send_email_notification_with_encoded_sender():
Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}}, Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}},
) )
response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)

@mock_ses
def test_create_configuration_set():
    conn = boto3.client("ses", region_name="us-east-1")
    conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"}))

    conn.create_configuration_set_event_destination(
        ConfigurationSetName="test",
        EventDestination={
            "Name": "snsEvent",
            "Enabled": True,
            "MatchingEventTypes": ["send",],
            "SNSDestination": {
                "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
            },
        },
    )

    with assert_raises(ClientError) as ex:
        conn.create_configuration_set_event_destination(
            ConfigurationSetName="failtest",
            EventDestination={
                "Name": "snsEvent",
                "Enabled": True,
                "MatchingEventTypes": ["send",],
                "SNSDestination": {
                    "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
                },
            },
        )

    ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist")

    with assert_raises(ClientError) as ex:
        conn.create_configuration_set_event_destination(
            ConfigurationSetName="test",
            EventDestination={
                "Name": "snsEvent",
                "Enabled": True,
                "MatchingEventTypes": ["send",],
                "SNSDestination": {
                    "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
                },
            },
        )

    ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists")

@mock_ses
def test_create_ses_template():
    conn = boto3.client("ses", region_name="us-east-1")

    conn.create_template(
        Template={
            "TemplateName": "MyTemplate",
            "SubjectPart": "Greetings, {{name}}!",
            "TextPart": "Dear {{name}},"
            "\r\nYour favorite animal is {{favoriteanimal}}.",
            "HtmlPart": "<h1>Hello {{name}},"
            "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>",
        }
    )
    with assert_raises(ClientError) as ex:
        conn.create_template(
            Template={
                "TemplateName": "MyTemplate",
                "SubjectPart": "Greetings, {{name}}!",
                "TextPart": "Dear {{name}},"
                "\r\nYour favorite animal is {{favoriteanimal}}.",
                "HtmlPart": "<h1>Hello {{name}},"
                "</h1><p>Your favorite animal is {{favoriteanimal}}.</p>",
            }
        )

    ex.exception.response["Error"]["Code"].should.equal("TemplateNameAlreadyExists")

    # get a template which is already added
    result = conn.get_template(TemplateName="MyTemplate")
    result["Template"]["TemplateName"].should.equal("MyTemplate")
    result["Template"]["SubjectPart"].should.equal("Greetings, {{name}}!")

    # get a template which is not present
    with assert_raises(ClientError) as ex:
        conn.get_template(TemplateName="MyFakeTemplate")

    ex.exception.response["Error"]["Code"].should.equal("TemplateDoesNotExist")

    result = conn.list_templates()
    result["TemplatesMetadata"][0]["Name"].should.equal("MyTemplate")


@ -1,5 +1,7 @@
from __future__ import unicode_literals
import string
import boto3
import botocore.exceptions
import sure  # noqa
@ -300,6 +302,30 @@ def test_get_parameter():
    )

@mock_ssm
def test_get_parameters_errors():
    client = boto3.client("ssm", region_name="us-east-1")

    ssm_parameters = {name: "value" for name in string.ascii_lowercase[:11]}

    for name, value in ssm_parameters.items():
        client.put_parameter(Name=name, Value=value, Type="String")

    with assert_raises(ClientError) as e:
        client.get_parameters(Names=list(ssm_parameters.keys()))
    ex = e.exception
    ex.operation_name.should.equal("GetParameters")
    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
    ex.response["Error"]["Code"].should.contain("ValidationException")
    ex.response["Error"]["Message"].should.equal(
        "1 validation error detected: "
        "Value '[{}]' at 'names' failed to satisfy constraint: "
        "Member must have length less than or equal to 10.".format(
            ", ".join(ssm_parameters.keys())
        )
    )

@mock_ssm
def test_get_nonexistant_parameter():
    client = boto3.client("ssm", region_name="us-east-1")