Pylinting on moto/d and moto/e directories (#4728)
commit 8def040f8d
parent e020b06016
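Most hunks below apply the same pylint-driven cleanup: Python 2 style super(Class, self).__init__(...) calls are collapsed to the Python 3 zero-argument form. A minimal runnable sketch of that pattern follows; the class names are illustrative stand-ins, not moto code.

class ClientError(Exception):
    """Toy base class standing in for moto's JsonRESTError."""

    def __init__(self, error_type, message):
        super().__init__(f"{error_type}: {message}")


class InvalidRequest(ClientError):
    def __init__(self, msg=None):
        # Old style: super(InvalidRequest, self).__init__("InvalidRequest", ...)
        # New style: the zero-argument form resolves class and instance automatically,
        # which is the change applied throughout this commit.
        super().__init__("InvalidRequest", msg or "The request is not valid.")


if __name__ == "__main__":
    print(InvalidRequest())  # InvalidRequest: The request is not valid.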
@@ -8,6 +8,4 @@ class DataSyncClientError(JsonRESTError):
 class InvalidRequestException(DataSyncClientError):
     def __init__(self, msg=None):
         self.code = 400
-        super(InvalidRequestException, self).__init__(
-            "InvalidRequestException", msg or "The request is not valid."
-        )
+        super().__init__("InvalidRequestException", msg or "The request is not valid.")
@@ -13,7 +13,7 @@ class DataSyncResponse(BaseResponse):
     def list_locations(self):
         locations = list()
         for arn, location in self.datasync_backend.locations.items():
-            locations.append({"LocationArn": location.arn, "LocationUri": location.uri})
+            locations.append({"LocationArn": arn, "LocationUri": location.uri})
         return json.dumps({"Locations": locations})

     def _get_location(self, location_arn, typ):
@@ -112,9 +112,7 @@ class DataSyncResponse(BaseResponse):
     def list_tasks(self):
         tasks = list()
         for arn, task in self.datasync_backend.tasks.items():
-            tasks.append(
-                {"Name": task.name, "Status": task.status, "TaskArn": task.arn}
-            )
+            tasks.append({"Name": task.name, "Status": task.status, "TaskArn": arn})
         return json.dumps({"Tasks": tasks})

     def delete_task(self):
@@ -93,7 +93,6 @@ class DaxCluster(BaseModel):
             {"SecurityGroupIdentifier": f"sg-{get_random_hex(10)}", "Status": "active"}
         ]
         self.sse_specification = sse_specification
-        print(sse_specification)

         # Internal counter to keep track of when this cluster is available/deleted
         # Used in conjunction with `advance()`
@@ -7,18 +7,14 @@ class DmsClientError(JsonRESTError):

 class ResourceNotFoundFault(DmsClientError):
     def __init__(self, message):
-        super(ResourceNotFoundFault, self).__init__("ResourceNotFoundFault", message)
+        super().__init__("ResourceNotFoundFault", message)


 class InvalidResourceStateFault(DmsClientError):
     def __init__(self, message):
-        super(InvalidResourceStateFault, self).__init__(
-            "InvalidResourceStateFault", message
-        )
+        super().__init__("InvalidResourceStateFault", message)


 class ResourceAlreadyExistsFault(DmsClientError):
     def __init__(self, message):
-        super(ResourceAlreadyExistsFault, self).__init__(
-            "ResourceAlreadyExistsFault", message
-        )
+        super().__init__("ResourceAlreadyExistsFault", message)
@@ -14,7 +14,7 @@ from .utils import filter_tasks

 class DatabaseMigrationServiceBackend(BaseBackend):
     def __init__(self, region_name=None):
-        super(DatabaseMigrationServiceBackend, self).__init__()
+        super().__init__()
         self.region_name = region_name
         self.replication_tasks = {}

@@ -174,13 +174,9 @@ class Table(CloudFormationModel):
         return Table(**spec)

     def __len__(self):
-        count = 0
-        for key, value in self.items.items():
-            if self.has_range_key:
-                count += len(value)
-            else:
-                count += 1
-        return count
+        return sum(
+            [(len(value) if self.has_range_key else 1) for value in self.items.values()]
+        )

     def __nonzero__(self):
         return True
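The hunk above rewrites Table.__len__ as a single sum() over the item values. The two forms are equivalent for a mapping of hash key to item (or hash key to {range key: item} when a range key exists); the standalone sketch below uses hypothetical helper names, not moto's actual Table class.

def count_items(items, has_range_key):
    """Equivalent of the old loop-and-counter __len__."""
    count = 0
    for value in items.values():
        count += len(value) if has_range_key else 1
    return count


def count_items_sum(items, has_range_key):
    """Equivalent of the new sum()-based __len__."""
    return sum(len(value) if has_range_key else 1 for value in items.values())


table = {"pk1": {"rk1": "a", "rk2": "b"}, "pk2": {"rk1": "c"}}
assert count_items(table, True) == count_items_sum(table, True) == 3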
@@ -202,7 +202,7 @@ class DynamoHandler(BaseResponse):
             range_comparison = None
             range_values = []

-        items, last_page = dynamodb_backend.query(
+        items, _ = dynamodb_backend.query(
             name, hash_key, range_comparison, range_values
         )

@@ -236,7 +236,7 @@ class DynamoHandler(BaseResponse):
             comparison_values = scan_filter.get("AttributeValueList", [])
             filters[attribute_name] = (comparison_operator, comparison_values)

-        items, scanned_count, last_page = dynamodb_backend.scan(name, filters)
+        items, scanned_count, _ = dynamodb_backend.scan(name, filters)

         if items is None:
             er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException"
@@ -281,7 +281,6 @@ class DynamoHandler(BaseResponse):
         hash_key = key["HashKeyElement"]
         range_key = key.get("RangeKeyElement")
         updates = self.body["AttributeUpdates"]
-        return_values = self.body.get("ReturnValues", "") # noqa

         item = dynamodb_backend.update_item(name, hash_key, range_key, updates)

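The hunks above discard unused names from tuple unpacking (last_page, and the previously noqa'd return_values) by binding them to the conventional throwaway name "_". A small sketch with a made-up backend call, not the real dynamodb_backend API:

def fake_scan():
    """Stand-in for a backend call returning (items, scanned_count, last_page)."""
    return (["item-1", "item-2"], 2, True)


# Before: items, scanned_count, last_page = fake_scan()  -> last_page never used
# After: "_" signals the third value is intentionally ignored, keeping pylint quiet.
items, scanned_count, _ = fake_scan()
print(len(items), scanned_count)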
@@ -953,7 +953,7 @@ class OpNot(Op):
     OP = "NOT"

     def __init__(self, lhs):
-        super(OpNot, self).__init__(lhs, None)
+        super().__init__(lhs, None)

     def expr(self, item):
         lhs = self.lhs.expr(item)
@@ -1073,7 +1073,7 @@ class FuncAttrExists(Func):

     def __init__(self, attribute):
         self.attr = attribute
-        super(FuncAttrExists, self).__init__(attribute)
+        super().__init__(attribute)

     def expr(self, item):
         return self.attr.get_type(item) is not None
@@ -1089,7 +1089,7 @@ class FuncAttrType(Func):
     def __init__(self, attribute, _type):
         self.attr = attribute
         self.type = _type
-        super(FuncAttrType, self).__init__(attribute, _type)
+        super().__init__(attribute, _type)

     def expr(self, item):
         return self.attr.get_type(item) == self.type.expr(item)
@@ -1101,7 +1101,7 @@ class FuncBeginsWith(Func):
     def __init__(self, attribute, substr):
         self.attr = attribute
         self.substr = substr
-        super(FuncBeginsWith, self).__init__(attribute, substr)
+        super().__init__(attribute, substr)

     def expr(self, item):
         if self.attr.get_type(item) != "S":
@@ -1117,7 +1117,7 @@ class FuncContains(Func):
     def __init__(self, attribute, operand):
         self.attr = attribute
         self.operand = operand
-        super(FuncContains, self).__init__(attribute, operand)
+        super().__init__(attribute, operand)

     def expr(self, item):
         if self.attr.get_type(item) in ("S", "SS", "NS", "BS", "L"):
@@ -1137,7 +1137,7 @@ class FuncSize(Func):

     def __init__(self, attribute):
         self.attr = attribute
-        super(FuncSize, self).__init__(attribute)
+        super().__init__(attribute)

     def expr(self, item):
         if self.attr.get_type(item) is None:
@@ -1155,7 +1155,7 @@ class FuncBetween(Func):
         self.attr = attribute
         self.start = start
         self.end = end
-        super(FuncBetween, self).__init__(attribute, start, end)
+        super().__init__(attribute, start, end)

     def expr(self, item):
         # In python3 None is not a valid comparator when using < or > so must be handled specially
@@ -1183,7 +1183,7 @@ class FuncIn(Func):
     def __init__(self, attribute, *possible_values):
         self.attr = attribute
         self.possible_values = possible_values
-        super(FuncIn, self).__init__(attribute, *possible_values)
+        super().__init__(attribute, *possible_values)

     def expr(self, item):
         for possible_value in self.possible_values:
@@ -16,9 +16,7 @@ class InvalidUpdateExpressionInvalidDocumentPath(MockValidationException):
     )

     def __init__(self):
-        super(InvalidUpdateExpressionInvalidDocumentPath, self).__init__(
-            self.invalid_update_expression_msg
-        )
+        super().__init__(self.invalid_update_expression_msg)


 class InvalidUpdateExpression(MockValidationException):
@@ -26,7 +24,7 @@ class InvalidUpdateExpression(MockValidationException):

     def __init__(self, update_expression_error):
         self.update_expression_error = update_expression_error
-        super(InvalidUpdateExpression, self).__init__(
+        super().__init__(
             self.invalid_update_expr_msg.format(
                 update_expression_error=update_expression_error
             )
@@ -39,7 +37,7 @@ class AttributeDoesNotExist(MockValidationException):
     )

     def __init__(self):
-        super(AttributeDoesNotExist, self).__init__(self.attr_does_not_exist_msg)
+        super().__init__(self.attr_does_not_exist_msg)


 class ProvidedKeyDoesNotExist(MockValidationException):
@@ -48,9 +46,7 @@ class ProvidedKeyDoesNotExist(MockValidationException):
     )

     def __init__(self):
-        super(ProvidedKeyDoesNotExist, self).__init__(
-            self.provided_key_does_not_exist_msg
-        )
+        super().__init__(self.provided_key_does_not_exist_msg)


 class ExpressionAttributeNameNotDefined(InvalidUpdateExpression):
@@ -58,9 +54,7 @@ class ExpressionAttributeNameNotDefined(InvalidUpdateExpression):

     def __init__(self, attribute_name):
         self.not_defined_attribute_name = attribute_name
-        super(ExpressionAttributeNameNotDefined, self).__init__(
-            self.name_not_defined_msg.format(n=attribute_name)
-        )
+        super().__init__(self.name_not_defined_msg.format(n=attribute_name))


 class AttributeIsReservedKeyword(InvalidUpdateExpression):
@@ -70,9 +64,7 @@ class AttributeIsReservedKeyword(InvalidUpdateExpression):

     def __init__(self, keyword):
         self.keyword = keyword
-        super(AttributeIsReservedKeyword, self).__init__(
-            self.attribute_is_keyword_msg.format(keyword=keyword)
-        )
+        super().__init__(self.attribute_is_keyword_msg.format(keyword=keyword))


 class ExpressionAttributeValueNotDefined(InvalidUpdateExpression):
@@ -80,7 +72,7 @@ class ExpressionAttributeValueNotDefined(InvalidUpdateExpression):

     def __init__(self, attribute_value):
         self.attribute_value = attribute_value
-        super(ExpressionAttributeValueNotDefined, self).__init__(
+        super().__init__(
             self.attr_value_not_defined_msg.format(attribute_value=attribute_value)
         )

@@ -90,7 +82,7 @@ class UpdateExprSyntaxError(InvalidUpdateExpression):

     def __init__(self, error_detail):
         self.error_detail = error_detail
-        super(UpdateExprSyntaxError, self).__init__(
+        super().__init__(
             self.update_expr_syntax_error_msg.format(error_detail=error_detail)
         )

@@ -101,9 +93,7 @@ class InvalidTokenException(UpdateExprSyntaxError):
     def __init__(self, token, near):
         self.token = token
         self.near = near
-        super(InvalidTokenException, self).__init__(
-            self.token_detail_msg.format(token=token, near=near)
-        )
+        super().__init__(self.token_detail_msg.format(token=token, near=near))


 class InvalidExpressionAttributeNameKey(MockValidationException):
@@ -113,16 +103,14 @@ class InvalidExpressionAttributeNameKey(MockValidationException):

     def __init__(self, key):
         self.key = key
-        super(InvalidExpressionAttributeNameKey, self).__init__(
-            self.invalid_expr_attr_name_msg.format(key=key)
-        )
+        super().__init__(self.invalid_expr_attr_name_msg.format(key=key))


 class ItemSizeTooLarge(MockValidationException):
     item_size_too_large_msg = "Item size has exceeded the maximum allowed size"

     def __init__(self):
-        super(ItemSizeTooLarge, self).__init__(self.item_size_too_large_msg)
+        super().__init__(self.item_size_too_large_msg)


 class ItemSizeToUpdateTooLarge(MockValidationException):
@@ -131,9 +119,7 @@ class ItemSizeToUpdateTooLarge(MockValidationException):
     )

     def __init__(self):
-        super(ItemSizeToUpdateTooLarge, self).__init__(
-            self.item_size_to_update_too_large_msg
-        )
+        super().__init__(self.item_size_to_update_too_large_msg)


 class HashKeyTooLong(MockValidationException):
@@ -143,7 +129,7 @@ class HashKeyTooLong(MockValidationException):
     )

     def __init__(self):
-        super(HashKeyTooLong, self).__init__(self.key_too_large_msg)
+        super().__init__(self.key_too_large_msg)


 class RangeKeyTooLong(MockValidationException):
@@ -152,7 +138,7 @@ class RangeKeyTooLong(MockValidationException):
     )

     def __init__(self):
-        super(RangeKeyTooLong, self).__init__(self.key_too_large_msg)
+        super().__init__(self.key_too_large_msg)


 class IncorrectOperandType(InvalidUpdateExpression):
@@ -161,7 +147,7 @@ class IncorrectOperandType(InvalidUpdateExpression):
     def __init__(self, operator_or_function, operand_type):
         self.operator_or_function = operator_or_function
         self.operand_type = operand_type
-        super(IncorrectOperandType, self).__init__(
+        super().__init__(
             self.inv_operand_msg.format(f=operator_or_function, t=operand_type)
         )

@@ -170,14 +156,14 @@ class IncorrectDataType(MockValidationException):
     inc_data_type_msg = "An operand in the update expression has an incorrect data type"

     def __init__(self):
-        super(IncorrectDataType, self).__init__(self.inc_data_type_msg)
+        super().__init__(self.inc_data_type_msg)


 class ConditionalCheckFailed(ValueError):
     msg = "The conditional request failed"

     def __init__(self):
-        super(ConditionalCheckFailed, self).__init__(self.msg)
+        super().__init__(self.msg)


 class TransactionCanceledException(ValueError):
@@ -185,7 +171,7 @@ class TransactionCanceledException(ValueError):

     def __init__(self, errors):
         msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors]))
-        super(TransactionCanceledException, self).__init__(msg)
+        super().__init__(msg)


 class EmptyKeyAttributeException(MockValidationException):
@@ -194,16 +180,14 @@ class EmptyKeyAttributeException(MockValidationException):
     empty_index_msg = "One or more parameter values are not valid. The update expression attempted to update a secondary index key to a value that is not supported. The AttributeValue for a key attribute cannot contain an empty string value."

     def __init__(self, key_in_index=False):
-        super(EmptyKeyAttributeException, self).__init__(
-            self.empty_index_msg if key_in_index else self.empty_str_msg
-        )
+        super().__init__(self.empty_index_msg if key_in_index else self.empty_str_msg)


 class UpdateHashRangeKeyException(MockValidationException):
     msg = "One or more parameter values were invalid: Cannot update attribute {}. This attribute is part of the key"

     def __init__(self, key_name):
-        super(UpdateHashRangeKeyException, self).__init__(self.msg.format(key_name))
+        super().__init__(self.msg.format(key_name))


 class InvalidAttributeTypeError(MockValidationException):
@@ -217,4 +201,4 @@ class TooManyAddClauses(InvalidUpdateExpression):
     msg = 'The "ADD" section can only be used once in an update expression;'

     def __init__(self):
-        super(TooManyAddClauses, self).__init__(self.msg)
+        super().__init__(self.msg)
@ -63,7 +63,7 @@ class LimitedSizeDict(dict):
|
||||
# We'll set the limit to something in between to be safe
|
||||
if (current_item_size + new_item_size) > 405000:
|
||||
raise ItemSizeTooLarge
|
||||
super(LimitedSizeDict, self).__setitem__(key, value)
|
||||
super().__setitem__(key, value)
|
||||
|
||||
|
||||
class Item(BaseModel):
|
||||
@ -559,13 +559,9 @@ class Table(CloudFormationModel):
|
||||
return results
|
||||
|
||||
def __len__(self):
|
||||
count = 0
|
||||
for key, value in self.items.items():
|
||||
if self.has_range_key:
|
||||
count += len(value)
|
||||
else:
|
||||
count += 1
|
||||
return count
|
||||
return sum(
|
||||
[(len(value) if self.has_range_key else 1) for value in self.items.values()]
|
||||
)
|
||||
|
||||
@property
|
||||
def hash_key_names(self):
|
||||
@ -990,7 +986,7 @@ class Table(CloudFormationModel):
|
||||
class RestoredTable(Table):
|
||||
def __init__(self, name, backup):
|
||||
params = self._parse_params_from_backup(backup)
|
||||
super(RestoredTable, self).__init__(name, **params)
|
||||
super().__init__(name, **params)
|
||||
self.indexes = copy.deepcopy(backup.table.indexes)
|
||||
self.global_indexes = copy.deepcopy(backup.table.global_indexes)
|
||||
self.items = copy.deepcopy(backup.table.items)
|
||||
@ -1009,7 +1005,7 @@ class RestoredTable(Table):
|
||||
return params
|
||||
|
||||
def describe(self, base_key="TableDescription"):
|
||||
result = super(RestoredTable, self).describe(base_key=base_key)
|
||||
result = super().describe(base_key=base_key)
|
||||
result[base_key]["RestoreSummary"] = {
|
||||
"SourceBackupArn": self.source_backup_arn,
|
||||
"SourceTableArn": self.source_table_arn,
|
||||
@ -1022,7 +1018,7 @@ class RestoredTable(Table):
|
||||
class RestoredPITTable(Table):
|
||||
def __init__(self, name, source):
|
||||
params = self._parse_params_from_table(source)
|
||||
super(RestoredPITTable, self).__init__(name, **params)
|
||||
super().__init__(name, **params)
|
||||
self.indexes = copy.deepcopy(source.indexes)
|
||||
self.global_indexes = copy.deepcopy(source.global_indexes)
|
||||
self.items = copy.deepcopy(source.items)
|
||||
@ -1040,7 +1036,7 @@ class RestoredPITTable(Table):
|
||||
return params
|
||||
|
||||
def describe(self, base_key="TableDescription"):
|
||||
result = super(RestoredPITTable, self).describe(base_key=base_key)
|
||||
result = super().describe(base_key=base_key)
|
||||
result[base_key]["RestoreSummary"] = {
|
||||
"SourceTableArn": self.source_table_arn,
|
||||
"RestoreDateTime": unix_time(self.restore_date_time),
|
||||
|
@ -41,7 +41,7 @@ class LeafNode(Node):
|
||||
"""A LeafNode is a Node where none of the children are Nodes themselves."""
|
||||
|
||||
def __init__(self, children=None):
|
||||
super(LeafNode, self).__init__(children)
|
||||
super().__init__(children)
|
||||
|
||||
|
||||
class Expression(Node, metaclass=abc.ABCMeta):
|
||||
@ -184,7 +184,7 @@ class ExpressionSelector(LeafNode):
|
||||
|
||||
def __init__(self, selection_index):
|
||||
try:
|
||||
super(ExpressionSelector, self).__init__(children=[int(selection_index)])
|
||||
super().__init__(children=[int(selection_index)])
|
||||
except ValueError:
|
||||
assert (
|
||||
False
|
||||
@ -198,7 +198,7 @@ class ExpressionAttribute(LeafNode):
|
||||
"""An attribute identifier as used in the DDB item"""
|
||||
|
||||
def __init__(self, attribute):
|
||||
super(ExpressionAttribute, self).__init__(children=[attribute])
|
||||
super().__init__(children=[attribute])
|
||||
|
||||
def get_attribute_name(self):
|
||||
return self.children[0]
|
||||
@ -208,7 +208,7 @@ class ExpressionAttributeName(LeafNode):
|
||||
"""An ExpressionAttributeName is an alias for an attribute identifier"""
|
||||
|
||||
def __init__(self, attribute_name):
|
||||
super(ExpressionAttributeName, self).__init__(children=[attribute_name])
|
||||
super().__init__(children=[attribute_name])
|
||||
|
||||
def get_attribute_name_placeholder(self):
|
||||
return self.children[0]
|
||||
@ -218,7 +218,7 @@ class ExpressionAttributeValue(LeafNode):
|
||||
"""An ExpressionAttributeValue is an alias for an value"""
|
||||
|
||||
def __init__(self, value):
|
||||
super(ExpressionAttributeValue, self).__init__(children=[value])
|
||||
super().__init__(children=[value])
|
||||
|
||||
def get_value_name(self):
|
||||
return self.children[0]
|
||||
@ -228,7 +228,7 @@ class ExpressionValueOperator(LeafNode):
|
||||
"""An ExpressionValueOperator is an operation that works on 2 values"""
|
||||
|
||||
def __init__(self, value):
|
||||
super(ExpressionValueOperator, self).__init__(children=[value])
|
||||
super().__init__(children=[value])
|
||||
|
||||
def get_operator(self):
|
||||
return self.children[0]
|
||||
@ -257,7 +257,7 @@ class DDBTypedValue(Node):
|
||||
|
||||
def __init__(self, value):
|
||||
assert isinstance(value, DynamoType), "DDBTypedValue must be of DynamoType"
|
||||
super(DDBTypedValue, self).__init__(children=[value])
|
||||
super().__init__(children=[value])
|
||||
|
||||
def get_value(self):
|
||||
return self.children[0]
|
||||
@ -267,7 +267,7 @@ class NoneExistingPath(LeafNode):
|
||||
"""A placeholder for Paths that did not exist in the Item."""
|
||||
|
||||
def __init__(self, creatable=False):
|
||||
super(NoneExistingPath, self).__init__(children=[creatable])
|
||||
super().__init__(children=[creatable])
|
||||
|
||||
def is_creatable(self):
|
||||
"""Can this path be created if need be. For example path creating element in a dictionary or creating a new
|
||||
|
@ -341,7 +341,7 @@ class NestableBinExpressionParser(ExpressionParser):
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(NestableBinExpressionParser, self).__init__(*args, **kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
self.target_nodes = deque()
|
||||
|
||||
def _parse_target_clause(self, factory_class):
|
||||
@ -456,7 +456,7 @@ class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin):
|
||||
pass
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(UpdateExpressionParser, self).__init__(*args, **kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
NestableExpressionParserMixin.__init__(self)
|
||||
|
||||
@classmethod
|
||||
@ -520,7 +520,7 @@ class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMi
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
NestableExpressionParserMixin.__init__(self)
|
||||
|
||||
@classmethod
|
||||
@ -626,7 +626,7 @@ class UpdateExpressionPathParser(ExpressionParser):
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(UpdateExpressionPathParser, self).__init__(*args, **kwargs)
|
||||
super().__init__(*args, **kwargs)
|
||||
self.path_nodes = []
|
||||
|
||||
@classmethod
|
||||
|
@ -368,9 +368,7 @@ class NetworkInterface(TaggedEC2Resource, CloudFormationModel):
|
||||
if self.subnet:
|
||||
vpc = self.ec2_backend.get_vpc(self.subnet.vpc_id)
|
||||
if vpc and vpc.enable_dns_hostnames:
|
||||
self.private_dns_name = generate_dns_from_ip(
|
||||
self.private_ip_address, type="internal"
|
||||
)
|
||||
self.private_dns_name = generate_dns_from_ip(self.private_ip_address)
|
||||
for address in self.private_ip_addresses:
|
||||
if address.get("Primary", None):
|
||||
address["PrivateDnsName"] = self.private_dns_name
|
||||
@ -1564,8 +1562,6 @@ class TagBackend(object):
|
||||
return True
|
||||
|
||||
def describe_tags(self, filters=None):
|
||||
import re
|
||||
|
||||
results = []
|
||||
key_filters = []
|
||||
resource_id_filters = []
|
||||
@@ -2238,8 +2234,8 @@ class RegionsAndZonesBackend(object):
         ],
     }

-    def describe_regions(self, region_names=[]):
-        if len(region_names) == 0:
+    def describe_regions(self, region_names=None):
+        if not region_names:
             return self.regions
         ret = []
         for name in region_names:
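The describe_regions hunk above is the first of many that replace a mutable default argument (region_names=[], tags=[], tags={}) with None. A default list is created once at function definition time and shared across calls, which pylint flags as dangerous-default-value. The sketch below is a toy function, not moto code; it shows the failure mode and the None sentinel plus "or []" idiom that the diff adopts.

def add_tag_buggy(tag, tags=[]):
    # The same list object is reused by every call that relies on the default.
    tags.append(tag)
    return tags


def add_tag_fixed(tag, tags=None):
    tags = tags or []  # fresh list per call when none is supplied
    tags.append(tag)
    return tags


print(add_tag_buggy("a"), add_tag_buggy("b"))  # ['a', 'b'] ['a', 'b'] - shared state
print(add_tag_fixed("a"), add_tag_fixed("b"))  # ['a'] ['b']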
@@ -2768,12 +2764,12 @@ class SecurityGroupBackend(object):

     def get_security_group_from_name(self, name, vpc_id=None):
         if vpc_id:
-            for group_id, group in self.groups[vpc_id].items():
+            for group in self.groups[vpc_id].values():
                 if group.name == name:
                     return group
         else:
             for vpc_id in self.groups:
-                for group_id, group in self.groups[vpc_id].items():
+                for group in self.groups[vpc_id].values():
                     if group.name == name:
                         return group

@@ -2786,7 +2782,7 @@ class SecurityGroupBackend(object):
         return group

     def get_default_security_group(self, vpc_id=None):
-        for group_id, group in self.groups[vpc_id or self.default_vpc.id].items():
+        for group in self.groups[vpc_id or self.default_vpc.id].values():
             if group.is_default:
                 return group

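The two security-group hunks above swap "for key, value in d.items()" for "for value in d.values()" wherever the key is never used. A minimal illustration with hypothetical data, not moto's group registry:

groups = {"sg-1": {"name": "default"}, "sg-2": {"name": "web"}}

# Before: for group_id, group in groups.items():  -> group_id is never read
# After: iterate the values directly and drop the unused variable.
for group in groups.values():
    if group["name"] == "web":
        print("found web group")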
@ -3098,8 +3094,8 @@ class SecurityGroupBackend(object):
|
||||
from_port,
|
||||
to_port,
|
||||
ip_ranges,
|
||||
source_groups=[],
|
||||
prefix_list_ids=[],
|
||||
source_groups=None,
|
||||
prefix_list_ids=None,
|
||||
vpc_id=None,
|
||||
):
|
||||
|
||||
@ -3148,8 +3144,8 @@ class SecurityGroupBackend(object):
|
||||
from_port,
|
||||
to_port,
|
||||
ip_ranges,
|
||||
source_groups=[],
|
||||
prefix_list_ids=[],
|
||||
source_groups=None,
|
||||
prefix_list_ids=None,
|
||||
vpc_id=None,
|
||||
):
|
||||
|
||||
@ -3235,7 +3231,7 @@ class SecurityGroupBackend(object):
|
||||
|
||||
def _add_source_group(self, source_groups, vpc_id):
|
||||
_source_groups = []
|
||||
for item in source_groups:
|
||||
for item in source_groups or []:
|
||||
if "OwnerId" not in item:
|
||||
item["OwnerId"] = ACCOUNT_ID
|
||||
# for VPCs
|
||||
@ -3935,7 +3931,7 @@ class VPCBackend(object):
|
||||
cidr_block,
|
||||
instance_tenancy="default",
|
||||
amazon_provided_ipv6_cidr_block=False,
|
||||
tags=[],
|
||||
tags=None,
|
||||
):
|
||||
vpc_id = random_vpc_id()
|
||||
try:
|
||||
@ -3953,7 +3949,7 @@ class VPCBackend(object):
|
||||
amazon_provided_ipv6_cidr_block,
|
||||
)
|
||||
|
||||
for tag in tags:
|
||||
for tag in tags or []:
|
||||
tag_key = tag.get("Key")
|
||||
tag_value = tag.get("Value")
|
||||
vpc.add_tag(tag_key, tag_value)
|
||||
@ -4111,11 +4107,11 @@ class VPCBackend(object):
|
||||
self,
|
||||
vpc_id,
|
||||
service_name,
|
||||
type=None,
|
||||
endpoint_type=None,
|
||||
policy_document=False,
|
||||
route_table_ids=None,
|
||||
subnet_ids=[],
|
||||
network_interface_ids=[],
|
||||
subnet_ids=None,
|
||||
network_interface_ids=None,
|
||||
dns_entries=None,
|
||||
client_token=None,
|
||||
security_group_ids=None,
|
||||
@ -4129,10 +4125,10 @@ class VPCBackend(object):
|
||||
self.get_vpc(vpc_id)
|
||||
destination_prefix_list_id = None
|
||||
|
||||
if type and type.lower() == "interface":
|
||||
if endpoint_type and endpoint_type.lower() == "interface":
|
||||
|
||||
network_interface_ids = []
|
||||
for subnet_id in subnet_ids:
|
||||
for subnet_id in subnet_ids or []:
|
||||
self.get_subnet(subnet_id)
|
||||
eni = self.create_network_interface(subnet_id, random_private_ip())
|
||||
network_interface_ids.append(eni.id)
|
||||
@ -4153,7 +4149,7 @@ class VPCBackend(object):
|
||||
vpc_endpoint_id,
|
||||
vpc_id,
|
||||
service_name,
|
||||
type,
|
||||
endpoint_type,
|
||||
policy_document,
|
||||
route_table_ids,
|
||||
subnet_ids,
|
||||
@ -4179,8 +4175,8 @@ class VPCBackend(object):
|
||||
|
||||
return vpc_end_point
|
||||
|
||||
def delete_vpc_endpoints(self, vpce_ids=[]):
|
||||
for vpce_id in vpce_ids:
|
||||
def delete_vpc_endpoints(self, vpce_ids=None):
|
||||
for vpce_id in vpce_ids or []:
|
||||
vpc_endpoint = self.vpc_end_points.get(vpce_id, None)
|
||||
if vpc_endpoint:
|
||||
if vpc_endpoint.type.lower() == "interface":
|
||||
@ -4756,7 +4752,7 @@ class SubnetBackend(object):
|
||||
availability_zone=None,
|
||||
availability_zone_id=None,
|
||||
context=None,
|
||||
tags=[],
|
||||
tags=None,
|
||||
):
|
||||
subnet_id = random_subnet_id()
|
||||
vpc = self.get_vpc(
|
||||
@ -4840,7 +4836,7 @@ class SubnetBackend(object):
|
||||
assign_ipv6_address_on_creation=False,
|
||||
)
|
||||
|
||||
for tag in tags:
|
||||
for tag in tags or []:
|
||||
tag_key = tag.get("Key")
|
||||
tag_value = tag.get("Value")
|
||||
subnet.add_tag(tag_key, tag_value)
|
||||
@ -5296,11 +5292,11 @@ class RouteTableBackend(object):
|
||||
self.route_tables = {}
|
||||
super().__init__()
|
||||
|
||||
def create_route_table(self, vpc_id, tags=[], main=False):
|
||||
def create_route_table(self, vpc_id, tags=None, main=False):
|
||||
route_table_id = random_route_table_id()
|
||||
vpc = self.get_vpc(vpc_id) # Validate VPC exists
|
||||
route_table = RouteTable(self, route_table_id, vpc_id, main=main)
|
||||
for tag in tags:
|
||||
for tag in tags or []:
|
||||
route_table.add_tag(tag.get("Key"), tag.get("Value"))
|
||||
self.route_tables[route_table_id] = route_table
|
||||
|
||||
@ -5491,10 +5487,10 @@ class VPCEndPoint(TaggedEC2Resource):
|
||||
def __init__(
|
||||
self,
|
||||
ec2_backend,
|
||||
id,
|
||||
endpoint_id,
|
||||
vpc_id,
|
||||
service_name,
|
||||
type=None,
|
||||
endpoint_type=None,
|
||||
policy_document=False,
|
||||
route_table_ids=None,
|
||||
subnet_ids=None,
|
||||
@ -5507,14 +5503,14 @@ class VPCEndPoint(TaggedEC2Resource):
|
||||
destination_prefix_list_id=None,
|
||||
):
|
||||
self.ec2_backend = ec2_backend
|
||||
self.id = id
|
||||
self.id = endpoint_id
|
||||
self.vpc_id = vpc_id
|
||||
self.service_name = service_name
|
||||
self.type = type
|
||||
self.type = endpoint_type
|
||||
self.state = "available"
|
||||
self.policy_document = policy_document
|
||||
self.route_table_ids = route_table_ids
|
||||
self.network_interface_ids = network_interface_ids
|
||||
self.network_interface_ids = network_interface_ids or []
|
||||
self.subnet_ids = subnet_ids
|
||||
self.client_token = client_token
|
||||
self.security_group_ids = security_group_ids
|
||||
@ -5537,11 +5533,11 @@ class ManagedPrefixList(TaggedEC2Resource):
|
||||
self,
|
||||
backend,
|
||||
address_family=None,
|
||||
entry=[],
|
||||
entry=None,
|
||||
max_entries=None,
|
||||
prefix_list_name=None,
|
||||
region=None,
|
||||
tags={},
|
||||
tags=None,
|
||||
owner_id=None,
|
||||
):
|
||||
self.ec2_backend = backend
|
||||
@ -5577,10 +5573,10 @@ class ManagedPrefixListBackend(object):
|
||||
def create_managed_prefix_list(
|
||||
self,
|
||||
address_family=None,
|
||||
entry=[],
|
||||
entry=None,
|
||||
max_entries=None,
|
||||
prefix_list_name=None,
|
||||
tags={},
|
||||
tags=None,
|
||||
owner_id=None,
|
||||
):
|
||||
managed_prefix_list = ManagedPrefixList(
|
||||
@ -5834,7 +5830,7 @@ class RouteBackend(object):
|
||||
return route
|
||||
|
||||
def get_route(self, route_id):
|
||||
route_table_id, destination_cidr_block = split_route_id(route_id)
|
||||
route_table_id, _ = split_route_id(route_id)
|
||||
route_table = self.get_route_table(route_table_id)
|
||||
return route_table.get(route_id)
|
||||
|
||||
@ -5901,9 +5897,9 @@ class InternetGatewayBackend(object):
|
||||
self.internet_gateways = {}
|
||||
super().__init__()
|
||||
|
||||
def create_internet_gateway(self, tags=[]):
|
||||
def create_internet_gateway(self, tags=None):
|
||||
igw = InternetGateway(self)
|
||||
for tag in tags:
|
||||
for tag in tags or []:
|
||||
igw.add_tag(tag.get("Key"), tag.get("Value"))
|
||||
self.internet_gateways[igw.id] = igw
|
||||
return igw
|
||||
@ -5981,10 +5977,10 @@ class CarrierGatewayBackend(object):
|
||||
self.carrier_gateways[carrier_gateway.id] = carrier_gateway
|
||||
return carrier_gateway
|
||||
|
||||
def delete_carrier_gateway(self, id):
|
||||
if not self.carrier_gateways.get(id):
|
||||
raise InvalidCarrierGatewayID(id)
|
||||
carrier_gateway = self.carrier_gateways.pop(id)
|
||||
def delete_carrier_gateway(self, gateway_id):
|
||||
if not self.carrier_gateways.get(gateway_id):
|
||||
raise InvalidCarrierGatewayID(gateway_id)
|
||||
carrier_gateway = self.carrier_gateways.pop(gateway_id)
|
||||
carrier_gateway.state = "deleted"
|
||||
return carrier_gateway
|
||||
|
||||
@ -6038,7 +6034,9 @@ class EgressOnlyInternetGatewayBackend(object):
|
||||
return egress_only_igw
|
||||
|
||||
def describe_egress_only_internet_gateways(self, ids=None, filters=None):
|
||||
# TODO: support filtering based on tag
|
||||
"""
|
||||
The Filters-argument is not yet supported
|
||||
"""
|
||||
egress_only_igws = list(self.egress_only_internet_gateway_backend.values())
|
||||
|
||||
if ids:
|
||||
@@ -6049,17 +6047,19 @@ class EgressOnlyInternetGatewayBackend(object):
             ]
         return egress_only_igws

-    def delete_egress_only_internet_gateway(self, id):
-        egress_only_igw = self.egress_only_internet_gateway_backend.get(id)
+    def delete_egress_only_internet_gateway(self, gateway_id):
+        egress_only_igw = self.egress_only_internet_gateway_backend.get(gateway_id)
         if not egress_only_igw:
-            raise InvalidGatewayIDError(id)
+            raise InvalidGatewayIDError(gateway_id)
         if egress_only_igw:
-            self.egress_only_internet_gateway_backend.pop(id)
+            self.egress_only_internet_gateway_backend.pop(gateway_id)

-    def get_egress_only_igw(self, id):
-        egress_only_igw = self.egress_only_internet_gateway_backend.get(id, None)
+    def get_egress_only_igw(self, gateway_id):
+        egress_only_igw = self.egress_only_internet_gateway_backend.get(
+            gateway_id, None
+        )
         if not egress_only_igw:
-            raise InvalidGatewayIDError(id)
+            raise InvalidGatewayIDError(gateway_id)
         return egress_only_igw

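The gateway hunks above rename parameters such as id and type to gateway_id and gateway_type (likewise endpoint_type, spot_instance_type, vpn_conn_type elsewhere in the diff), because shadowing a builtin hides it inside the function and trips pylint's redefined-builtin check. A small sketch with invented names, not the moto backend API:

def delete_gateway_shadowed(gateways, id):
    # Inside this body the builtin id() is no longer reachable by that name.
    return gateways.pop(id, None)


def delete_gateway(gateways, gateway_id):
    # Renaming keeps the builtin usable and makes the parameter self-describing.
    print("deleting object", id(gateway_id))
    return gateways.pop(gateway_id, None)


print(delete_gateway({"eigw-1": "gw"}, "eigw-1"))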
@ -6121,7 +6121,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
|
||||
spot_request_id,
|
||||
price,
|
||||
image_id,
|
||||
type,
|
||||
spot_instance_type,
|
||||
valid_from,
|
||||
valid_until,
|
||||
launch_group,
|
||||
@ -6146,7 +6146,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
|
||||
self.id = spot_request_id
|
||||
self.state = "open"
|
||||
self.price = price
|
||||
self.type = type
|
||||
self.type = spot_instance_type
|
||||
self.valid_from = valid_from
|
||||
self.valid_until = valid_until
|
||||
self.launch_group = launch_group
|
||||
@ -6211,7 +6211,7 @@ class SpotRequestBackend(object, metaclass=Model):
|
||||
price,
|
||||
image_id,
|
||||
count,
|
||||
type,
|
||||
spot_instance_type,
|
||||
valid_from,
|
||||
valid_until,
|
||||
launch_group,
|
||||
@ -6237,7 +6237,7 @@ class SpotRequestBackend(object, metaclass=Model):
|
||||
spot_request_id,
|
||||
price,
|
||||
image_id,
|
||||
type,
|
||||
spot_instance_type,
|
||||
valid_from,
|
||||
valid_until,
|
||||
launch_group,
|
||||
@ -6259,7 +6259,7 @@ class SpotRequestBackend(object, metaclass=Model):
|
||||
return requests
|
||||
|
||||
@Model.prop("SpotInstanceRequest")
|
||||
def describe_spot_instance_requests(self, filters=None, spot_instance_ids=[]):
|
||||
def describe_spot_instance_requests(self, filters=None, spot_instance_ids=None):
|
||||
requests = self.spot_instance_requests.copy().values()
|
||||
|
||||
if spot_instance_ids:
|
||||
@ -6431,7 +6431,7 @@ class SpotFleetRequest(TaggedEC2Resource, CloudFormationModel):
|
||||
price=launch_spec.spot_price,
|
||||
image_id=launch_spec.image_id,
|
||||
count=count,
|
||||
type="persistent",
|
||||
spot_instance_type="persistent",
|
||||
valid_from=None,
|
||||
valid_until=None,
|
||||
launch_group=None,
|
||||
@ -6962,18 +6962,18 @@ class VPNConnection(TaggedEC2Resource):
|
||||
def __init__(
|
||||
self,
|
||||
ec2_backend,
|
||||
id,
|
||||
type,
|
||||
vpn_connection_id,
|
||||
vpn_conn_type,
|
||||
customer_gateway_id,
|
||||
vpn_gateway_id=None,
|
||||
transit_gateway_id=None,
|
||||
tags={},
|
||||
tags=None,
|
||||
):
|
||||
self.ec2_backend = ec2_backend
|
||||
self.id = id
|
||||
self.id = vpn_connection_id
|
||||
self.state = "available"
|
||||
self.customer_gateway_configuration = {}
|
||||
self.type = type
|
||||
self.type = vpn_conn_type
|
||||
self.customer_gateway_id = customer_gateway_id
|
||||
self.vpn_gateway_id = vpn_gateway_id
|
||||
self.transit_gateway_id = transit_gateway_id
|
||||
@ -6993,20 +6993,20 @@ class VPNConnectionBackend(object):
|
||||
|
||||
def create_vpn_connection(
|
||||
self,
|
||||
type,
|
||||
vpn_conn_type,
|
||||
customer_gateway_id,
|
||||
vpn_gateway_id=None,
|
||||
transit_gateway_id=None,
|
||||
static_routes_only=None,
|
||||
tags={},
|
||||
tags=None,
|
||||
):
|
||||
vpn_connection_id = random_vpn_connection_id()
|
||||
if static_routes_only:
|
||||
pass
|
||||
vpn_connection = VPNConnection(
|
||||
self,
|
||||
id=vpn_connection_id,
|
||||
type=type,
|
||||
vpn_connection_id=vpn_connection_id,
|
||||
vpn_conn_type=vpn_conn_type,
|
||||
customer_gateway_id=customer_gateway_id,
|
||||
vpn_gateway_id=vpn_gateway_id,
|
||||
transit_gateway_id=transit_gateway_id,
|
||||
@ -7063,11 +7063,11 @@ class NetworkAclBackend(object):
|
||||
raise InvalidNetworkAclIdError(network_acl_id)
|
||||
return network_acl
|
||||
|
||||
def create_network_acl(self, vpc_id, tags=[], default=False):
|
||||
def create_network_acl(self, vpc_id, tags=None, default=False):
|
||||
network_acl_id = random_network_acl_id()
|
||||
self.get_vpc(vpc_id)
|
||||
network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
|
||||
for tag in tags:
|
||||
for tag in tags or []:
|
||||
network_acl.add_tag(tag.get("Key"), tag.get("Value"))
|
||||
self.network_acls[network_acl_id] = network_acl
|
||||
if default:
|
||||
@ -7305,16 +7305,16 @@ class VpnGateway(CloudFormationModel, TaggedEC2Resource):
|
||||
def __init__(
|
||||
self,
|
||||
ec2_backend,
|
||||
id,
|
||||
type,
|
||||
gateway_id,
|
||||
gateway_type,
|
||||
amazon_side_asn,
|
||||
availability_zone,
|
||||
tags=None,
|
||||
state="available",
|
||||
):
|
||||
self.ec2_backend = ec2_backend
|
||||
self.id = id
|
||||
self.type = type
|
||||
self.id = gateway_id
|
||||
self.type = gateway_type
|
||||
self.amazon_side_asn = amazon_side_asn
|
||||
self.availability_zone = availability_zone
|
||||
self.state = state
|
||||
@ -7340,7 +7340,7 @@ class VpnGateway(CloudFormationModel, TaggedEC2Resource):
|
||||
asn = properties.get("AmazonSideAsn", None)
|
||||
ec2_backend = ec2_backends[region_name]
|
||||
|
||||
return ec2_backend.create_vpn_gateway(type=_type, amazon_side_asn=asn)
|
||||
return ec2_backend.create_vpn_gateway(gateway_type=_type, amazon_side_asn=asn)
|
||||
|
||||
@property
|
||||
def physical_resource_id(self):
|
||||
@ -7364,11 +7364,15 @@ class VpnGatewayBackend(object):
|
||||
super().__init__()
|
||||
|
||||
def create_vpn_gateway(
|
||||
self, type="ipsec.1", amazon_side_asn=None, availability_zone=None, tags=None
|
||||
self,
|
||||
gateway_type="ipsec.1",
|
||||
amazon_side_asn=None,
|
||||
availability_zone=None,
|
||||
tags=None,
|
||||
):
|
||||
vpn_gateway_id = random_vpn_gateway_id()
|
||||
vpn_gateway = VpnGateway(
|
||||
self, vpn_gateway_id, type, amazon_side_asn, availability_zone, tags
|
||||
self, vpn_gateway_id, gateway_type, amazon_side_asn, availability_zone, tags
|
||||
)
|
||||
self.vpn_gateways[vpn_gateway_id] = vpn_gateway
|
||||
return vpn_gateway
|
||||
@ -7413,11 +7417,18 @@ class VpnGatewayBackend(object):
|
||||
|
||||
class CustomerGateway(TaggedEC2Resource):
|
||||
def __init__(
|
||||
self, ec2_backend, id, type, ip_address, bgp_asn, state="available", tags=None
|
||||
self,
|
||||
ec2_backend,
|
||||
gateway_id,
|
||||
gateway_type,
|
||||
ip_address,
|
||||
bgp_asn,
|
||||
state="available",
|
||||
tags=None,
|
||||
):
|
||||
self.ec2_backend = ec2_backend
|
||||
self.id = id
|
||||
self.type = type
|
||||
self.id = gateway_id
|
||||
self.type = gateway_type
|
||||
self.ip_address = ip_address
|
||||
self.bgp_asn = bgp_asn
|
||||
self.attachments = {}
|
||||
@ -7435,11 +7446,11 @@ class CustomerGatewayBackend(object):
|
||||
super().__init__()
|
||||
|
||||
def create_customer_gateway(
|
||||
self, type="ipsec.1", ip_address=None, bgp_asn=None, tags=None
|
||||
self, gateway_type="ipsec.1", ip_address=None, bgp_asn=None, tags=None
|
||||
):
|
||||
customer_gateway_id = random_customer_gateway_id()
|
||||
customer_gateway = CustomerGateway(
|
||||
self, customer_gateway_id, type, ip_address, bgp_asn, tags=tags
|
||||
self, customer_gateway_id, gateway_type, ip_address, bgp_asn, tags=tags
|
||||
)
|
||||
self.customer_gateways[customer_gateway_id] = customer_gateway
|
||||
return customer_gateway
|
||||
@ -7563,9 +7574,9 @@ class TransitGatewayBackend(object):
|
||||
self.transit_gateways = {}
|
||||
super().__init__()
|
||||
|
||||
def create_transit_gateway(self, description=None, options=None, tags=[]):
|
||||
def create_transit_gateway(self, description=None, options=None, tags=None):
|
||||
transit_gateway = TransitGateway(self, description, options)
|
||||
for tag in tags:
|
||||
for tag in tags or []:
|
||||
tag_key = tag.get("Key")
|
||||
tag_value = tag.get("Value")
|
||||
transit_gateway.add_tag(tag_key, tag_value)
|
||||
@ -7959,7 +7970,7 @@ class TransitGatewayAttachmentBackend(object):
|
||||
super().__init__()
|
||||
|
||||
def create_transit_gateway_vpn_attachment(
|
||||
self, vpn_id, transit_gateway_id, tags=[]
|
||||
self, vpn_id, transit_gateway_id, tags=None
|
||||
):
|
||||
transit_gateway_vpn_attachment = TransitGatewayAttachment(
|
||||
self,
|
||||
@ -8067,8 +8078,8 @@ class TransitGatewayAttachmentBackend(object):
|
||||
tgw_attachment.options.update(options)
|
||||
|
||||
if add_subnet_ids:
|
||||
for id in add_subnet_ids:
|
||||
tgw_attachment.subnet_ids.append(id)
|
||||
for subnet_id in add_subnet_ids:
|
||||
tgw_attachment.subnet_ids.append(subnet_id)
|
||||
|
||||
return tgw_attachment
|
||||
|
||||
@ -8279,7 +8290,7 @@ class NatGateway(CloudFormationModel, TaggedEC2Resource):
|
||||
backend,
|
||||
subnet_id,
|
||||
allocation_id,
|
||||
tags=[],
|
||||
tags=None,
|
||||
connectivity_type="public",
|
||||
address_set=None,
|
||||
):
|
||||
@ -8386,7 +8397,7 @@ class NatGatewayBackend(object):
|
||||
return nat_gateways
|
||||
|
||||
def create_nat_gateway(
|
||||
self, subnet_id, allocation_id, tags=[], connectivity_type="public"
|
||||
self, subnet_id, allocation_id, tags=None, connectivity_type="public"
|
||||
):
|
||||
nat_gateway = NatGateway(
|
||||
self, subnet_id, allocation_id, tags, connectivity_type
|
||||
|
@ -5,7 +5,7 @@ from moto.ec2.utils import filters_from_querystring
|
||||
class CustomerGateways(BaseResponse):
|
||||
def create_customer_gateway(self):
|
||||
# raise NotImplementedError('CustomerGateways(AmazonVPC).create_customer_gateway is not yet implemented')
|
||||
type = self._get_param("Type")
|
||||
gateway_type = self._get_param("Type")
|
||||
ip_address = self._get_param("IpAddress")
|
||||
bgp_asn = self._get_param("BgpAsn")
|
||||
tags = self._get_multi_param("TagSpecification")
|
||||
@ -13,7 +13,7 @@ class CustomerGateways(BaseResponse):
|
||||
tags = (tags or {}).get("Tag", [])
|
||||
tags = {t["Key"]: t["Value"] for t in tags}
|
||||
customer_gateway = self.ec2_backend.create_customer_gateway(
|
||||
type, ip_address=ip_address, bgp_asn=bgp_asn, tags=tags
|
||||
gateway_type, ip_address=ip_address, bgp_asn=bgp_asn, tags=tags
|
||||
)
|
||||
template = self.response_template(CREATE_CUSTOMER_GATEWAY_RESPONSE)
|
||||
return template.render(customer_gateway=customer_gateway)
|
||||
|
@ -25,7 +25,9 @@ class EgressOnlyInternetGateway(BaseResponse):
|
||||
|
||||
def delete_egress_only_internet_gateway(self):
|
||||
egress_only_igw_id = self._get_param("EgressOnlyInternetGatewayId")
|
||||
self.ec2_backend.delete_egress_only_internet_gateway(id=egress_only_igw_id)
|
||||
self.ec2_backend.delete_egress_only_internet_gateway(
|
||||
gateway_id=egress_only_igw_id
|
||||
)
|
||||
template = self.response_template(DELETE_EGRESS_ONLY_IGW_RESPONSE)
|
||||
return template.render()
|
||||
|
||||
|
@ -284,7 +284,7 @@ class InstanceResponse(BaseResponse):
|
||||
|
||||
def _security_grp_instance_attribute_handler(self):
|
||||
new_security_grp_list = []
|
||||
for key, value in self.querystring.items():
|
||||
for key in self.querystring:
|
||||
if "GroupId." in key:
|
||||
new_security_grp_list.append(self.querystring.get(key)[0])
|
||||
|
||||
|
@ -49,7 +49,7 @@ class SpotInstances(BaseResponse):
|
||||
price = self._get_param("SpotPrice")
|
||||
image_id = self._get_param("LaunchSpecification.ImageId")
|
||||
count = self._get_int_param("InstanceCount", 1)
|
||||
type = self._get_param("Type", "one-time")
|
||||
spot_instance_type = self._get_param("Type", "one-time")
|
||||
valid_from = self._get_param("ValidFrom")
|
||||
valid_until = self._get_param("ValidUntil")
|
||||
launch_group = self._get_param("LaunchGroup")
|
||||
@ -69,7 +69,7 @@ class SpotInstances(BaseResponse):
|
||||
price=price,
|
||||
image_id=image_id,
|
||||
count=count,
|
||||
type=type,
|
||||
spot_instance_type=spot_instance_type,
|
||||
valid_from=valid_from,
|
||||
valid_until=valid_until,
|
||||
launch_group=launch_group,
|
||||
|
@ -11,7 +11,7 @@ class VirtualPrivateGateways(BaseResponse):
|
||||
return template.render(attachment=attachment)
|
||||
|
||||
def create_vpn_gateway(self):
|
||||
type = self._get_param("Type")
|
||||
gateway_type = self._get_param("Type")
|
||||
amazon_side_asn = self._get_param("AmazonSideAsn")
|
||||
availability_zone = self._get_param("AvailabilityZone")
|
||||
tags = self._get_multi_param("TagSpecification")
|
||||
@ -19,7 +19,7 @@ class VirtualPrivateGateways(BaseResponse):
|
||||
tags = (tags or {}).get("Tag", [])
|
||||
tags = {t["Key"]: t["Value"] for t in tags}
|
||||
vpn_gateway = self.ec2_backend.create_vpn_gateway(
|
||||
type=type,
|
||||
gateway_type=gateway_type,
|
||||
amazon_side_asn=amazon_side_asn,
|
||||
availability_zone=availability_zone,
|
||||
tags=tags,
|
||||
|
@ -193,7 +193,7 @@ class VPCs(BaseResponse):
|
||||
vpc_end_point = self.ec2_backend.create_vpc_endpoint(
|
||||
vpc_id=vpc_id,
|
||||
service_name=service_name,
|
||||
type=endpoint_type,
|
||||
endpoint_type=endpoint_type,
|
||||
policy_document=policy_document,
|
||||
route_table_ids=route_table_ids,
|
||||
subnet_ids=subnet_ids,
|
||||
|
@ -5,14 +5,14 @@ from xml.sax.saxutils import escape
|
||||
|
||||
class VPNConnections(BaseResponse):
|
||||
def create_vpn_connection(self):
|
||||
type = self._get_param("Type")
|
||||
vpn_conn_type = self._get_param("Type")
|
||||
cgw_id = self._get_param("CustomerGatewayId")
|
||||
vgw_id = self._get_param("VpnGatewayId")
|
||||
tgw_id = self._get_param("TransitGatewayId")
|
||||
static_routes = self._get_param("StaticRoutesOnly")
|
||||
tags = add_tag_specification(self._get_multi_param("TagSpecification"))
|
||||
vpn_connection = self.ec2_backend.create_vpn_connection(
|
||||
type,
|
||||
vpn_conn_type,
|
||||
cgw_id,
|
||||
vpn_gateway_id=vgw_id,
|
||||
transit_gateway_id=tgw_id,
|
||||
|
@ -251,10 +251,10 @@ def random_ip():
|
||||
)
|
||||
|
||||
|
||||
def generate_dns_from_ip(ip, type="internal"):
|
||||
def generate_dns_from_ip(ip, dns_type="internal"):
|
||||
splits = ip.split("/")[0].split(".") if "/" in ip else ip.split(".")
|
||||
return "ip-{}-{}-{}-{}.ec2.{}".format(
|
||||
splits[0], splits[1], splits[2], splits[3], type
|
||||
splits[0], splits[1], splits[2], splits[3], dns_type
|
||||
)
|
||||
|
||||
|
||||
@ -553,8 +553,8 @@ def filter_internet_gateways(igws, filter_dict):
|
||||
return result
|
||||
|
||||
|
||||
def is_filter_matching(obj, filter, filter_value):
|
||||
value = obj.get_filter_value(filter)
|
||||
def is_filter_matching(obj, _filter, filter_value):
|
||||
value = obj.get_filter_value(_filter)
|
||||
|
||||
if filter_value is None:
|
||||
return False
|
||||
@ -616,7 +616,7 @@ def random_key_pair():
|
||||
|
||||
|
||||
def get_prefix(resource_id):
|
||||
resource_id_prefix, separator, after = resource_id.partition("-")
|
||||
resource_id_prefix, _, after = resource_id.partition("-")
|
||||
if resource_id_prefix == EC2_RESOURCE_TO_PREFIX["transit-gateway"]:
|
||||
if after.startswith("rtb"):
|
||||
resource_id_prefix = EC2_RESOURCE_TO_PREFIX["transit-gateway-route-table"]
|
||||
|
@ -5,7 +5,7 @@ class ServiceNotFoundException(RESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self):
|
||||
super(ServiceNotFoundException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="ServiceNotFoundException", message="Service not found."
|
||||
)
|
||||
|
||||
@ -14,7 +14,7 @@ class TaskDefinitionNotFoundException(JsonRESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self):
|
||||
super(TaskDefinitionNotFoundException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="ClientException",
|
||||
message="The specified task definition does not exist.",
|
||||
)
|
||||
@ -24,7 +24,7 @@ class RevisionNotFoundException(JsonRESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self):
|
||||
super(RevisionNotFoundException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="ClientException", message="Revision is missing.",
|
||||
)
|
||||
|
||||
@ -33,7 +33,7 @@ class TaskSetNotFoundException(JsonRESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self):
|
||||
super(TaskSetNotFoundException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="ClientException",
|
||||
message="The specified task set does not exist.",
|
||||
)
|
||||
@ -43,7 +43,7 @@ class ClusterNotFoundException(JsonRESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self):
|
||||
super(ClusterNotFoundException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="ClusterNotFoundException", message="Cluster not found.",
|
||||
)
|
||||
|
||||
@ -52,7 +52,7 @@ class EcsClientException(JsonRESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self, message):
|
||||
super(EcsClientException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="ClientException", message=message,
|
||||
)
|
||||
|
||||
@ -61,7 +61,7 @@ class InvalidParameterException(JsonRESTError):
|
||||
code = 400
|
||||
|
||||
def __init__(self, message):
|
||||
super(InvalidParameterException, self).__init__(
|
||||
super().__init__(
|
||||
error_type="InvalidParameterException", message=message,
|
||||
)
|
||||
|
||||
|
@ -289,9 +289,9 @@ class Task(BaseObject):
|
||||
container_instance_arn,
|
||||
resource_requirements,
|
||||
backend,
|
||||
overrides={},
|
||||
overrides=None,
|
||||
started_by="",
|
||||
tags=[],
|
||||
tags=None,
|
||||
):
|
||||
self.id = str(uuid.uuid4())
|
||||
self.cluster_name = cluster.name
|
||||
@ -300,10 +300,10 @@ class Task(BaseObject):
|
||||
self.last_status = "RUNNING"
|
||||
self.desired_status = "RUNNING"
|
||||
self.task_definition_arn = task_definition.arn
|
||||
self.overrides = overrides
|
||||
self.overrides = overrides or {}
|
||||
self.containers = []
|
||||
self.started_by = started_by
|
||||
self.tags = tags
|
||||
self.tags = tags or []
|
||||
self.stopped_reason = ""
|
||||
self.resource_requirements = resource_requirements
|
||||
self.region_name = cluster.region_name
|
||||
@ -715,7 +715,7 @@ class TaskSet(BaseObject):
|
||||
|
||||
class EC2ContainerServiceBackend(BaseBackend):
|
||||
def __init__(self, region_name):
|
||||
super(EC2ContainerServiceBackend, self).__init__()
|
||||
super().__init__()
|
||||
self.account_settings = dict()
|
||||
self.clusters = {}
|
||||
self.task_definitions = {}
|
||||
@ -917,7 +917,7 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
container_instance_arn = container_instance.container_instance_arn
|
||||
try_to_place = True
|
||||
while try_to_place:
|
||||
can_be_placed, message = self._can_be_placed(
|
||||
can_be_placed = self._can_be_placed(
|
||||
container_instance, resource_requirements
|
||||
)
|
||||
if can_be_placed:
|
||||
@ -1002,14 +1002,14 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
elif resource.get("name") == "PORTS":
|
||||
reserved_ports = resource.get("stringSetValue")
|
||||
if task_resource_requirements.get("CPU") > remaining_cpu:
|
||||
return False, "Not enough CPU credits"
|
||||
return False
|
||||
if task_resource_requirements.get("MEMORY") > remaining_memory:
|
||||
return False, "Not enough memory"
|
||||
return False
|
||||
ports_needed = task_resource_requirements.get("PORTS")
|
||||
for port in ports_needed:
|
||||
if str(port) in reserved_ports:
|
||||
return False, "Port clash"
|
||||
return True, "Can be placed"
|
||||
return False
|
||||
return True
|
||||
|
||||
def start_task(
|
||||
self,
|
||||
@ -1058,7 +1058,7 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
if not tasks:
|
||||
raise InvalidParameterException("Tasks cannot be empty.")
|
||||
response = []
|
||||
for cluster, cluster_tasks in self.tasks.items():
|
||||
for cluster_tasks in self.tasks.values():
|
||||
for task_arn, task in cluster_tasks.items():
|
||||
task_id = task_arn.split("/")[-1]
|
||||
if (
|
||||
@ -1080,7 +1080,7 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
):
|
||||
filtered_tasks = []
|
||||
for cluster, tasks in self.tasks.items():
|
||||
for arn, task in tasks.items():
|
||||
for task in tasks.values():
|
||||
filtered_tasks.append(task)
|
||||
if cluster_str:
|
||||
cluster = self._get_cluster(cluster_str)
|
||||
@ -1193,9 +1193,8 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
def list_services(self, cluster_str, scheduling_strategy=None):
|
||||
cluster_name = cluster_str.split("/")[-1]
|
||||
service_arns = []
|
||||
for key, value in self.services.items():
|
||||
for key, service in self.services.items():
|
||||
if cluster_name + ":" in key:
|
||||
service = self.services[key]
|
||||
if (
|
||||
scheduling_strategy is None
|
||||
or service.scheduling_strategy == scheduling_strategy
|
||||
@ -1374,7 +1373,6 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
def deregister_container_instance(self, cluster_str, container_instance_str, force):
|
||||
cluster = self._get_cluster(cluster_str)
|
||||
|
||||
failures = []
|
||||
container_instance_id = container_instance_str.split("/")[-1]
|
||||
container_instance = self.container_instances[cluster.name].get(
|
||||
container_instance_id
|
||||
@ -1393,7 +1391,7 @@ class EC2ContainerServiceBackend(BaseBackend):
|
||||
] = container_instance
|
||||
del self.container_instances[cluster.name][container_instance_id]
|
||||
self._respond_to_cluster_state_update(cluster_str)
|
||||
return container_instance, failures
|
||||
return container_instance
|
||||
|
||||
def _respond_to_cluster_state_update(self, cluster_str):
|
||||
self._get_cluster(cluster_str)
|
||||
|
@ -255,7 +255,7 @@ class EC2ContainerServiceResponse(BaseResponse):
|
||||
cluster_str = self._get_param("cluster", "default")
|
||||
container_instance_str = self._get_param("containerInstance")
|
||||
force = self._get_param("force")
|
||||
container_instance, failures = self.ecs_backend.deregister_container_instance(
|
||||
container_instance = self.ecs_backend.deregister_container_instance(
|
||||
cluster_str, container_instance_str, force
|
||||
)
|
||||
return json.dumps({"containerInstance": container_instance.response_object})
|
||||
|
@ -9,7 +9,7 @@ class FileSystemAlreadyExists(EFSError):
code = 409

def __init__(self, creation_token, *args, **kwargs):
super(FileSystemAlreadyExists, self).__init__(
super().__init__(
"FileSystemAlreadyExists",
"File system with {} already exists.".format(creation_token),
*args,
@ -21,7 +21,7 @@ class FileSystemNotFound(EFSError):
code = 404

def __init__(self, file_system_id, *args, **kwargs):
super(FileSystemNotFound, self).__init__(
super().__init__(
"FileSystemNotFound",
"File system {} does not exist.".format(file_system_id),
*args,
@ -33,23 +33,21 @@ class FileSystemInUse(EFSError):
code = 409

def __init__(self, msg, *args, **kwargs):
super(FileSystemInUse, self).__init__("FileSystemInUse", msg, *args, **kwargs)
super().__init__("FileSystemInUse", msg, *args, **kwargs)


class MountTargetConflict(EFSError):
code = 409

def __init__(self, msg, *args, **kwargs):
super(MountTargetConflict, self).__init__(
"MountTargetConflict", msg, *args, **kwargs
)
super().__init__("MountTargetConflict", msg, *args, **kwargs)


class MountTargetNotFound(EFSError):
code = 404

def __init__(self, mount_target_id, *args, **kwargs):
super(MountTargetNotFound, self).__init__(
super().__init__(
"MountTargetNotFound",
"Mount target '{}' does not exist.".format(mount_target_id),
*args,
@ -61,21 +59,21 @@ class BadRequest(EFSError):
code = 400

def __init__(self, msg, *args, **kwargs):
super(BadRequest, self).__init__("BadRequest", msg, *args, **kwargs)
super().__init__("BadRequest", msg, *args, **kwargs)


class PolicyNotFound(EFSError):
code = 404

def __init__(self, *args, **kwargs):
super(PolicyNotFound, self).__init__("PolicyNotFound", *args, **kwargs)
super().__init__("PolicyNotFound", *args, **kwargs)


class SubnetNotFound(EFSError):
code = 404

def __init__(self, subnet_id, *args, **kwargs):
super(SubnetNotFound, self).__init__(
super().__init__(
"SubnetNotFound",
"The subnet ID '{}' does not exist".format(subnet_id),
*args,
@ -87,7 +85,7 @@ class SecurityGroupNotFound(EFSError):
code = 404

def __init__(self, security_group_id, *args, **kwargs):
super(SecurityGroupNotFound, self).__init__(
super().__init__(
"SecurityGroupNotFound",
"The SecurityGroup ID '{}' does not exist".format(security_group_id),
*args,
@ -99,6 +97,4 @@ class SecurityGroupLimitExceeded(EFSError):
code = 400

def __init__(self, msg, *args, **kwargs):
super(SecurityGroupLimitExceeded, self).__init__(
"SecurityGroupLimitExceeded", msg, *args, **kwargs
)
super().__init__("SecurityGroupLimitExceeded", msg, *args, **kwargs)

@ -315,7 +315,7 @@ class EFSBackend(BaseBackend):
"""

def __init__(self, region_name=None):
super(EFSBackend, self).__init__()
super().__init__()
self.region_name = region_name
self.creation_tokens = set()
self.file_systems_by_id = {}

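Most hunks in this commit are the same mechanical pylint fix, reported as super-with-arguments (R1725): the Python 2 compatible `super(Class, self).__init__(...)` call is replaced with the argument-free Python 3 form. A minimal, self-contained before/after sketch; the class names are made up for illustration and are not part of moto:

    class BaseError(Exception):
        def __init__(self, code, message):
            super().__init__(message)
            self.code = code
            self.message = message


    # Old style, flagged by pylint as super-with-arguments (R1725).
    class NotFoundOld(BaseError):
        def __init__(self, resource):
            super(NotFoundOld, self).__init__(
                "NotFound", "Resource {} does not exist.".format(resource)
            )


    # New style, as applied throughout this commit.
    class NotFoundNew(BaseError):
        def __init__(self, resource):
            super().__init__("NotFound", "Resource {} does not exist.".format(resource))


    print(NotFoundNew("fs-12345678").code)  # NotFound

Both forms behave identically at runtime; the argument-free call is simply shorter and keeps working if the class is renamed.
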
@ -312,7 +312,7 @@ class ManagedNodegroup:

class EKSBackend(BaseBackend):
def __init__(self, region_name):
super(EKSBackend, self).__init__()
super().__init__()
self.clusters = dict()
self.cluster_count = 0
self.region_name = region_name

@ -3,13 +3,9 @@ from moto.core.exceptions import RESTError

class InvalidParameterValueError(RESTError):
def __init__(self, message):
super(InvalidParameterValueError, self).__init__(
"InvalidParameterValue", message
)
super().__init__("InvalidParameterValue", message)


class ResourceNotFoundException(RESTError):
def __init__(self, message):
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException", message
)
super().__init__("ResourceNotFoundException", message)

@ -63,7 +63,7 @@ class Pipeline(BaseModel):

class ElasticTranscoderBackend(BaseBackend):
def __init__(self, region_name=None):
super(ElasticTranscoderBackend, self).__init__()
super().__init__()
self.region_name = region_name
self.pipelines = {}

@ -99,12 +99,12 @@ class ElasticTranscoderBackend(BaseBackend):
def list_pipelines(self):
return [p.to_dict() for _, p in self.pipelines.items()]

def read_pipeline(self, id):
return self.pipelines[id]
def read_pipeline(self, pipeline_id):
return self.pipelines[pipeline_id]

def update_pipeline(
self,
id,
pipeline_id,
name,
input_bucket,
role,
@ -113,7 +113,7 @@ class ElasticTranscoderBackend(BaseBackend):
content_config,
thumbnail_config,
):
pipeline = self.read_pipeline(id)
pipeline = self.read_pipeline(pipeline_id)
pipeline.update(name, input_bucket, role)
warnings = []
return pipeline, warnings

@ -117,7 +117,7 @@ class ElasticTranscoderResponse(BaseResponse):
if err:
return err
pipeline, warnings = self.elastictranscoder_backend.update_pipeline(
id=_id,
pipeline_id=_id,
name=name,
input_bucket=input_bucket,
role=role,

@ -7,21 +7,21 @@ class ELBClientError(RESTError):

class DuplicateTagKeysError(ELBClientError):
def __init__(self, cidr):
super(DuplicateTagKeysError, self).__init__(
super().__init__(
"DuplicateTagKeys", "Tag key was specified more than once: {0}".format(cidr)
)


class CertificateNotFoundException(ELBClientError):
def __init__(self):
super(CertificateNotFoundException, self).__init__(
super().__init__(
"CertificateNotFoundException", "Supplied certificate was not found"
)


class LoadBalancerNotFoundError(ELBClientError):
def __init__(self, cidr):
super(LoadBalancerNotFoundError, self).__init__(
super().__init__(
"LoadBalancerNotFound",
"The specified load balancer does not exist: {0}".format(cidr),
)
@ -29,7 +29,7 @@ class LoadBalancerNotFoundError(ELBClientError):

class TooManyTagsError(ELBClientError):
def __init__(self):
super(TooManyTagsError, self).__init__(
super().__init__(
"LoadBalancerNotFound",
"The quota for the number of tags that can be assigned to a load balancer has been reached",
)
@ -37,7 +37,7 @@ class TooManyTagsError(ELBClientError):

class BadHealthCheckDefinition(ELBClientError):
def __init__(self):
super(BadHealthCheckDefinition, self).__init__(
super().__init__(
"ValidationError",
"HealthCheck Target must begin with one of HTTP, TCP, HTTPS, SSL",
)
@ -45,7 +45,7 @@ class BadHealthCheckDefinition(ELBClientError):

class DuplicateListenerError(ELBClientError):
def __init__(self, name, port):
super(DuplicateListenerError, self).__init__(
super().__init__(
"DuplicateListener",
"A listener already exists for {0} with LoadBalancerPort {1}, but with a different InstancePort, Protocol, or SSLCertificateId".format(
name, port
@ -55,7 +55,7 @@ class DuplicateListenerError(ELBClientError):

class DuplicateLoadBalancerName(ELBClientError):
def __init__(self, name):
super(DuplicateLoadBalancerName, self).__init__(
super().__init__(
"DuplicateLoadBalancerName",
"The specified load balancer name already exists for this account: {0}".format(
name
@ -65,14 +65,12 @@ class DuplicateLoadBalancerName(ELBClientError):

class EmptyListenersError(ELBClientError):
def __init__(self):
super(EmptyListenersError, self).__init__(
"ValidationError", "Listeners cannot be empty"
)
super().__init__("ValidationError", "Listeners cannot be empty")


class InvalidSecurityGroupError(ELBClientError):
def __init__(self):
super(InvalidSecurityGroupError, self).__init__(
super().__init__(
"ValidationError",
"One or more of the specified security groups do not exist.",
)

@ -309,7 +309,7 @@ class ELBResponse(BaseResponse):
return template.render()

def remove_tags(self):
for key, value in self.querystring.items():
for key in self.querystring:
if "LoadBalancerNames.member" in key:
number = key.split(".")[2]
load_balancer_name = self._get_param(
@ -319,7 +319,6 @@ class ELBResponse(BaseResponse):
if not elb:
raise LoadBalancerNotFoundError(load_balancer_name)

key = "Tag.member.{0}.Key".format(number)
for t_key, t_val in self.querystring.items():
if t_key.startswith("Tags.member."):
if t_key.split(".")[3] == "Key":
@ -330,7 +329,7 @@ class ELBResponse(BaseResponse):

def describe_tags(self):
elbs = []
for key, value in self.querystring.items():
for key in self.querystring:
if "LoadBalancerNames.member" in key:
number = key.split(".")[2]
load_balancer_name = self._get_param(

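The `remove_tags` and `describe_tags` hunks above fix a different warning: the loop only needs the keys, so the unused `value` variable goes away and the dict is iterated directly instead of via `.items()`. A small standalone sketch with a made-up querystring dict, shaped roughly like the one `BaseResponse` parses:

    querystring = {
        "LoadBalancerNames.member.1": ["my-elb"],
        "Tags.member.1.Key": ["env"],
    }

    # Old: `value` is never used, which pylint reports as unused-variable.
    for key, value in querystring.items():
        if "LoadBalancerNames.member" in key:
            print("old:", key)

    # New: iterate the keys directly, as the commit does.
    for key in querystring:
        if "LoadBalancerNames.member" in key:
            print("new:", key)
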
@ -7,42 +7,38 @@ class ELBClientError(RESTError):

class DuplicateTagKeysError(ELBClientError):
def __init__(self, cidr):
super(DuplicateTagKeysError, self).__init__(
super().__init__(
"DuplicateTagKeys", "Tag key was specified more than once: {0}".format(cidr)
)


class LoadBalancerNotFoundError(ELBClientError):
def __init__(self):
super(LoadBalancerNotFoundError, self).__init__(
super().__init__(
"LoadBalancerNotFound", "The specified load balancer does not exist."
)


class ListenerNotFoundError(ELBClientError):
def __init__(self):
super(ListenerNotFoundError, self).__init__(
"ListenerNotFound", "The specified listener does not exist."
)
super().__init__("ListenerNotFound", "The specified listener does not exist.")


class SubnetNotFoundError(ELBClientError):
def __init__(self):
super(SubnetNotFoundError, self).__init__(
"SubnetNotFound", "The specified subnet does not exist."
)
super().__init__("SubnetNotFound", "The specified subnet does not exist.")


class TargetGroupNotFoundError(ELBClientError):
def __init__(self):
super(TargetGroupNotFoundError, self).__init__(
super().__init__(
"TargetGroupNotFound", "The specified target group does not exist."
)


class TooManyTagsError(ELBClientError):
def __init__(self):
super(TooManyTagsError, self).__init__(
super().__init__(
"TooManyTagsError",
"The quota for the number of tags that can be assigned to a load balancer has been reached",
)
@ -50,7 +46,7 @@ class TooManyTagsError(ELBClientError):

class BadHealthCheckDefinition(ELBClientError):
def __init__(self):
super(BadHealthCheckDefinition, self).__init__(
super().__init__(
"ValidationError",
"HealthCheck Target must begin with one of HTTP, TCP, HTTPS, SSL",
)
@ -58,14 +54,14 @@ class BadHealthCheckDefinition(ELBClientError):

class DuplicateListenerError(ELBClientError):
def __init__(self):
super(DuplicateListenerError, self).__init__(
super().__init__(
"DuplicateListener", "A listener with the specified port already exists."
)


class DuplicateLoadBalancerName(ELBClientError):
def __init__(self):
super(DuplicateLoadBalancerName, self).__init__(
super().__init__(
"DuplicateLoadBalancerName",
"A load balancer with the specified name already exists.",
)
@ -73,7 +69,7 @@ class DuplicateLoadBalancerName(ELBClientError):

class DuplicateTargetGroupName(ELBClientError):
def __init__(self):
super(DuplicateTargetGroupName, self).__init__(
super().__init__(
"DuplicateTargetGroupName",
"A target group with the specified name already exists.",
)
@ -81,7 +77,7 @@ class DuplicateTargetGroupName(ELBClientError):

class InvalidTargetError(ELBClientError):
def __init__(self):
super(InvalidTargetError, self).__init__(
super().__init__(
"InvalidTarget",
"The specified target does not exist or is not in the same VPC as the target group.",
)
@ -89,16 +85,12 @@ class InvalidTargetError(ELBClientError):

class EmptyListenersError(ELBClientError):
def __init__(self):
super(EmptyListenersError, self).__init__(
"ValidationError", "Listeners cannot be empty"
)
super().__init__("ValidationError", "Listeners cannot be empty")


class PriorityInUseError(ELBClientError):
def __init__(self):
super(PriorityInUseError, self).__init__(
"PriorityInUse", "The specified priority is in use."
)
super().__init__("PriorityInUse", "The specified priority is in use.")


class InvalidConditionFieldError(ELBClientError):
@ -112,7 +104,7 @@ class InvalidConditionFieldError(ELBClientError):
]

def __init__(self, invalid_name):
super(InvalidConditionFieldError, self).__init__(
super().__init__(
"ValidationError",
"Condition field '%s' must be one of '[%s]'"
% (invalid_name, ",".join(self.VALID_FIELDS)),
@ -121,12 +113,12 @@ class InvalidConditionFieldError(ELBClientError):

class InvalidConditionValueError(ELBClientError):
def __init__(self, msg):
super(InvalidConditionValueError, self).__init__("ValidationError", msg)
super().__init__("ValidationError", msg)


class InvalidActionTypeError(ELBClientError):
def __init__(self, invalid_name, index):
super(InvalidActionTypeError, self).__init__(
super().__init__(
"ValidationError",
"1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect, fixed-response]"
% (invalid_name, index),
@ -135,14 +127,12 @@ class InvalidActionTypeError(ELBClientError):

class ActionTargetGroupNotFoundError(ELBClientError):
def __init__(self, arn):
super(ActionTargetGroupNotFoundError, self).__init__(
"TargetGroupNotFound", "Target group '%s' not found" % arn
)
super().__init__("TargetGroupNotFound", "Target group '%s' not found" % arn)


class ListenerOrBalancerMissingError(ELBClientError):
def __init__(self, arn):
super(ListenerOrBalancerMissingError, self).__init__(
super().__init__(
"ValidationError",
"You must specify either listener ARNs or a load balancer ARN",
)
@ -150,24 +140,22 @@ class ListenerOrBalancerMissingError(ELBClientError):

class InvalidDescribeRulesRequest(ELBClientError):
def __init__(self, msg):
super(InvalidDescribeRulesRequest, self).__init__("ValidationError", msg)
super().__init__("ValidationError", msg)


class ResourceInUseError(ELBClientError):
def __init__(self, msg="A specified resource is in use"):
super(ResourceInUseError, self).__init__("ResourceInUse", msg)
super().__init__("ResourceInUse", msg)


class RuleNotFoundError(ELBClientError):
def __init__(self):
super(RuleNotFoundError, self).__init__(
"RuleNotFound", "The specified rule does not exist."
)
super().__init__("RuleNotFound", "The specified rule does not exist.")


class DuplicatePriorityError(ELBClientError):
def __init__(self, invalid_value):
super(DuplicatePriorityError, self).__init__(
super().__init__(
"ValidationError",
"Priority '%s' was provided multiple times" % invalid_value,
)
@ -175,23 +163,21 @@ class DuplicatePriorityError(ELBClientError):

class InvalidTargetGroupNameError(ELBClientError):
def __init__(self, msg):
super(InvalidTargetGroupNameError, self).__init__("ValidationError", msg)
super().__init__("ValidationError", msg)


class InvalidModifyRuleArgumentsError(ELBClientError):
def __init__(self):
super(InvalidModifyRuleArgumentsError, self).__init__(
super().__init__(
"ValidationError", "Either conditions or actions must be specified"
)


class InvalidStatusCodeActionTypeError(ELBClientError):
def __init__(self, msg):
super(InvalidStatusCodeActionTypeError, self).__init__("ValidationError", msg)
super().__init__("ValidationError", msg)


class InvalidLoadBalancerActionException(ELBClientError):
def __init__(self, msg):
super(InvalidLoadBalancerActionException, self).__init__(
"InvalidLoadBalancerAction", msg
)
super().__init__("InvalidLoadBalancerAction", msg)

@ -1395,10 +1395,6 @@ Member must satisfy regular expression pattern: {}".format(
listener = load_balancer.listeners[arn]

if port is not None:
for listener_arn, current_listener in load_balancer.listeners.items():
if listener_arn == arn:
continue

listener.port = port

if protocol is not None:

@ -3,20 +3,14 @@ from moto.core.exceptions import JsonRESTError

class InvalidRequestException(JsonRESTError):
def __init__(self, message, **kwargs):
super(InvalidRequestException, self).__init__(
"InvalidRequestException", message, **kwargs
)
super().__init__("InvalidRequestException", message, **kwargs)


class ValidationException(JsonRESTError):
def __init__(self, message, **kwargs):
super(ValidationException, self).__init__(
"ValidationException", message, **kwargs
)
super().__init__("ValidationException", message, **kwargs)


class ResourceNotFoundException(JsonRESTError):
def __init__(self, message, **kwargs):
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException", message, **kwargs
)
super().__init__("ResourceNotFoundException", message, **kwargs)

@ -40,9 +40,9 @@ class FakeBootstrapAction(BaseModel):

class FakeInstance(BaseModel):
def __init__(
self, ec2_instance_id, instance_group, instance_fleet_id=None, id=None,
self, ec2_instance_id, instance_group, instance_fleet_id=None, instance_id=None,
):
self.id = id or random_instance_group_id()
self.id = instance_id or random_instance_group_id()
self.ec2_instance_id = ec2_instance_id
self.instance_group = instance_group
self.instance_fleet_id = instance_fleet_id
@ -57,12 +57,12 @@ class FakeInstanceGroup(BaseModel):
instance_type,
market="ON_DEMAND",
name=None,
id=None,
instance_group_id=None,
bid_price=None,
ebs_configuration=None,
auto_scaling_policy=None,
):
self.id = id or random_instance_group_id()
self.id = instance_group_id or random_instance_group_id()
self.cluster_id = cluster_id

self.bid_price = bid_price
@ -391,7 +391,7 @@ class FakeSecurityConfiguration(BaseModel):

class ElasticMapReduceBackend(BaseBackend):
def __init__(self, region_name):
super(ElasticMapReduceBackend, self).__init__()
super().__init__()
self.region_name = region_name
self.clusters = {}
self.instance_groups = {}

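The EMR hunks above, and the EMR Containers hunks that follow, rename parameters called `id`: that name shadows the built-in `id()` and trips pylint's redefined-builtin check, so the commit switches to descriptive names (`instance_id`, `instance_group_id`, `cluster_id`, `job_id`) and updates the keyword arguments at each call site to match. A toy, self-contained version of the same rename (function names and data here are made up):

    jobs = {"0123456789abcdefghi": {"state": "RUNNING"}}

    # Old signature: `id` shadows the builtin id() inside the function body.
    def describe_job_run_old(id, registry):
        return registry[id]

    # New signature, mirroring the commit; callers now pass job_id=... instead of id=...
    def describe_job_run_new(job_id, registry):
        return registry[job_id]

    print(describe_job_run_new(job_id="0123456789abcdefghi", registry=jobs))
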
@ -160,7 +160,7 @@ class EMRContainersBackend(BaseBackend):
"""Implementation of EMRContainers APIs."""

def __init__(self, region_name=None):
super(EMRContainersBackend, self).__init__()
super().__init__()
self.virtual_clusters = dict()
self.virtual_cluster_count = 0
self.jobs = dict()
@ -198,18 +198,18 @@ class EMRContainersBackend(BaseBackend):
self.virtual_cluster_count += 1
return virtual_cluster

def delete_virtual_cluster(self, id):
if id not in self.virtual_clusters:
def delete_virtual_cluster(self, cluster_id):
if cluster_id not in self.virtual_clusters:
raise ValidationException("VirtualCluster does not exist")

self.virtual_clusters[id].state = "TERMINATED"
return self.virtual_clusters[id]
self.virtual_clusters[cluster_id].state = "TERMINATED"
return self.virtual_clusters[cluster_id]

def describe_virtual_cluster(self, id):
if id not in self.virtual_clusters:
raise ValidationException(f"Virtual cluster {id} doesn't exist.")
def describe_virtual_cluster(self, cluster_id):
if cluster_id not in self.virtual_clusters:
raise ValidationException(f"Virtual cluster {cluster_id} doesn't exist.")

return self.virtual_clusters[id].to_dict()
return self.virtual_clusters[cluster_id].to_dict()

def list_virtual_clusters(
self,
@ -303,26 +303,26 @@ class EMRContainersBackend(BaseBackend):
self.job_count += 1
return job

def cancel_job_run(self, id, virtual_cluster_id):
def cancel_job_run(self, job_id, virtual_cluster_id):

if not re.match(r"[a-z,A-Z,0-9]{19}", id):
if not re.match(r"[a-z,A-Z,0-9]{19}", job_id):
raise ValidationException("Invalid job run short id")

if id not in self.jobs.keys():
raise ResourceNotFoundException(f"Job run {id} doesn't exist.")
if job_id not in self.jobs.keys():
raise ResourceNotFoundException(f"Job run {job_id} doesn't exist.")

if virtual_cluster_id != self.jobs[id].virtual_cluster_id:
raise ResourceNotFoundException(f"Job run {id} doesn't exist.")
if virtual_cluster_id != self.jobs[job_id].virtual_cluster_id:
raise ResourceNotFoundException(f"Job run {job_id} doesn't exist.")

if self.jobs[id].state in [
if self.jobs[job_id].state in [
"FAILED",
"CANCELLED",
"CANCEL_PENDING",
"COMPLETED",
]:
raise ValidationException(f"Job run {id} is not in a cancellable state")
raise ValidationException(f"Job run {job_id} is not in a cancellable state")

job = self.jobs[id]
job = self.jobs[job_id]
job.state = "CANCELLED"
job.finished_at = iso_8601_datetime_without_milliseconds(
datetime.today().replace(hour=0, minute=1, second=0, microsecond=0)
@ -360,17 +360,17 @@ class EMRContainersBackend(BaseBackend):
sort_key = "id"
return paginated_list(jobs, sort_key, max_results, next_token)

def describe_job_run(self, id, virtual_cluster_id):
if not re.match(r"[a-z,A-Z,0-9]{19}", id):
def describe_job_run(self, job_id, virtual_cluster_id):
if not re.match(r"[a-z,A-Z,0-9]{19}", job_id):
raise ValidationException("Invalid job run short id")

if id not in self.jobs.keys():
raise ResourceNotFoundException(f"Job run {id} doesn't exist.")
if job_id not in self.jobs.keys():
raise ResourceNotFoundException(f"Job run {job_id} doesn't exist.")

if virtual_cluster_id != self.jobs[id].virtual_cluster_id:
raise ResourceNotFoundException(f"Job run {id} doesn't exist.")
if virtual_cluster_id != self.jobs[job_id].virtual_cluster_id:
raise ResourceNotFoundException(f"Job run {job_id} doesn't exist.")

return self.jobs[id].to_dict()
return self.jobs[job_id].to_dict()


emrcontainers_backends = BackendDict(EMRContainersBackend, "emr-containers")

@ -34,15 +34,19 @@ class EMRContainersResponse(BaseResponse):
return 200, {}, json.dumps(dict(virtual_cluster))

def delete_virtual_cluster(self):
id = self._get_param("virtualClusterId")
cluster_id = self._get_param("virtualClusterId")

virtual_cluster = self.emrcontainers_backend.delete_virtual_cluster(id=id)
virtual_cluster = self.emrcontainers_backend.delete_virtual_cluster(
cluster_id=cluster_id
)
return 200, {}, json.dumps(dict(virtual_cluster))

def describe_virtual_cluster(self):
id = self._get_param("virtualClusterId")
cluster_id = self._get_param("virtualClusterId")

virtual_cluster = self.emrcontainers_backend.describe_virtual_cluster(id=id)
virtual_cluster = self.emrcontainers_backend.describe_virtual_cluster(
cluster_id=cluster_id
)
response = {"virtualCluster": virtual_cluster}
return 200, {}, json.dumps(response)

@ -93,11 +97,11 @@ class EMRContainersResponse(BaseResponse):
return 200, {}, json.dumps(dict(job))

def cancel_job_run(self):
id = self._get_param("jobRunId")
job_id = self._get_param("jobRunId")
virtual_cluster_id = self._get_param("virtualClusterId")

job = self.emrcontainers_backend.cancel_job_run(
id=id, virtual_cluster_id=virtual_cluster_id,
job_id=job_id, virtual_cluster_id=virtual_cluster_id,
)
return 200, {}, json.dumps(dict(job))

@ -124,11 +128,11 @@ class EMRContainersResponse(BaseResponse):
return 200, {}, json.dumps(response)

def describe_job_run(self):
id = self._get_param("jobRunId")
job_id = self._get_param("jobRunId")
virtual_cluster_id = self._get_param("virtualClusterId")

job_run = self.emrcontainers_backend.describe_job_run(
id=id, virtual_cluster_id=virtual_cluster_id,
job_id=job_id, virtual_cluster_id=virtual_cluster_id,
)

response = {"jobRun": job_run}

@ -5,7 +5,7 @@ class IllegalStatusException(JsonRESTError):
code = 400

def __init__(self, message):
super(IllegalStatusException, self).__init__("IllegalStatusException", message)
super().__init__("IllegalStatusException", message)


class InvalidEventPatternException(JsonRESTError):
@ -16,31 +16,25 @@ class InvalidEventPatternException(JsonRESTError):
if reason:
msg += f"Reason: {reason}"

super(InvalidEventPatternException, self).__init__(
"InvalidEventPatternException", msg
)
super().__init__("InvalidEventPatternException", msg)


class ResourceNotFoundException(JsonRESTError):
code = 400

def __init__(self, message):
super(ResourceNotFoundException, self).__init__(
"ResourceNotFoundException", message
)
super().__init__("ResourceNotFoundException", message)


class ResourceAlreadyExistsException(JsonRESTError):
code = 400

def __init__(self, message):
super(ResourceAlreadyExistsException, self).__init__(
"ResourceAlreadyExistsException", message
)
super().__init__("ResourceAlreadyExistsException", message)


class ValidationException(JsonRESTError):
code = 400

def __init__(self, message):
super(ValidationException, self).__init__("ValidationException", message)
super().__init__("ValidationException", message)