Update Black + formatting (#4926)
parent a4f58f0774
commit 29d01c35bc

Makefile | 4
@@ -16,8 +16,12 @@ init:
 	@pip install -r requirements-dev.txt
 
 lint:
+	@echo "Running flake8..."
 	flake8 moto
+	@echo "Running black... "
+	@echo "(Make sure you have black-22.1.0 installed, as other versions will produce different results)"
 	black --check moto/ tests/
+	@echo "Running pylint..."
 	pylint -j 0 moto tests
 
 format:
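Almost every hunk below follows one pattern, which is why the lint target now pins the Black version: Black honours the "magic trailing comma", so a call whose last argument carries a trailing comma is kept exploded across multiple lines, while the same call without the comma is collapsed onto one line when it fits. A minimal sketch of the two shapes (hypothetical function name, for illustration only):

    # A magic trailing comma keeps the call multi-line:
    backend.delete_widget(
        widget_id=widget_id,
        reason=reason,
    )

    # Without it, Black 22.x collapses the call when it fits on one line:
    backend.delete_widget(widget_id=widget_id, reason=reason)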
@@ -281,7 +281,7 @@ class AWSCertificateManagerResponse(BaseResponse):
             certificate_chain,
             private_key,
         ) = self.acm_backend.export_certificate(
-            certificate_arn=certificate_arn, passphrase=passphrase,
+            certificate_arn=certificate_arn, passphrase=passphrase
         )
         return json.dumps(
             dict(
@@ -29,7 +29,7 @@ class ConflictException(ApiGatewayException):
 class AwsProxyNotAllowed(BadRequestException):
     def __init__(self):
         super().__init__(
-            "Integrations of type 'AWS_PROXY' currently only supports Lambda function and Firehose stream invocations.",
+            "Integrations of type 'AWS_PROXY' currently only supports Lambda function and Firehose stream invocations."
         )
 
@@ -53,7 +53,7 @@ class IntegrationMethodNotDefined(BadRequestException):
 class InvalidResourcePathException(BadRequestException):
     def __init__(self):
         super().__init__(
-            "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end and an optional plus sign before the closing brace.",
+            "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end and an optional plus sign before the closing brace."
         )
 
@@ -198,7 +198,7 @@ class InvalidBasePathException(BadRequestException):
     def __init__(self):
         super().__init__(
             "API Gateway V1 doesn't support the slash character (/) in base path mappings. "
-            "To create a multi-level base path mapping, use API Gateway V2.",
+            "To create a multi-level base path mapping, use API Gateway V2."
         )
@@ -8,10 +8,7 @@ from copy import copy
 
 import time
 
-try:
-    from urlparse import urlparse
-except ImportError:
-    from urllib.parse import urlparse
+from urllib.parse import urlparse
 import responses
 from moto.core import ACCOUNT_ID, BaseBackend, BaseModel, CloudFormationModel
 from .utils import create_id, to_path
@@ -1221,7 +1218,7 @@ class APIGatewayBackend(BaseBackend):
             restApiId=api_id,
             ...,
             uri="http://httpbin.org/robots.txt",
-            integrationHttpMethod="GET",
+            integrationHttpMethod="GET"
         )
         deploy_url = f"https://{api_id}.execute-api.us-east-1.amazonaws.com/dev"
         requests.get(deploy_url).content.should.equal(b"a fake response")
@@ -1847,7 +1844,7 @@ class APIGatewayBackend(BaseBackend):
     def create_request_validator(self, restapi_id, name, body, params):
         restApi = self.get_rest_api(restapi_id)
         return restApi.create_request_validator(
-            name=name, validateRequestBody=body, validateRequestParameters=params,
+            name=name, validateRequestBody=body, validateRequestParameters=params
         )
 
     def get_request_validator(self, restapi_id, validator_id):
@@ -5,10 +5,7 @@ from urllib.parse import unquote
 from moto.utilities.utils import merge_multiple_dicts
 from moto.core.responses import BaseResponse
 from .models import apigateway_backends
-from .exceptions import (
-    ApiGatewayException,
-    InvalidRequestInput,
-)
+from .exceptions import ApiGatewayException, InvalidRequestInput
 
 API_KEY_SOURCES = ["AUTHORIZER", "HEADER"]
 AUTHORIZER_TYPES = ["TOKEN", "REQUEST", "COGNITO_USER_POOLS"]
@@ -30,11 +27,7 @@ class APIGatewayResponse(BaseResponse):
     def error(self, type_, message, status=400):
         headers = self.response_headers or {}
         headers["X-Amzn-Errortype"] = type_
-        return (
-            status,
-            headers,
-            json.dumps({"__type": type_, "message": message}),
-        )
+        return (status, headers, json.dumps({"__type": type_, "message": message}))
 
     @property
     def backend(self):
@@ -757,7 +750,7 @@ class APIGatewayResponse(BaseResponse):
         stage = self._get_param("stage")
 
         base_path_mapping_resp = self.backend.create_base_path_mapping(
-            domain_name, rest_api_id, base_path, stage,
+            domain_name, rest_api_id, base_path, stage
         )
         return 201, {}, json.dumps(base_path_mapping_resp)
 
@@ -834,19 +827,19 @@ class APIGatewayResponse(BaseResponse):
         rest_api_id = self.path.split("/")[-3]
         response_type = self.path.split("/")[-1]
         response = self.backend.get_gateway_response(
-            rest_api_id=rest_api_id, response_type=response_type,
+            rest_api_id=rest_api_id, response_type=response_type
         )
         return 200, {}, json.dumps(response)
 
     def get_gateway_responses(self):
         rest_api_id = self.path.split("/")[-2]
-        responses = self.backend.get_gateway_responses(rest_api_id=rest_api_id,)
+        responses = self.backend.get_gateway_responses(rest_api_id=rest_api_id)
         return 200, {}, json.dumps(dict(item=responses))
 
     def delete_gateway_response(self):
         rest_api_id = self.path.split("/")[-3]
         response_type = self.path.split("/")[-1]
         self.backend.delete_gateway_response(
-            rest_api_id=rest_api_id, response_type=response_type,
+            rest_api_id=rest_api_id, response_type=response_type
         )
         return 202, {}, json.dumps(dict())
@@ -623,7 +623,7 @@ class ApiGatewayV2Response(BaseResponse):
         integration_id = self.path.split("/")[-1]
 
         self.apigatewayv2_backend.delete_integration(
-            api_id=api_id, integration_id=integration_id,
+            api_id=api_id, integration_id=integration_id
         )
         return 200, {}, "{}"
@@ -82,9 +82,7 @@ class ApplicationAutoscalingBackend(BaseBackend):
     def applicationautoscaling_backend(self):
         return applicationautoscaling_backends[self.region]
 
-    def describe_scalable_targets(
-        self, namespace, r_ids=None, dimension=None,
-    ):
+    def describe_scalable_targets(self, namespace, r_ids=None, dimension=None):
         """Describe scalable targets."""
         if r_ids is None:
             r_ids = []
@@ -20,8 +20,10 @@ class ApplicationAutoScalingResponse(BaseResponse):
         scalable_dimension = self._get_param("ScalableDimension")
         max_results = self._get_int_param("MaxResults", 50)
         marker = self._get_param("NextToken")
-        all_scalable_targets = self.applicationautoscaling_backend.describe_scalable_targets(
-            service_namespace, resource_ids, scalable_dimension
-        )
+        all_scalable_targets = (
+            self.applicationautoscaling_backend.describe_scalable_targets(
+                service_namespace, resource_ids, scalable_dimension
+            )
+        )
         start = int(marker) + 1 if marker else 0
         next_token = None
@@ -10,7 +10,5 @@ class GraphqlAPINotFound(AppSyncExceptions):
     code = 404
 
     def __init__(self, api_id):
-        super().__init__(
-            "NotFoundException", f"GraphQL API {api_id} not found.",
-        )
+        super().__init__("NotFoundException", f"GraphQL API {api_id} not found.")
         self.description = json.dumps({"message": self.message})
@@ -164,7 +164,7 @@ class AppSyncResponse(BaseResponse):
         description = params.get("description")
         expires = params.get("expires")
         api_key = self.appsync_backend.create_api_key(
-            api_id=api_id, description=description, expires=expires,
+            api_id=api_id, description=description, expires=expires
         )
         print(api_key.to_json())
         return 200, {}, json.dumps(dict(apiKey=api_key.to_json()))
@@ -172,9 +172,7 @@ class AppSyncResponse(BaseResponse):
     def delete_api_key(self):
         api_id = self.path.split("/")[-3]
         api_key_id = self.path.split("/")[-1]
-        self.appsync_backend.delete_api_key(
-            api_id=api_id, api_key_id=api_key_id,
-        )
+        self.appsync_backend.delete_api_key(api_id=api_id, api_key_id=api_key_id)
         return 200, {}, json.dumps(dict())
 
     def list_api_keys(self):
@@ -202,37 +200,33 @@ class AppSyncResponse(BaseResponse):
         api_id = self.path.split("/")[-2]
         definition = params.get("definition")
         status = self.appsync_backend.start_schema_creation(
-            api_id=api_id, definition=definition,
+            api_id=api_id, definition=definition
         )
         return 200, {}, json.dumps({"status": status})
 
     def get_schema_creation_status(self):
         api_id = self.path.split("/")[-2]
-        status, details = self.appsync_backend.get_schema_creation_status(
-            api_id=api_id,
-        )
+        status, details = self.appsync_backend.get_schema_creation_status(api_id=api_id)
         return 200, {}, json.dumps(dict(status=status, details=details))
 
     def tag_resource(self):
         resource_arn = self._extract_arn_from_path()
         params = json.loads(self.body)
         tags = params.get("tags")
-        self.appsync_backend.tag_resource(
-            resource_arn=resource_arn, tags=tags,
-        )
+        self.appsync_backend.tag_resource(resource_arn=resource_arn, tags=tags)
         return 200, {}, json.dumps(dict())
 
     def untag_resource(self):
         resource_arn = self._extract_arn_from_path()
         tag_keys = self.querystring.get("tagKeys", [])
         self.appsync_backend.untag_resource(
-            resource_arn=resource_arn, tag_keys=tag_keys,
+            resource_arn=resource_arn, tag_keys=tag_keys
         )
         return 200, {}, json.dumps(dict())
 
     def list_tags_for_resource(self):
         resource_arn = self._extract_arn_from_path()
-        tags = self.appsync_backend.list_tags_for_resource(resource_arn=resource_arn,)
+        tags = self.appsync_backend.list_tags_for_resource(resource_arn=resource_arn)
         return 200, {}, json.dumps(dict(tags=tags))
 
     def _extract_arn_from_path(self):
@@ -245,6 +239,6 @@ class AppSyncResponse(BaseResponse):
         type_name = self.path.split("/")[-1]
         type_format = self.querystring.get("format")[0]
         graphql_type = self.appsync_backend.get_type(
-            api_id=api_id, type_name=type_name, type_format=type_format,
+            api_id=api_id, type_name=type_name, type_format=type_format
         )
         return 200, {}, json.dumps(dict(type=graphql_type))
@@ -158,7 +158,7 @@ class AthenaBackend(BaseBackend):
 
     def list_data_catalogs(self):
         return [
-            {"CatalogName": dc.name, "Type": dc.type,}
+            {"CatalogName": dc.name, "Type": dc.type}
             for dc in self.data_catalogs.values()
         ]
@@ -46,9 +46,7 @@ class InstanceState(object):
 
 
 class FakeLifeCycleHook(BaseModel):
-    def __init__(
-        self, name, as_name, transition, timeout, result,
-    ):
+    def __init__(self, name, as_name, transition, timeout, result):
         self.name = name
         self.as_name = as_name
         if transition:
@@ -952,7 +950,7 @@ class AutoScalingBackend(BaseBackend):
         self.set_desired_capacity(group_name, desired_capacity)
 
     def create_lifecycle_hook(self, name, as_name, transition, timeout, result):
-        lifecycle_hook = FakeLifeCycleHook(name, as_name, transition, timeout, result,)
+        lifecycle_hook = FakeLifeCycleHook(name, as_name, transition, timeout, result)
 
         self.lifecycle_hooks["%s_%s" % (as_name, name)] = lifecycle_hook
         return lifecycle_hook
@@ -1033,10 +1031,10 @@ class AutoScalingBackend(BaseBackend):
         for elb in elbs:
             elb_instace_ids = set(elb.instance_ids)
             self.elb_backend.register_instances(
-                elb.name, group_instance_ids - elb_instace_ids, from_autoscaling=True,
+                elb.name, group_instance_ids - elb_instace_ids, from_autoscaling=True
             )
             self.elb_backend.deregister_instances(
-                elb.name, elb_instace_ids - group_instance_ids, from_autoscaling=True,
+                elb.name, elb_instace_ids - group_instance_ids, from_autoscaling=True
             )
 
     def update_attached_target_groups(self, group_name):
@@ -42,8 +42,8 @@ class AutoScalingResponse(BaseResponse):
 
     def describe_launch_configurations(self):
         names = self._get_multi_param("LaunchConfigurationNames.member")
-        all_launch_configurations = self.autoscaling_backend.describe_launch_configurations(
-            names
+        all_launch_configurations = (
+            self.autoscaling_backend.describe_launch_configurations(names)
         )
         marker = self._get_param("NextToken")
         all_names = [lc.name for lc in all_launch_configurations]
@@ -153,8 +153,8 @@ class AutoScalingResponse(BaseResponse):
     @amzn_request_id
     def describe_load_balancer_target_groups(self):
         group_name = self._get_param("AutoScalingGroupName")
-        target_group_arns = self.autoscaling_backend.describe_load_balancer_target_groups(
-            group_name
+        target_group_arns = (
+            self.autoscaling_backend.describe_load_balancer_target_groups(group_name)
         )
         template = self.response_template(DESCRIBE_LOAD_BALANCER_TARGET_GROUPS)
         return template.render(target_group_arns=target_group_arns)
@@ -271,11 +271,7 @@ class LayerVersion(CloudFormationModel):
         cls, resource_name, cloudformation_json, region_name, **kwargs
     ):
         properties = cloudformation_json["Properties"]
-        optional_properties = (
-            "Description",
-            "CompatibleRuntimes",
-            "LicenseInfo",
-        )
+        optional_properties = ("Description", "CompatibleRuntimes", "LicenseInfo")
 
         # required
         spec = {
@@ -661,8 +657,12 @@ class LambdaFunction(CloudFormationModel, DockerModel):
     def save_logs(self, output):
         # Send output to "logs" backend
         invoke_id = uuid.uuid4().hex
-        log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
-            date=datetime.datetime.utcnow(), version=self.version, invoke_id=invoke_id,
+        log_stream_name = (
+            "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
+                date=datetime.datetime.utcnow(),
+                version=self.version,
+                invoke_id=invoke_id,
+            )
         )
         self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)
         log_events = [
@@ -1132,53 +1132,53 @@ class LayerStorage(object):
 
 class LambdaBackend(BaseBackend):
     """
-        Implementation of the AWS Lambda endpoint.
-        Invoking functions is supported - they will run inside a Docker container, emulating the real AWS behaviour as closely as possible.
+    Implementation of the AWS Lambda endpoint.
+    Invoking functions is supported - they will run inside a Docker container, emulating the real AWS behaviour as closely as possible.
 
-        It is possible to connect from AWS Lambdas to other services, as long as you are running Moto in ServerMode.
-        The Lambda has access to environment variables `MOTO_HOST` and `MOTO_PORT`, which can be used to build the url that MotoServer runs on:
+    It is possible to connect from AWS Lambdas to other services, as long as you are running Moto in ServerMode.
+    The Lambda has access to environment variables `MOTO_HOST` and `MOTO_PORT`, which can be used to build the url that MotoServer runs on:
 
-        .. sourcecode:: python
+    .. sourcecode:: python
 
-            def lambda_handler(event, context):
-                host = os.environ.get("MOTO_HOST")
-                port = os.environ.get("MOTO_PORT")
-                url = host + ":" + port
-                ec2 = boto3.client('ec2', region_name='us-west-2', endpoint_url=url)
+        def lambda_handler(event, context):
+            host = os.environ.get("MOTO_HOST")
+            port = os.environ.get("MOTO_PORT")
+            url = host + ":" + port
+            ec2 = boto3.client('ec2', region_name='us-west-2', endpoint_url=url)
 
-            # Or even simpler:
-            full_url = os.environ.get("MOTO_HTTP_ENDPOINT")
-            ec2 = boto3.client("ec2", region_name="eu-west-1", endpoint_url=full_url)
+        # Or even simpler:
+        full_url = os.environ.get("MOTO_HTTP_ENDPOINT")
+        ec2 = boto3.client("ec2", region_name="eu-west-1", endpoint_url=full_url)
 
-            ec2.do_whatever_inside_the_existing_moto_server()
+        ec2.do_whatever_inside_the_existing_moto_server()
 
-        Moto will run on port 5000 by default. This can be overwritten by setting an environment variable when starting Moto:
+    Moto will run on port 5000 by default. This can be overwritten by setting an environment variable when starting Moto:
 
-        .. sourcecode:: bash
+    .. sourcecode:: bash
 
-            # This env var will be propagated to the Docker container running the Lambda functions
-            MOTO_PORT=5000 moto_server
+        # This env var will be propagated to the Docker container running the Lambda functions
+        MOTO_PORT=5000 moto_server
 
-        The Docker container uses the default network mode, `bridge`.
-        The following environment variables are available for fine-grained control over the Docker connection options:
+    The Docker container uses the default network mode, `bridge`.
+    The following environment variables are available for fine-grained control over the Docker connection options:
 
-        .. sourcecode:: bash
+    .. sourcecode:: bash
 
-            # Provide the name of a custom network to connect to
-            MOTO_DOCKER_NETWORK_NAME=mycustomnetwork moto_server
+        # Provide the name of a custom network to connect to
+        MOTO_DOCKER_NETWORK_NAME=mycustomnetwork moto_server
 
-            # Override the network mode
-            # For example, network_mode=host would use the network of the host machine
-            # Note that this option will be ignored if MOTO_DOCKER_NETWORK_NAME is also set
-            MOTO_DOCKER_NETWORK_MODE=host moto_server
+        # Override the network mode
+        # For example, network_mode=host would use the network of the host machine
+        # Note that this option will be ignored if MOTO_DOCKER_NETWORK_NAME is also set
+        MOTO_DOCKER_NETWORK_MODE=host moto_server
 
-        The Docker images used by Moto are taken from the `lambci/lambda`-repo by default. Use the following environment variable to configure a different repo:
+    The Docker images used by Moto are taken from the `lambci/lambda`-repo by default. Use the following environment variable to configure a different repo:
 
-        .. sourcecode:: bash
+    .. sourcecode:: bash
 
-            MOTO_DOCKER_LAMBDA_IMAGE=mLupin/docker-lambda
+        MOTO_DOCKER_LAMBDA_IMAGE=mLupin/docker-lambda
 
-        .. note:: When using the decorators, a Docker container cannot reach Moto, as it does not run as a server. Any boto3-invocations used within your Lambda will try to connect to AWS.
+    .. note:: When using the decorators, a Docker container cannot reach Moto, as it does not run as a server. Any boto3-invocations used within your Lambda will try to connect to AWS.
     """
 
     def __init__(self, region_name):
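For reference, a self-contained version of the handler sketched in the docstring above (assuming boto3 is importable inside the Lambda container, as it is on AWS's runtime images):

    import os

    import boto3


    def lambda_handler(event, context):
        # MOTO_HTTP_ENDPOINT is injected by Moto when the function runs in ServerMode.
        full_url = os.environ.get("MOTO_HTTP_ENDPOINT")
        ec2 = boto3.client("ec2", region_name="eu-west-1", endpoint_url=full_url)
        # Any call made here hits the running MotoServer instead of real AWS.
        return ec2.describe_instances()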
@@ -599,7 +599,7 @@ class Job(threading.Thread, BaseModel, DockerModel):
         # add host.docker.internal host on linux to emulate Mac + Windows behavior
         # for communication with other mock AWS services running on localhost
         extra_hosts = (
-            {"host.docker.internal": "host-gateway",}
+            {"host.docker.internal": "host-gateway"}
             if platform == "linux" or platform == "linux2"
             else {}
         )
@@ -751,10 +751,11 @@ class Job(threading.Thread, BaseModel, DockerModel):
         self.attempts.append(self.latest_attempt)
 
     def _stop_attempt(self):
-        self.latest_attempt["container"]["logStreamName"] = self.log_stream_name
-        self.latest_attempt["stoppedAt"] = datetime2int_milliseconds(
-            self.job_stopped_at
-        )
+        if self.latest_attempt:
+            self.latest_attempt["container"]["logStreamName"] = self.log_stream_name
+            self.latest_attempt["stoppedAt"] = datetime2int_milliseconds(
+                self.job_stopped_at
+            )
 
     def terminate(self, reason):
         if not self.stop:
@@ -298,7 +298,7 @@ class BatchResponse(BaseResponse):
         return ""
 
     # CancelJob
-    def canceljob(self,):
+    def canceljob(self):
         job_id = self._get_param("jobId")
         reason = self._get_param("reason")
         self.batch_backend.cancel_job(job_id, reason)
@@ -30,9 +30,7 @@ class BudgetsResponse(BaseResponse):
     def delete_budget(self):
         account_id = self._get_param("AccountId")
         budget_name = self._get_param("BudgetName")
-        budgets_backend.delete_budget(
-            account_id=account_id, budget_name=budget_name,
-        )
+        budgets_backend.delete_budget(account_id=account_id, budget_name=budget_name)
         return json.dumps(dict())
 
     def create_notification(self):
@@ -53,7 +51,7 @@ class BudgetsResponse(BaseResponse):
         budget_name = self._get_param("BudgetName")
         notification = self._get_param("Notification")
         budgets_backend.delete_notification(
-            account_id=account_id, budget_name=budget_name, notification=notification,
+            account_id=account_id, budget_name=budget_name, notification=notification
         )
         return json.dumps(dict())
@@ -61,6 +59,6 @@ class BudgetsResponse(BaseResponse):
         account_id = self._get_param("AccountId")
         budget_name = self._get_param("BudgetName")
         notifications = budgets_backend.describe_notifications_for_budget(
-            account_id=account_id, budget_name=budget_name,
+            account_id=account_id, budget_name=budget_name
         )
         return json.dumps(dict(Notifications=notifications, NextToken=None))
@@ -272,9 +272,7 @@ def generate_resource_name(resource_type, stack_name, logical_id):
         return "{0}-{1}-{2}".format(stack_name, logical_id, random_suffix())
 
 
-def parse_resource(
-    resource_json, resources_map,
-):
+def parse_resource(resource_json, resources_map):
     resource_type = resource_json["Type"]
     resource_class = resource_class_from_type(resource_type)
     if not resource_class:
@@ -293,9 +291,7 @@ def parse_resource(
     return resource_class, resource_json, resource_type
 
 
-def parse_resource_and_generate_name(
-    logical_id, resource_json, resources_map,
-):
+def parse_resource_and_generate_name(logical_id, resource_json, resources_map):
     resource_tuple = parse_resource(resource_json, resources_map)
     if not resource_tuple:
         return None
@@ -780,7 +776,7 @@ class ResourceMap(collections_abc.Mapping):
         ]
 
         parse_and_delete_resource(
-            resource_name, resource_json, self._region_name,
+            resource_name, resource_json, self._region_name
         )
 
         self._parsed_resources.pop(parsed_resource.logical_resource_id)
@@ -347,7 +347,7 @@ class CloudFormationResponse(BaseResponse):
             stack = self.cloudformation_backend.get_stack(stack_name)
             if stack.status == "REVIEW_IN_PROGRESS":
                 raise ValidationError(
-                    message="GetTemplateSummary cannot be called on REVIEW_IN_PROGRESS stacks.",
+                    message="GetTemplateSummary cannot be called on REVIEW_IN_PROGRESS stacks."
                 )
             stack_body = stack.template
         elif template_url:
@@ -362,7 +362,7 @@ class CloudFormationResponse(BaseResponse):
         new_params = self._get_param_values(incoming_params, old_stack.parameters)
         if old_stack.template == stack_body and old_stack.parameters == new_params:
             raise ValidationError(
-                old_stack.name, message=f"Stack [{old_stack.name}] already exists",
+                old_stack.name, message=f"Stack [{old_stack.name}] already exists"
             )
 
     def _validate_status(self, stack):
@@ -478,8 +478,8 @@ class CloudFormationResponse(BaseResponse):
             name=stackset_name,
             template=stack_body,
             parameters=parameters,
-            tags=tags,
-            # role_arn=role_arn,
+            tags=tags
+            # role_arn=role_arn
         )
         if self.request_json:
             return json.dumps(
@@ -36,7 +36,7 @@ class CloudFrontResponse(BaseResponse):
         params = self._get_xml_body()
         distribution_config = params.get("DistributionConfig")
         distribution, location, e_tag = cloudfront_backend.create_distribution(
-            distribution_config=distribution_config,
+            distribution_config=distribution_config
         )
         template = self.response_template(CREATE_DISTRIBUTION_TEMPLATE)
         response = template.render(distribution=distribution, xmlns=XMLNS)
@@ -139,7 +139,7 @@ class CloudTrailResponse(BaseResponse):
             trail_arn,
             event_selectors,
             advanced_event_selectors,
-        ) = self.cloudtrail_backend.get_event_selectors(trail_name=trail_name,)
+        ) = self.cloudtrail_backend.get_event_selectors(trail_name=trail_name)
         return json.dumps(
             dict(
                 TrailARN=trail_arn,
@@ -152,16 +152,14 @@ class CloudTrailResponse(BaseResponse):
         params = json.loads(self.body)
         resource_id = params.get("ResourceId")
         tags_list = params.get("TagsList")
-        self.cloudtrail_backend.add_tags(
-            resource_id=resource_id, tags_list=tags_list,
-        )
+        self.cloudtrail_backend.add_tags(resource_id=resource_id, tags_list=tags_list)
         return json.dumps(dict())
 
     def remove_tags(self):
         resource_id = self._get_param("ResourceId")
         tags_list = self._get_param("TagsList")
         self.cloudtrail_backend.remove_tags(
-            resource_id=resource_id, tags_list=tags_list,
+            resource_id=resource_id, tags_list=tags_list
         )
         return json.dumps(dict())
@@ -169,7 +167,7 @@ class CloudTrailResponse(BaseResponse):
         params = json.loads(self.body)
         resource_id_list = params.get("ResourceIdList")
         resource_tag_list = self.cloudtrail_backend.list_tags(
-            resource_id_list=resource_id_list,
+            resource_id_list=resource_id_list
         )
         return json.dumps(dict(ResourceTagList=resource_tag_list))
@@ -177,14 +175,14 @@ class CloudTrailResponse(BaseResponse):
         trail_name = self._get_param("TrailName")
         insight_selectors = self._get_param("InsightSelectors")
         trail_arn, insight_selectors = self.cloudtrail_backend.put_insight_selectors(
-            trail_name=trail_name, insight_selectors=insight_selectors,
+            trail_name=trail_name, insight_selectors=insight_selectors
        )
         return json.dumps(dict(TrailARN=trail_arn, InsightSelectors=insight_selectors))
 
     def get_insight_selectors(self):
         trail_name = self._get_param("TrailName")
         trail_arn, insight_selectors = self.cloudtrail_backend.get_insight_selectors(
-            trail_name=trail_name,
+            trail_name=trail_name
         )
         resp = {"TrailARN": trail_arn}
         if insight_selectors:
@@ -1,10 +1,6 @@
 import json
 
-from moto.core import (
-    BaseBackend,
-    BaseModel,
-    CloudWatchMetricProvider,
-)
+from moto.core import BaseBackend, BaseModel, CloudWatchMetricProvider
 from moto.core.utils import (
     iso_8601_datetime_without_milliseconds,
     iso_8601_datetime_with_nanoseconds,
@@ -54,7 +54,7 @@ class CognitoIdentityResponse(BaseResponse):
 
     def get_id(self):
         return cognitoidentity_backends[self.region].get_id(
-            identity_pool_id=self._get_param("IdentityPoolId"),
+            identity_pool_id=self._get_param("IdentityPoolId")
         )
 
     def describe_identity_pool(self):
@@ -234,8 +234,7 @@ class CognitoIdpUserPoolAttribute(BaseModel):
         else:
             self.developer_only = False
         self.mutable = schema.get(
-            "Mutable",
-            CognitoIdpUserPoolAttribute.STANDARD_SCHEMA[self.name]["Mutable"],
+            "Mutable", CognitoIdpUserPoolAttribute.STANDARD_SCHEMA[self.name]["Mutable"]
         )
         self.required = schema.get(
             "Required",
@@ -1131,7 +1130,7 @@ class CognitoIdpBackend(BaseBackend):
     def _validate_auth_flow(
         self, auth_flow: str, valid_flows: typing.List[AuthFlow]
     ) -> AuthFlow:
-        """ validate auth_flow value and convert auth_flow to enum """
+        """validate auth_flow value and convert auth_flow to enum"""
 
         try:
             auth_flow = AuthFlow[auth_flow]
@@ -1304,11 +1303,11 @@ class CognitoIdpBackend(BaseBackend):
 
     def forgot_password(self, client_id, username):
         """The ForgotPassword operation is partially broken in AWS. If the input is 100% correct it works fine.
-            Otherwise you get semi-random garbage and HTTP 200 OK, for example:
-            - recovery for username which is not registered in any cognito pool
-            - recovery for username belonging to a different user pool than the client id is registered to
-            - phone-based recovery for a user without phone_number / phone_number_verified attributes
-            - same as above, but email / email_verified
+        Otherwise you get semi-random garbage and HTTP 200 OK, for example:
+        - recovery for username which is not registered in any cognito pool
+        - recovery for username belonging to a different user pool than the client id is registered to
+        - phone-based recovery for a user without phone_number / phone_number_verified attributes
+        - same as above, but email / email_verified
         """
         for user_pool in self.user_pools.values():
             if client_id in user_pool.clients:
@@ -534,7 +534,7 @@ class CognitoIdpResponse(BaseResponse):
         username = self._get_param("Username")
         confirmation_code = self._get_param("ConfirmationCode")
         cognitoidp_backends[self.region].confirm_sign_up(
-            client_id=client_id, username=username, confirmation_code=confirmation_code,
+            client_id=client_id, username=username, confirmation_code=confirmation_code
         )
         return ""
@@ -17,8 +17,8 @@ class InvalidConfigurationRecorderNameException(JsonRESTError):
     code = 400
 
     def __init__(self, name):
-        message = "The configuration recorder name '{name}' is not valid, blank string.".format(
-            name=name
+        message = (
+            f"The configuration recorder name '{name}' is not valid, blank string."
         )
         super().__init__("InvalidConfigurationRecorderNameException", message)
@@ -28,8 +28,8 @@ class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError):
 
     def __init__(self, name):
         message = (
-            "Failed to put configuration recorder '{name}' because the maximum number of "
-            "configuration recorders: 1 is reached.".format(name=name)
+            f"Failed to put configuration recorder '{name}' because the maximum number of "
+            "configuration recorders: 1 is reached."
         )
         super().__init__("MaxNumberOfConfigurationRecordersExceededException", message)
@@ -47,11 +47,9 @@ class InvalidResourceTypeException(JsonRESTError):
 
     def __init__(self, bad_list, good_list):
         message = (
-            "{num} validation error detected: Value '{bad_list}' at "
+            f"{len(bad_list)} validation error detected: Value '{bad_list}' at "
             "'configurationRecorder.recordingGroup.resourceTypes' failed to satisfy constraint: "
-            "Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]".format(
-                num=len(bad_list), bad_list=bad_list, good_list=good_list
-            )
+            f"Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]"
         )
         # For PY2:
         message = str(message)
@@ -77,8 +75,8 @@ class NoSuchConfigurationRecorderException(JsonRESTError):
     code = 400
 
     def __init__(self, name):
-        message = "Cannot find configuration recorder with the specified name '{name}'.".format(
-            name=name
+        message = (
+            f"Cannot find configuration recorder with the specified name '{name}'."
         )
         super().__init__("NoSuchConfigurationRecorderException", message)
@@ -87,9 +85,7 @@ class InvalidDeliveryChannelNameException(JsonRESTError):
     code = 400
 
     def __init__(self, name):
-        message = "The delivery channel name '{name}' is not valid, blank string.".format(
-            name=name
-        )
+        message = f"The delivery channel name '{name}' is not valid, blank string."
         super().__init__("InvalidDeliveryChannelNameException", message)
 
@@ -134,11 +130,9 @@ class InvalidDeliveryFrequency(JsonRESTError):
 
     def __init__(self, value, good_list):
         message = (
-            "1 validation error detected: Value '{value}' at "
+            f"1 validation error detected: Value '{value}' at "
             "'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency' failed to satisfy "
-            "constraint: Member must satisfy enum value set: {good_list}".format(
-                value=value, good_list=good_list
-            )
+            f"constraint: Member must satisfy enum value set: {good_list}"
         )
         super().__init__("InvalidDeliveryFrequency", message)
@@ -147,10 +141,7 @@ class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
     code = 400
 
     def __init__(self, name):
-        message = (
-            "Failed to put delivery channel '{name}' because the maximum number of "
-            "delivery channels: 1 is reached.".format(name=name)
-        )
+        message = f"Failed to put delivery channel '{name}' because the maximum number of delivery channels: 1 is reached."
         super().__init__("MaxNumberOfDeliveryChannelsExceededException", message)
 
@@ -158,9 +149,7 @@ class NoSuchDeliveryChannelException(JsonRESTError):
     code = 400
 
     def __init__(self, name):
-        message = "Cannot find delivery channel with specified name '{name}'.".format(
-            name=name
-        )
+        message = f"Cannot find delivery channel with specified name '{name}'."
         super().__init__("NoSuchDeliveryChannelException", message)
 
@@ -185,8 +174,8 @@ class LastDeliveryChannelDeleteFailedException(JsonRESTError):
 
     def __init__(self, name):
         message = (
-            "Failed to delete last specified delivery channel with name '{name}', because there, "
-            "because there is a running configuration recorder.".format(name=name)
+            f"Failed to delete last specified delivery channel with name '{name}', because there, "
+            "because there is a running configuration recorder."
         )
         super().__init__("LastDeliveryChannelDeleteFailedException", message)
@@ -222,10 +211,8 @@ class TagKeyTooBig(JsonRESTError):
     def __init__(self, tag, param="tags.X.member.key"):
         super().__init__(
             "ValidationException",
-            "1 validation error detected: Value '{}' at '{}' failed to satisfy "
-            "constraint: Member must have length less than or equal to 128".format(
-                tag, param
-            ),
+            f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy "
+            "constraint: Member must have length less than or equal to 128",
         )
 
@@ -235,10 +222,8 @@ class TagValueTooBig(JsonRESTError):
     def __init__(self, tag, param="tags.X.member.value"):
         super().__init__(
             "ValidationException",
-            "1 validation error detected: Value '{}' at '{}' failed to satisfy "
-            "constraint: Member must have length less than or equal to 256".format(
-                tag, param
-            ),
+            f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy "
+            "constraint: Member must have length less than or equal to 256",
        )
 
@@ -253,9 +238,7 @@ class InvalidTagCharacters(JsonRESTError):
     code = 400
 
     def __init__(self, tag, param="tags.X.member.key"):
-        message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(
-            tag, param
-        )
+        message = f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy "
         message += "constraint: Member must satisfy regular expression pattern: [\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]+"
 
         super().__init__("ValidationException", message)
@@ -267,10 +250,8 @@ class TooManyTags(JsonRESTError):
     def __init__(self, tags, param="tags"):
         super().__init__(
             "ValidationException",
-            "1 validation error detected: Value '{}' at '{}' failed to satisfy "
-            "constraint: Member must have length less than or equal to 50.".format(
-                tags, param
-            ),
+            f"1 validation error detected: Value '{tags}' at '{param}' failed to satisfy "
+            "constraint: Member must have length less than or equal to 50.",
         )
 
@@ -290,8 +271,8 @@ class InvalidLimitException(JsonRESTError):
     def __init__(self, value):
         super().__init__(
             "InvalidLimitException",
-            "Value '{value}' at 'limit' failed to satisfy constraint: Member"
-            " must have value less than or equal to 100".format(value=value),
+            f"Value '{value}' at 'limit' failed to satisfy constraint: Member"
+            " must have value less than or equal to 100",
        )
 
@@ -312,8 +293,7 @@ class ResourceNotDiscoveredException(JsonRESTError):
     def __init__(self, resource_type, resource):
         super().__init__(
             "ResourceNotDiscoveredException",
-            "Resource {resource} of resourceType:{type} is unknown or has not been "
-            "discovered".format(resource=resource, type=resource_type),
+            f"Resource {resource} of resourceType:{resource_type} is unknown or has not been discovered",
         )
 
@@ -322,10 +302,7 @@ class ResourceNotFoundException(JsonRESTError):
 
     def __init__(self, resource_arn):
         super().__init__(
-            "ResourceNotFoundException",
-            "ResourceArn '{resource_arn}' does not exist".format(
-                resource_arn=resource_arn
-            ),
+            "ResourceNotFoundException", f"ResourceArn '{resource_arn}' does not exist"
         )
 
@@ -334,11 +311,9 @@ class TooManyResourceKeys(JsonRESTError):
 
     def __init__(self, bad_list):
         message = (
-            "1 validation error detected: Value '{bad_list}' at "
+            f"1 validation error detected: Value '{bad_list}' at "
             "'resourceKeys' failed to satisfy constraint: "
-            "Member must have length less than or equal to 100".format(
-                bad_list=bad_list
-            )
+            "Member must have length less than or equal to 100"
         )
         super().__init__("ValidationException", message)
@@ -369,10 +344,7 @@ class MaxNumberOfConfigRulesExceededException(JsonRESTError):
     code = 400
 
     def __init__(self, name, max_limit):
-        message = (
-            f"Failed to put config rule '{name}' because the maximum number "
-            f"of config rules: {max_limit} is reached."
-        )
+        message = f"Failed to put config rule '{name}' because the maximum number of config rules: {max_limit} is reached."
         super().__init__("MaxNumberOfConfigRulesExceededException", message)
 
@@ -394,10 +366,7 @@ class NoSuchConfigRuleException(JsonRESTError):
     code = 400
 
     def __init__(self, rule_name):
-        message = (
-            f"The ConfigRule '{rule_name}' provided in the request is "
-            f"invalid. Please check the configRule name"
-        )
+        message = f"The ConfigRule '{rule_name}' provided in the request is invalid. Please check the configRule name"
        super().__init__("NoSuchConfigRuleException", message)
@@ -430,8 +430,10 @@ class OrganizationConformancePack(ConfigEmptyDictable):
         self.delivery_s3_key_prefix = delivery_s3_key_prefix
         self.excluded_accounts = excluded_accounts or []
         self.last_update_time = datetime2int(datetime.utcnow())
-        self.organization_conformance_pack_arn = "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format(
-            region, DEFAULT_ACCOUNT_ID, self._unique_pack_name
+        self.organization_conformance_pack_arn = (
+            "arn:aws:config:{0}:{1}:organization-conformance-pack/{2}".format(
+                region, DEFAULT_ACCOUNT_ID, self._unique_pack_name
+            )
         )
         self.organization_conformance_pack_name = name
@@ -196,8 +196,10 @@ class ConfigResponse(BaseResponse):
 
     def get_organization_conformance_pack_detailed_status(self):
         # 'Filters' parameter is not implemented yet
-        statuses = self.config_backend.get_organization_conformance_pack_detailed_status(
-            self._get_param("OrganizationConformancePackName")
+        statuses = (
+            self.config_backend.get_organization_conformance_pack_detailed_status(
+                self._get_param("OrganizationConformancePackName")
+            )
         )
         return json.dumps(statuses)
@@ -209,25 +211,25 @@ class ConfigResponse(BaseResponse):
 
     def tag_resource(self):
         self.config_backend.tag_resource(
-            self._get_param("ResourceArn"), self._get_param("Tags"),
+            self._get_param("ResourceArn"), self._get_param("Tags")
         )
         return ""
 
     def untag_resource(self):
         self.config_backend.untag_resource(
-            self._get_param("ResourceArn"), self._get_param("TagKeys"),
+            self._get_param("ResourceArn"), self._get_param("TagKeys")
         )
         return ""
 
     def put_config_rule(self):
         self.config_backend.put_config_rule(
-            self.region, self._get_param("ConfigRule"), self._get_param("Tags"),
+            self.region, self._get_param("ConfigRule"), self._get_param("Tags")
         )
         return ""
 
     def describe_config_rules(self):
         rules = self.config_backend.describe_config_rules(
-            self._get_param("ConfigRuleNames"), self._get_param("NextToken"),
+            self._get_param("ConfigRuleNames"), self._get_param("NextToken")
         )
         return json.dumps(rules)
@@ -26,10 +26,7 @@ from .custom_responses_mock import (
     not_implemented_callback,
     reset_responses_mock,
 )
-from .utils import (
-    convert_regex_to_flask_path,
-    convert_flask_to_responses_response,
-)
+from .utils import convert_regex_to_flask_path, convert_flask_to_responses_response
 
 
 ACCOUNT_ID = os.environ.get("MOTO_ACCOUNT_ID", "123456789012")
@@ -637,7 +634,7 @@ class BaseBackend:
 
     @staticmethod
     def default_vpc_endpoint_service(
-        service_region, zones,
+        service_region, zones
     ):  # pylint: disable=unused-argument
         """Invoke the factory method for any VPC endpoint(s) services."""
         return None
@@ -305,11 +305,11 @@ def tags_from_query_string(
             tag_index = key.replace(prefix + ".", "").replace("." + key_suffix, "")
             tag_key = querystring_dict.get(
                 "{prefix}.{index}.{key_suffix}".format(
-                    prefix=prefix, index=tag_index, key_suffix=key_suffix,
+                    prefix=prefix, index=tag_index, key_suffix=key_suffix
                 )
             )[0]
             tag_value_key = "{prefix}.{index}.{value_suffix}".format(
-                prefix=prefix, index=tag_index, value_suffix=value_suffix,
+                prefix=prefix, index=tag_index, value_suffix=value_suffix
             )
             if tag_value_key in querystring_dict:
                 response_values[tag_key] = querystring_dict.get(tag_value_key)[0]
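For context, a sketch of the querystring shape this helper pairs back up (key names are illustrative; the actual prefix and suffixes are passed in by each caller):

    # Hypothetical input, e.g. with prefix="Tag", key_suffix="Key", value_suffix="Value":
    querystring_dict = {
        "Tag.1.Key": ["Environment"],
        "Tag.1.Value": ["staging"],
        "Tag.2.Key": ["Team"],
        "Tag.2.Value": ["platform"],
    }
    # tags_from_query_string would then pair indices 1 and 2 back up and return:
    # {"Environment": "staging", "Team": "platform"}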
@@ -381,7 +381,7 @@ def merge_dicts(dict1, dict2, remove_nulls=False):
 
 def aws_api_matches(pattern, string):
     """
-        AWS API can match a value based on a glob, or an exact match
+    AWS API can match a value based on a glob, or an exact match
     """
     # use a negative lookback regex to match stars that are not prefixed with a backslash
     # and replace all stars not prefixed w/ a backslash with '.*' to take this from "glob" to PCRE syntax
@@ -51,7 +51,7 @@ class DAXResponse(BaseResponse):
 
     def delete_cluster(self):
         cluster_name = json.loads(self.body).get("ClusterName")
-        cluster = self.dax_backend.delete_cluster(cluster_name=cluster_name,)
+        cluster = self.dax_backend.delete_cluster(cluster_name)
         return json.dumps(dict(Cluster=cluster.to_json()))
 
     def describe_clusters(self):
@@ -65,7 +65,7 @@ class DatabaseMigrationServiceResponse(BaseResponse):
     def stop_replication_task(self):
         replication_task_arn = self._get_param("ReplicationTaskArn")
         replication_task = self.dms_backend.stop_replication_task(
-            replication_task_arn=replication_task_arn,
+            replication_task_arn=replication_task_arn
         )
 
         return json.dumps({"ReplicationTask": replication_task.to_dict()})
@@ -73,7 +73,7 @@ class DatabaseMigrationServiceResponse(BaseResponse):
     def delete_replication_task(self):
         replication_task_arn = self._get_param("ReplicationTaskArn")
         replication_task = self.dms_backend.delete_replication_task(
-            replication_task_arn=replication_task_arn,
+            replication_task_arn=replication_task_arn
         )
 
         return json.dumps({"ReplicationTask": replication_task.to_dict()})
@@ -84,7 +84,7 @@ class DatabaseMigrationServiceResponse(BaseResponse):
         marker = self._get_param("Marker")
         without_settings = self._get_param("WithoutSettings")
         marker, replication_tasks = self.dms_backend.describe_replication_tasks(
-            filters=filters, max_records=max_records, without_settings=without_settings,
+            filters=filters, max_records=max_records, without_settings=without_settings
         )
 
         return json.dumps(
@@ -49,7 +49,7 @@ def validate_alias(value):
 
     alias_pattern = r"^(?!D-|d-)([\da-zA-Z]+)([-]*[\da-zA-Z])*$"
     if not re.match(alias_pattern, value):
-        return fr"satisfy regular expression pattern: {alias_pattern}"
+        return rf"satisfy regular expression pattern: {alias_pattern}"
     return ""
 
@@ -64,7 +64,7 @@ def validate_directory_id(value):
     """Raise exception if the directory id is invalid."""
     id_pattern = r"^d-[0-9a-f]{10}$"
     if not re.match(id_pattern, value):
-        return fr"satisfy regular expression pattern: {id_pattern}"
+        return rf"satisfy regular expression pattern: {id_pattern}"
     return ""
 
@@ -76,7 +76,7 @@ def validate_dns_ips(value):
     )
     for dnsip in value:
         if not re.match(dnsip_pattern, dnsip):
-            return fr"satisfy regular expression pattern: {dnsip_pattern}"
+            return rf"satisfy regular expression pattern: {dnsip_pattern}"
     return ""
 
@@ -91,7 +91,7 @@ def validate_name(value):
     """Raise exception if name fails to match constraints."""
     name_pattern = r"^([a-zA-Z0-9]+[\.-])+([a-zA-Z0-9])+$"
     if not re.match(name_pattern, value):
-        return fr"satisfy regular expression pattern: {name_pattern}"
+        return rf"satisfy regular expression pattern: {name_pattern}"
     return ""
 
@@ -104,7 +104,7 @@ def validate_password(value):
         r"(?=.*\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\s]))^.*$"
     )
     if not re.match(passwd_pattern, value):
-        return fr"satisfy regular expression pattern: {passwd_pattern}"
+        return rf"satisfy regular expression pattern: {passwd_pattern}"
     return ""
 
@@ -112,7 +112,7 @@ def validate_short_name(value):
     """Raise exception if short name fails to match constraints."""
     short_name_pattern = r'^[^\/:*?"<>|.]+[^\/:*?"<>|]*$'
     if value and not re.match(short_name_pattern, value):
-        return fr"satisfy regular expression pattern: {short_name_pattern}"
+        return rf"satisfy regular expression pattern: {short_name_pattern}"
     return ""
 
@@ -135,7 +135,7 @@ def validate_subnet_ids(value):
     subnet_id_pattern = r"^(subnet-[0-9a-f]{8}|subnet-[0-9a-f]{17})$"
     for subnet in value:
         if not re.match(subnet_id_pattern, subnet):
-            return fr"satisfy regular expression pattern: {subnet_id_pattern}"
+            return rf"satisfy regular expression pattern: {subnet_id_pattern}"
     return ""
@@ -143,5 +143,5 @@ def validate_user_name(value):
     """Raise exception is username fails to match constraints."""
     username_pattern = r"^[a-zA-Z0-9._-]+$"
     if value and not re.match(username_pattern, value):
-        return fr"satisfy regular expression pattern: {username_pattern}"
+        return rf"satisfy regular expression pattern: {username_pattern}"
     return ""
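These `fr` to `rf` rewrites are purely cosmetic: Python treats string-prefix letters as order-insensitive, so both spellings produce the identical string value, and the formatter simply normalizes them to one canonical order. For example:

    pattern = r"^[a-zA-Z0-9._-]+$"
    # Both prefix orders yield the same raw f-string:
    assert fr"satisfy pattern: {pattern}" == rf"satisfy pattern: {pattern}"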
@@ -1090,9 +1090,7 @@ class RestoredPITTable(Table):
 
 
 class Backup(object):
-    def __init__(
-        self, backend, name, table, status=None, type_=None,
-    ):
+    def __init__(self, backend, name, table, status=None, type_=None):
         self.backend = backend
         self.name = name
         self.table = copy.deepcopy(table)
@@ -1322,7 +1320,7 @@ class DynamoDBBackend(BaseBackend):
         )
 
         gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create(
-            gsi_to_create, table.table_key_attrs,
+            gsi_to_create, table.table_key_attrs
         )
 
         # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other
@@ -1534,7 +1532,7 @@ class DynamoDBBackend(BaseBackend):
         if item is None:
             if update_expression:
                 # Validate AST before creating anything
-                item = Item(hash_value, range_value, attrs={},)
+                item = Item(hash_value, range_value, attrs={})
                 UpdateExpressionValidator(
                     update_expression_ast,
                     expression_attribute_names=expression_attribute_names,
@@ -92,7 +92,7 @@ class InvalidVPCIdError(EC2ClientError):
     def __init__(self, vpc_id):
 
         super().__init__(
-            "InvalidVpcID.NotFound", "VpcID {0} does not exist.".format(vpc_id),
+            "InvalidVpcID.NotFound", "VpcID {0} does not exist.".format(vpc_id)
         )
 
@@ -418,7 +418,7 @@ class InvalidDependantParameterError(EC2ClientError):
         super().__init__(
             "InvalidParameter",
             "{0} can't be empty if {1} is {2}.".format(
-                dependant_parameter, parameter, parameter_value,
+                dependant_parameter, parameter, parameter_value
             ),
         )
@@ -428,16 +428,14 @@ class InvalidDependantParameterTypeError(EC2ClientError):
         super().__init__(
             "InvalidParameter",
             "{0} type must be {1} if {2} is provided.".format(
-                dependant_parameter, parameter_value, parameter,
+                dependant_parameter, parameter_value, parameter
             ),
         )
 
 
 class InvalidAggregationIntervalParameterError(EC2ClientError):
     def __init__(self, parameter):
-        super().__init__(
-            "InvalidParameter", "Invalid {0}".format(parameter),
-        )
+        super().__init__("InvalidParameter", "Invalid {0}".format(parameter))
 
 
 class InvalidParameterValueError(EC2ClientError):
@@ -1812,9 +1812,7 @@ class AmiBackend(object):
         if owners:
             # support filtering by Owners=['self']
             if "self" in owners:
-                owners = list(
-                    map(lambda o: OWNER_ID if o == "self" else o, owners,)
-                )
+                owners = list(map(lambda o: OWNER_ID if o == "self" else o, owners))
             images = [
                 ami
                 for ami in images
@@ -2381,8 +2379,8 @@ class SecurityGroup(TaggedEC2Resource, CloudFormationModel):
             security_group.add_tag(tag_key, tag_value)
 
         for ingress_rule in properties.get("SecurityGroupIngress", []):
-            source_group_id = ingress_rule.get("SourceSecurityGroupId",)
-            source_group_name = ingress_rule.get("SourceSecurityGroupName",)
+            source_group_id = ingress_rule.get("SourceSecurityGroupId")
+            source_group_name = ingress_rule.get("SourceSecurityGroupName")
             source_group = {}
             if source_group_id:
                 source_group["GroupId"] = source_group_id
@@ -2777,7 +2775,7 @@ class SecurityGroupBackend(object):
             raise InvalidCIDRSubnetError(cidr=cidr)
 
         self._verify_group_will_respect_rule_count_limit(
-            group, group.get_number_of_ingress_rules(), ip_ranges, source_groups,
+            group, group.get_number_of_ingress_rules(), ip_ranges, source_groups
         )
 
         _source_groups = self._add_source_group(source_groups, vpc_id)
@@ -3209,7 +3207,7 @@ class SecurityGroupBackend(object):
         return _source_groups
 
     def _verify_group_will_respect_rule_count_limit(
-        self, group, current_rule_nb, ip_ranges, source_groups=None, egress=False,
+        self, group, current_rule_nb, ip_ranges, source_groups=None, egress=False
     ):
         max_nb_rules = 60 if group.vpc_id else 100
         future_group_nb_rules = current_rule_nb
@@ -4985,20 +4983,18 @@ class FlowLogsBackend(object):
     ):
         if log_group_name is None and log_destination is None:
             raise InvalidDependantParameterError(
-                "LogDestination", "LogGroupName", "not provided",
+                "LogDestination", "LogGroupName", "not provided"
             )
 
         if log_destination_type == "s3":
             if log_group_name is not None:
                 raise InvalidDependantParameterTypeError(
-                    "LogDestination", "cloud-watch-logs", "LogGroupName",
+                    "LogDestination", "cloud-watch-logs", "LogGroupName"
                 )
         elif log_destination_type == "cloud-watch-logs":
             if deliver_logs_permission_arn is None:
                 raise InvalidDependantParameterError(
-                    "DeliverLogsPermissionArn",
-                    "LogDestinationType",
-                    "cloud-watch-logs",
+                    "DeliverLogsPermissionArn", "LogDestinationType", "cloud-watch-logs"
                 )
 
         if max_aggregation_interval not in ["60", "600"]:
@@ -5139,7 +5135,7 @@ class FlowLogsBackend(object):
 
         if non_existing:
             raise InvalidFlowLogIdError(
-                len(flow_log_ids), " ".join(x for x in flow_log_ids),
+                len(flow_log_ids), " ".join(x for x in flow_log_ids)
             )
         return True
@@ -7251,7 +7247,7 @@ class NetworkAclAssociation(object):
 
 class NetworkAcl(TaggedEC2Resource):
     def __init__(
-        self, ec2_backend, network_acl_id, vpc_id, default=False, owner_id=OWNER_ID,
+        self, ec2_backend, network_acl_id, vpc_id, default=False, owner_id=OWNER_ID
     ):
         self.ec2_backend = ec2_backend
         self.id = network_acl_id
@@ -7742,7 +7738,7 @@ class TransitGatewayRouteTableBackend(object):
         return transit_gateways_route_table.routes[destination_cidr_block]
 
     def delete_transit_gateway_route(
-        self, transit_gateway_route_table_id, destination_cidr_block,
+        self, transit_gateway_route_table_id, destination_cidr_block
     ):
         transit_gateways_route_table = self.transit_gateways_route_tables[
             transit_gateway_route_table_id
@@ -7759,10 +7755,7 @@ class TransitGatewayRouteTableBackend(object):
         if not transit_gateway_route_table:
             return []
 
-        attr_pairs = (
-            ("type", "type"),
-            ("state", "state"),
-        )
+        attr_pairs = (("type", "type"), ("state", "state"))
 
         routes = transit_gateway_route_table.routes.copy()
         for key in transit_gateway_route_table.routes:
@@ -8542,10 +8535,7 @@ class IamInstanceProfileAssociationBackend(object):
         super().__init__()
 
     def associate_iam_instance_profile(
-        self,
-        instance_id,
-        iam_instance_profile_name=None,
-        iam_instance_profile_arn=None,
+        self, instance_id, iam_instance_profile_name=None, iam_instance_profile_arn=None
     ):
         iam_association_id = random_iam_instance_profile_association_id()
 
@@ -8600,9 +8590,9 @@ class IamInstanceProfileAssociationBackend(object):
                 self.iam_instance_profile_associations[association_key].id
                 == association_id
             ):
-                iam_instance_profile_associations = self.iam_instance_profile_associations[
-                    association_key
-                ]
+                iam_instance_profile_associations = (
+                    self.iam_instance_profile_associations[association_key]
+                )
                 del self.iam_instance_profile_associations[association_key]
                 # Deleting once and avoiding `RuntimeError: dictionary changed size during iteration`
                 break
@@ -8631,9 +8621,9 @@ class IamInstanceProfileAssociationBackend(object):
                 self.iam_instance_profile_associations[
                     association_key
                 ].iam_instance_profile = instance_profile
-                iam_instance_profile_association = self.iam_instance_profile_associations[
-                    association_key
-                ]
+                iam_instance_profile_association = (
+                    self.iam_instance_profile_associations[association_key]
+                )
                 break
 
         if not iam_instance_profile_association:
@ -18,7 +18,7 @@ class EgressOnlyInternetGateway(BaseResponse):
egress_only_igw_ids = self._get_multi_param("EgressOnlyInternetGatewayId")
filters = filters_from_querystring(self.querystring)
egress_only_igws = self.ec2_backend.describe_egress_only_internet_gateways(
egress_only_igw_ids, filters,
egress_only_igw_ids, filters
)
template = self.response_template(DESCRIBE_EGRESS_ONLY_IGW_RESPONSE)
return template.render(egress_only_igws=egress_only_igws)

@ -107,9 +107,7 @@ class ElasticNetworkInterfaces(BaseResponse):
eni_id = self._get_param("NetworkInterfaceId")
ipv6_count = self._get_int_param("Ipv6AddressCount", 0)
ipv6_addresses = self._get_multi_param("Ipv6Addresses")
eni = self.ec2_backend.assign_ipv6_addresses(
eni_id, ipv6_addresses, ipv6_count,
)
eni = self.ec2_backend.assign_ipv6_addresses(eni_id, ipv6_addresses, ipv6_count)
template = self.response_template(ASSIGN_IPV6_ADDRESSES)
return template.render(eni=eni)
@ -5,10 +5,7 @@ from moto.ec2.exceptions import (
InvalidParameterCombination,
InvalidRequest,
)
from moto.ec2.utils import (
filters_from_querystring,
dict_from_querystring,
)
from moto.ec2.utils import filters_from_querystring, dict_from_querystring
from moto.core import ACCOUNT_ID

from copy import deepcopy

@ -65,14 +65,7 @@ def parse_sg_attributes_from_dict(sg_attributes):
pl_item["Description"] = pl_dict.get("Description")[0]
if pl_item:
prefix_list_ids.append(pl_item)
return (
ip_protocol,
from_port,
to_port,
ip_ranges,
source_groups,
prefix_list_ids,
)
return (ip_protocol, from_port, to_port, ip_ranges, source_groups, prefix_list_ids)


class SecurityGroups(BaseResponse):
@ -14,12 +14,14 @@ class TransitGatewayAttachment(BaseResponse):
tags = (tags or {}).get("Tag", [])
tags = {t["Key"]: t["Value"] for t in tags}

transit_gateway_attachment = self.ec2_backend.create_transit_gateway_vpc_attachment(
transit_gateway_id=transit_gateway_id,
tags=tags,
vpc_id=vpc_id,
subnet_ids=subnet_ids,
options=options,
transit_gateway_attachment = (
self.ec2_backend.create_transit_gateway_vpc_attachment(
transit_gateway_id=transit_gateway_id,
tags=tags,
vpc_id=vpc_id,
subnet_ids=subnet_ids,
options=options,
)
)
template = self.response_template(CREATE_TRANSIT_GATEWAY_VPC_ATTACHMENT)
return template.render(transit_gateway_attachment=transit_gateway_attachment)
@ -30,10 +32,12 @@ class TransitGatewayAttachment(BaseResponse):
)
filters = filters_from_querystring(self.querystring)
max_results = self._get_param("MaxResults")
transit_gateway_vpc_attachments = self.ec2_backend.describe_transit_gateway_vpc_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
transit_gateway_vpc_attachments = (
self.ec2_backend.describe_transit_gateway_vpc_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
)
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_VPC_ATTACHMENTS)
return template.render(
@ -46,11 +50,13 @@ class TransitGatewayAttachment(BaseResponse):
remove_subnet_ids = self._get_multi_param("RemoveSubnetIds")
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")

transit_gateway_attachment = self.ec2_backend.modify_transit_gateway_vpc_attachment(
add_subnet_ids=add_subnet_ids,
options=options,
remove_subnet_ids=remove_subnet_ids,
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_attachment = (
self.ec2_backend.modify_transit_gateway_vpc_attachment(
add_subnet_ids=add_subnet_ids,
options=options,
remove_subnet_ids=remove_subnet_ids,
transit_gateway_attachment_id=transit_gateway_attachment_id,
)
)
template = self.response_template(MODIFY_TRANSIT_GATEWAY_VPC_ATTACHMENTS)
return template.render(transit_gateway_attachment=transit_gateway_attachment)
@ -61,18 +67,22 @@ class TransitGatewayAttachment(BaseResponse):
)
filters = filters_from_querystring(self.querystring)
max_results = self._get_param("MaxResults")
transit_gateway_attachments = self.ec2_backend.describe_transit_gateway_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
transit_gateway_attachments = (
self.ec2_backend.describe_transit_gateway_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
)
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_ATTACHMENTS)
return template.render(transit_gateway_attachments=transit_gateway_attachments)
def delete_transit_gateway_vpc_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_attachment = self.ec2_backend.delete_transit_gateway_vpc_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
transit_gateway_attachment = (
self.ec2_backend.delete_transit_gateway_vpc_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
)
template = self.response_template(DELETE_TRANSIT_GATEWAY_VPC_ATTACHMENTS)
return template.render(transit_gateway_attachment=transit_gateway_attachment)
@ -80,14 +90,16 @@ class TransitGatewayAttachment(BaseResponse):
def associate_transit_gateway_route_table(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_association = self.ec2_backend.associate_transit_gateway_route_table(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
transit_gateway_association = (
self.ec2_backend.associate_transit_gateway_route_table(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
)
)
template = self.response_template(TRANSIT_GATEWAY_ASSOCIATION)
return template.render(transit_gateway_association=transit_gateway_association)

def disassociate_transit_gateway_route_table(self,):
def disassociate_transit_gateway_route_table(self):
tgw_attach_id = self._get_param("TransitGatewayAttachmentId")
tgw_rt_id = self._get_param("TransitGatewayRouteTableId")
@ -100,9 +112,11 @@ class TransitGatewayAttachment(BaseResponse):
def enable_transit_gateway_route_table_propagation(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_propagation = self.ec2_backend.enable_transit_gateway_route_table_propagation(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
transit_gateway_propagation = (
self.ec2_backend.enable_transit_gateway_route_table_propagation(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
)
)
template = self.response_template(TRANSIT_GATEWAY_PROPAGATION)
return template.render(transit_gateway_propagation=transit_gateway_propagation)
@ -110,9 +124,11 @@ class TransitGatewayAttachment(BaseResponse):
def disable_transit_gateway_route_table_propagation(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_propagation = self.ec2_backend.disable_transit_gateway_route_table_propagation(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
transit_gateway_propagation = (
self.ec2_backend.disable_transit_gateway_route_table_propagation(
transit_gateway_attachment_id=transit_gateway_attachment_id,
transit_gateway_route_table_id=transit_gateway_route_table_id,
)
)
template = self.response_template(TRANSIT_GATEWAY_PROPAGATION)
return template.render(transit_gateway_propagation=transit_gateway_propagation)
@ -123,12 +139,14 @@ class TransitGatewayAttachment(BaseResponse):
peer_transit_gateway_id = self._get_param("PeerTransitGatewayId")
transit_gateway_id = self._get_param("TransitGatewayId")
tags = add_tag_specification(self._get_multi_param("TagSpecification"))
transit_gateway_peering_attachment = self.ec2_backend.create_transit_gateway_peering_attachment(
transit_gateway_id,
peer_transit_gateway_id,
peer_region,
peer_account_id,
tags,
transit_gateway_peering_attachment = (
self.ec2_backend.create_transit_gateway_peering_attachment(
transit_gateway_id,
peer_transit_gateway_id,
peer_region,
peer_account_id,
tags,
)
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
@ -142,10 +160,12 @@ class TransitGatewayAttachment(BaseResponse):
)
filters = filters_from_querystring(self.querystring)
max_results = self._get_param("MaxResults")
transit_gateway_peering_attachments = self.ec2_backend.describe_transit_gateway_peering_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
transit_gateway_peering_attachments = (
self.ec2_backend.describe_transit_gateway_peering_attachments(
transit_gateways_attachment_ids=transit_gateways_attachment_ids,
filters=filters,
max_results=max_results,
)
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_PEERING_ATTACHMENTS)
return template.render(
@ -154,8 +174,10 @@ class TransitGatewayAttachment(BaseResponse):

def accept_transit_gateway_peering_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_peering_attachment = self.ec2_backend.accept_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
transit_gateway_peering_attachment = (
self.ec2_backend.accept_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
@ -165,8 +187,10 @@ class TransitGatewayAttachment(BaseResponse):

def delete_transit_gateway_peering_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_peering_attachment = self.ec2_backend.delete_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
transit_gateway_peering_attachment = (
self.ec2_backend.delete_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
@ -176,8 +200,10 @@ class TransitGatewayAttachment(BaseResponse):

def reject_transit_gateway_peering_attachment(self):
transit_gateway_attachment_id = self._get_param("TransitGatewayAttachmentId")
transit_gateway_peering_attachment = self.ec2_backend.reject_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
transit_gateway_peering_attachment = (
self.ec2_backend.reject_transit_gateway_peering_attachment(
transit_gateway_attachment_id=transit_gateway_attachment_id
)
)
template = self.response_template(TRANSIT_GATEWAY_PEERING_ATTACHMENT)
return template.render(
@ -11,8 +11,10 @@ class TransitGatewayRouteTable(BaseResponse):
tags = (tags or {}).get("Tag", [])
tags = {t["Key"]: t["Value"] for t in tags}

transit_gateway_route_table = self.ec2_backend.create_transit_gateway_route_table(
transit_gateway_id=transit_gateway_id, tags=tags
transit_gateway_route_table = (
self.ec2_backend.create_transit_gateway_route_table(
transit_gateway_id=transit_gateway_id, tags=tags
)
)
template = self.response_template(CREATE_TRANSIT_GATEWAY_ROUTE_TABLE_RESPONSE)
return template.render(transit_gateway_route_table=transit_gateway_route_table)
@ -22,8 +24,10 @@ class TransitGatewayRouteTable(BaseResponse):
transit_gateway_route_table_ids = (
self._get_multi_param("TransitGatewayRouteTableIds") or None
)
transit_gateway_route_tables = self.ec2_backend.get_all_transit_gateway_route_tables(
transit_gateway_route_table_ids, filters
transit_gateway_route_tables = (
self.ec2_backend.get_all_transit_gateway_route_tables(
transit_gateway_route_table_ids, filters
)
)
template = self.response_template(DESCRIBE_TRANSIT_GATEWAY_ROUTE_TABLE_RESPONSE)
return template.render(
@ -32,8 +36,10 @@ class TransitGatewayRouteTable(BaseResponse):

def delete_transit_gateway_route_table(self):
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
transit_gateway_route_table = self.ec2_backend.delete_transit_gateway_route_table(
transit_gateway_route_table_id
transit_gateway_route_table = (
self.ec2_backend.delete_transit_gateway_route_table(
transit_gateway_route_table_id
)
)
template = self.response_template(DELETE_TRANSIT_GATEWAY_ROUTE_TABLE_RESPONSE)
return template.render(transit_gateway_route_table=transit_gateway_route_table)
@ -85,8 +91,10 @@ class TransitGatewayRouteTable(BaseResponse):
def get_transit_gateway_route_table_associations(self):
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
filters = filters_from_querystring(self.querystring)
transit_gateway_route_table_associations = self.ec2_backend.get_all_transit_gateway_route_table_associations(
transit_gateway_route_table_id, filters
transit_gateway_route_table_associations = (
self.ec2_backend.get_all_transit_gateway_route_table_associations(
transit_gateway_route_table_id, filters
)
)
template = self.response_template(
GET_TRANSIT_GATEWAY_ROUTE_TABLE_ASSOCIATIONS_RESPONSE
@ -98,8 +106,10 @@ class TransitGatewayRouteTable(BaseResponse):
def get_transit_gateway_route_table_propagations(self):
transit_gateway_route_table_id = self._get_param("TransitGatewayRouteTableId")
filters = filters_from_querystring(self.querystring)
transit_gateway_route_table_propagations = self.ec2_backend.get_all_transit_gateway_route_table_propagations(
transit_gateway_route_table_id, filters
transit_gateway_route_table_propagations = (
self.ec2_backend.get_all_transit_gateway_route_table_propagations(
transit_gateway_route_table_id, filters
)
)
template = self.response_template(
GET_TRANSIT_GATEWAY_ROUTE_TABLE_PROPAGATIONS_RESPONSE
@ -15,11 +15,13 @@ class TransitGateways(BaseResponse):
)

# creating default route table
transit_gateway_route_table = self.ec2_backend.create_transit_gateway_route_table(
transit_gateway_id=transit_gateway.id,
tags={},
default_association_route_table=True,
default_propagation_route_table=True,
transit_gateway_route_table = (
self.ec2_backend.create_transit_gateway_route_table(
transit_gateway_id=transit_gateway.id,
tags={},
default_association_route_table=True,
default_propagation_route_table=True,
)
)
transit_gateway.options[
"AssociationDefaultRouteTableId"
@ -78,8 +78,8 @@ class VPCs(BaseResponse):

def enable_vpc_classic_link_dns_support(self):
vpc_id = self._get_param("VpcId")
classic_link_dns_supported = self.ec2_backend.enable_vpc_classic_link_dns_support(
vpc_id=vpc_id
classic_link_dns_supported = (
self.ec2_backend.enable_vpc_classic_link_dns_support(vpc_id=vpc_id)
)
doc_date = self._get_doc_date()
template = self.response_template(ENABLE_VPC_CLASSIC_LINK_DNS_SUPPORT_RESPONSE)
@ -89,8 +89,8 @@ class VPCs(BaseResponse):

def disable_vpc_classic_link_dns_support(self):
vpc_id = self._get_param("VpcId")
classic_link_dns_supported = self.ec2_backend.disable_vpc_classic_link_dns_support(
vpc_id=vpc_id
classic_link_dns_supported = (
self.ec2_backend.disable_vpc_classic_link_dns_support(vpc_id=vpc_id)
)
doc_date = self._get_doc_date()
template = self.response_template(DISABLE_VPC_CLASSIC_LINK_DNS_SUPPORT_RESPONSE)
@ -228,7 +228,7 @@ class VPCs(BaseResponse):

def delete_vpc_endpoints(self):
vpc_end_points_ids = self._get_multi_param("VpcEndpointId")
response = self.ec2_backend.delete_vpc_endpoints(vpce_ids=vpc_end_points_ids,)
response = self.ec2_backend.delete_vpc_endpoints(vpce_ids=vpc_end_points_ids)
template = self.response_template(DELETE_VPC_ENDPOINT_RESPONSE)
return template.render(response=response)

@ -266,7 +266,7 @@ class VPCs(BaseResponse):
prefix_list_id = self._get_param("PrefixListId")
target_version = self._get_param("TargetVersion")
managed_prefix_list = self.ec2_backend.get_managed_prefix_list_entries(
prefix_list_id=prefix_list_id,
prefix_list_id=prefix_list_id
)
entries = []
if managed_prefix_list:
@ -244,7 +244,7 @@ class Image(BaseObject):
self.last_scan = None

def _create_digest(self):
image_contents = "docker_image{0}".format(int(random() * 10 ** 6))
image_contents = "docker_image{0}".format(int(random() * 10**6))
self.image_digest = (
"sha256:%s" % hashlib.sha256(image_contents.encode("utf-8")).hexdigest()
)
@ -359,7 +359,7 @@ class ECRBackend(BaseBackend):
"VpcEndpointPolicySupported": True,
}
return BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "api.ecr", special_service_name="ecr.api",
service_region, zones, "api.ecr", special_service_name="ecr.api"
) + [docker_endpoint]

def _get_repository(self, name, registry_id=None) -> Repository:
@ -125,7 +125,7 @@ class ECRResponse(BaseResponse):

return json.dumps(
self.ecr_backend.delete_repository_policy(
registry_id=registry_id, repository_name=repository_name,
registry_id=registry_id, repository_name=repository_name
)
)

@ -160,7 +160,7 @@ class ECRResponse(BaseResponse):

return json.dumps(
self.ecr_backend.get_repository_policy(
registry_id=registry_id, repository_name=repository_name,
registry_id=registry_id, repository_name=repository_name
)
)

@ -252,7 +252,7 @@ class ECRResponse(BaseResponse):

return json.dumps(
self.ecr_backend.get_lifecycle_policy(
registry_id=registry_id, repository_name=repository_name,
registry_id=registry_id, repository_name=repository_name
)
)

@ -262,7 +262,7 @@ class ECRResponse(BaseResponse):

return json.dumps(
self.ecr_backend.delete_lifecycle_policy(
registry_id=registry_id, repository_name=repository_name,
registry_id=registry_id, repository_name=repository_name
)
)
@ -24,9 +24,7 @@ class RevisionNotFoundException(JsonRESTError):
code = 400

def __init__(self):
super().__init__(
error_type="ClientException", message="Revision is missing.",
)
super().__init__(error_type="ClientException", message="Revision is missing.")


class TaskSetNotFoundException(JsonRESTError):
@ -44,7 +42,7 @@ class ClusterNotFoundException(JsonRESTError):

def __init__(self):
super().__init__(
error_type="ClusterNotFoundException", message="Cluster not found.",
error_type="ClusterNotFoundException", message="Cluster not found."
)


@ -52,18 +50,14 @@ class EcsClientException(JsonRESTError):
code = 400

def __init__(self, message):
super().__init__(
error_type="ClientException", message=message,
)
super().__init__(error_type="ClientException", message=message)


class InvalidParameterException(JsonRESTError):
code = 400

def __init__(self, message):
super().__init__(
error_type="InvalidParameterException", message=message,
)
super().__init__(error_type="InvalidParameterException", message=message)


class UnknownAccountSettingException(InvalidParameterException):
@ -240,7 +240,7 @@ class TaskDefinition(BaseObject, CloudFormationModel):
properties = cloudformation_json["Properties"]

family = properties.get(
"Family", "task-definition-{0}".format(int(random() * 10 ** 6))
"Family", "task-definition-{0}".format(int(random() * 10**6))
)
container_definitions = remap_nested_keys(
properties.get("ContainerDefinitions", []), pascal_to_camelcase
@ -258,7 +258,7 @@ class TaskDefinition(BaseObject, CloudFormationModel):
):
properties = cloudformation_json["Properties"]
family = properties.get(
"Family", "task-definition-{0}".format(int(random() * 10 ** 6))
"Family", "task-definition-{0}".format(int(random() * 10**6))
)
container_definitions = properties["ContainerDefinitions"]
volumes = properties.get("Volumes")
@ -361,7 +361,7 @@ class Service(BaseObject, CloudFormationModel):
{
"createdAt": datetime.now(pytz.utc),
"desiredCount": self.desired_count,
"id": "ecs-svc/{}".format(randint(0, 32 ** 12)),
"id": "ecs-svc/{}".format(randint(0, 32**12)),
"launchType": self.launch_type,
"pendingCount": self.desired_count,
"runningCount": 0,
@ -685,7 +685,7 @@ class TaskSet(BaseObject):
self.createdAt = datetime.now(pytz.utc)
self.updatedAt = datetime.now(pytz.utc)
self.stabilityStatusAt = datetime.now(pytz.utc)
self.id = "ecs-svc/{}".format(randint(0, 32 ** 12))
self.id = "ecs-svc/{}".format(randint(0, 32**12))
self.service_arn = ""
self.cluster_arn = ""
@ -96,18 +96,16 @@ class EFSResponse(BaseResponse):

def delete_file_system(self):
file_system_id = self._get_param("FileSystemId")
self.efs_backend.delete_file_system(file_system_id=file_system_id,)
self.efs_backend.delete_file_system(file_system_id)
return json.dumps(dict()), {"status": 204, "Content-Type": "application/json"}

def delete_mount_target(self):
mount_target_id = self._get_param("MountTargetId")
self.efs_backend.delete_mount_target(mount_target_id=mount_target_id,)
self.efs_backend.delete_mount_target(mount_target_id)
return json.dumps(dict()), {"status": 204, "Content-Type": "application/json"}

def describe_backup_policy(self):
file_system_id = self._get_param("FileSystemId")
backup_policy = self.efs_backend.describe_backup_policy(
file_system_id=file_system_id,
)
backup_policy = self.efs_backend.describe_backup_policy(file_system_id)
resp = {"BackupPolicy": backup_policy}
return json.dumps(resp), {"Content-Type": "application/json"}
@ -394,7 +394,7 @@ class EKSBackend(BaseBackend):
)
if not cluster.isActive():
raise InvalidRequestException(
message=CLUSTER_NOT_READY_MSG.format(clusterName=cluster_name,)
message=CLUSTER_NOT_READY_MSG.format(clusterName=cluster_name)
)

_validate_fargate_profile_selectors(selectors)
@ -459,7 +459,7 @@ class EKSBackend(BaseBackend):
)
if not cluster.isActive():
raise InvalidRequestException(
message=CLUSTER_NOT_READY_MSG.format(clusterName=cluster_name,)
message=CLUSTER_NOT_READY_MSG.format(clusterName=cluster_name)
)
if launch_template:
validate_launch_template_combination(disk_size, remote_access)
@ -146,7 +146,7 @@ class EKSResponse(BaseResponse):

try:
fargate_profile = self.eks_backend.describe_fargate_profile(
cluster_name=cluster_name, fargate_profile_name=fargate_profile_name,
cluster_name=cluster_name, fargate_profile_name=fargate_profile_name
)
return 200, {}, json.dumps({"fargateProfile": dict(fargate_profile)})
except (ResourceInUseException, ResourceNotFoundException) as e:
@ -158,7 +158,7 @@ class EKSResponse(BaseResponse):

try:
nodegroup = self.eks_backend.describe_nodegroup(
cluster_name=cluster_name, nodegroup_name=nodegroup_name,
cluster_name=cluster_name, nodegroup_name=nodegroup_name
)

return 200, {}, json.dumps({"nodegroup": dict(nodegroup)})
@ -170,7 +170,7 @@ class EKSResponse(BaseResponse):
next_token = self._get_param("nextToken", DEFAULT_NEXT_TOKEN)

clusters, next_token = self.eks_backend.list_clusters(
max_results=max_results, next_token=next_token,
max_results=max_results, next_token=next_token
)

return 200, {}, json.dumps(dict(clusters=clusters, nextToken=next_token))
@ -181,7 +181,7 @@ class EKSResponse(BaseResponse):
next_token = self._get_param("nextToken", DEFAULT_NEXT_TOKEN)

fargate_profile_names, next_token = self.eks_backend.list_fargate_profiles(
cluster_name=cluster_name, max_results=max_results, next_token=next_token,
cluster_name=cluster_name, max_results=max_results, next_token=next_token
)

return (
@ -198,20 +198,16 @@ class EKSResponse(BaseResponse):
next_token = self._get_param("nextToken", DEFAULT_NEXT_TOKEN)

nodegroups, next_token = self.eks_backend.list_nodegroups(
cluster_name=cluster_name, max_results=max_results, next_token=next_token,
cluster_name=cluster_name, max_results=max_results, next_token=next_token
)

return (
200,
{},
json.dumps(dict(nodegroups=nodegroups, nextToken=next_token)),
)
return 200, {}, json.dumps(dict(nodegroups=nodegroups, nextToken=next_token))

def delete_cluster(self):
name = self._get_param("name")

try:
cluster = self.eks_backend.delete_cluster(name=name,)
cluster = self.eks_backend.delete_cluster(name=name)

return 200, {}, json.dumps({"cluster": dict(cluster)})
except (ResourceInUseException, ResourceNotFoundException) as e:
@ -223,7 +219,7 @@ class EKSResponse(BaseResponse):

try:
fargate_profile = self.eks_backend.delete_fargate_profile(
cluster_name=cluster_name, fargate_profile_name=fargate_profile_name,
cluster_name=cluster_name, fargate_profile_name=fargate_profile_name
)

return 200, {}, json.dumps({"fargateProfile": dict(fargate_profile)})
@ -236,7 +232,7 @@ class EKSResponse(BaseResponse):

try:
nodegroup = self.eks_backend.delete_nodegroup(
cluster_name=cluster_name, nodegroup_name=nodegroup_name,
cluster_name=cluster_name, nodegroup_name=nodegroup_name
)

return 200, {}, json.dumps({"nodegroup": dict(nodegroup)})
@ -51,7 +51,7 @@ class UserAlreadyExists(ElastiCacheException):

def __init__(self, **kwargs):
super().__init__(
"UserAlreadyExists", message="User user1 already exists.", **kwargs,
"UserAlreadyExists", message="User user1 already exists.", **kwargs
)


@ -60,6 +60,4 @@ class UserNotFound(ElastiCacheException):
code = 404

def __init__(self, user_id, **kwargs):
super().__init__(
"UserNotFound", message=f"User {user_id} not found.", **kwargs,
)
super().__init__("UserNotFound", message=f"User {user_id} not found.", **kwargs)
@ -7,9 +7,7 @@ from .utils import make_arn


class FakeEnvironment(BaseModel):
def __init__(
self, application, environment_name, solution_stack_name, tags,
):
def __init__(self, application, environment_name, solution_stack_name, tags):
self.application = weakref.proxy(
application
) # weakref to break circular dependencies
@ -41,9 +39,7 @@ class FakeApplication(BaseModel):
self.application_name = application_name
self.environments = dict()

def create_environment(
self, environment_name, solution_stack_name, tags,
):
def create_environment(self, environment_name, solution_stack_name, tags):
if environment_name in self.environments:
raise InvalidParameterValueError

@ -92,15 +88,13 @@ class EBBackend(BaseBackend):
raise InvalidParameterValueError(
"Application {} already exists.".format(application_name)
)
new_app = FakeApplication(backend=self, application_name=application_name,)
new_app = FakeApplication(backend=self, application_name=application_name)
self.applications[application_name] = new_app
return new_app

def create_environment(self, app, environment_name, stack_name, tags):
return app.create_environment(
environment_name=environment_name,
solution_stack_name=stack_name,
tags=tags,
environment_name=environment_name, solution_stack_name=stack_name, tags=tags
)

def describe_environments(self):
@ -14,15 +14,15 @@ class EBResponse(BaseResponse):

def create_application(self):
app = self.backend.create_application(
application_name=self._get_param("ApplicationName"),
application_name=self._get_param("ApplicationName")
)

template = self.response_template(EB_CREATE_APPLICATION)
return template.render(region_name=self.backend.region, application=app,)
return template.render(region_name=self.backend.region, application=app)

def describe_applications(self):
template = self.response_template(EB_DESCRIBE_APPLICATIONS)
return template.render(applications=self.backend.applications.values(),)
return template.render(applications=self.backend.applications.values())

def create_environment(self):
application_name = self._get_param("ApplicationName")
@ -30,7 +30,7 @@ class EBResponse(BaseResponse):
app = self.backend.applications[application_name]
except KeyError:
raise InvalidParameterValueError(
"No Application named '{}' found.".format(application_name)
f"No Application named '{application_name}' found."
)

tags = tags_from_query_string(self.querystring, prefix="Tags.member")
@ -42,13 +42,13 @@ class EBResponse(BaseResponse):
)

template = self.response_template(EB_CREATE_ENVIRONMENT)
return template.render(environment=env, region=self.backend.region,)
return template.render(environment=env, region=self.backend.region)

def describe_environments(self):
envs = self.backend.describe_environments()

template = self.response_template(EB_DESCRIBE_ENVIRONMENTS)
return template.render(environments=envs,)
return template.render(environments=envs)

def list_available_solution_stacks(self):
return EB_LIST_AVAILABLE_SOLUTION_STACKS
@ -68,7 +68,7 @@ class EBResponse(BaseResponse):
tags = self.backend.list_tags_for_resource(resource_arn)

template = self.response_template(EB_LIST_TAGS_FOR_RESOURCE)
return template.render(tags=tags, arn=resource_arn,)
return template.render(tags=tags, arn=resource_arn)


EB_CREATE_APPLICATION = """
@ -299,9 +299,7 @@ class FakeListener(CloudFormationModel):


class FakeListenerRule(CloudFormationModel):
def __init__(
self, listener_arn, arn, conditions, priority, actions,
):
def __init__(self, listener_arn, arn, conditions, priority, actions):
self.listener_arn = listener_arn
self.arn = arn
self.conditions = conditions
@ -676,7 +674,7 @@ class ELBv2Backend(BaseBackend):
# TODO: check for error 'TooManyRules'

# create rule
rule = FakeListenerRule(listener.arn, arn, conditions, priority, actions,)
rule = FakeListenerRule(listener.arn, arn, conditions, priority, actions)
listener.register(arn, rule)
return rule

@ -1398,7 +1396,7 @@ Member must satisfy regular expression pattern: {}".format(

if protocol not in (None, "HTTP", "HTTPS", "TCP"):
raise RESTError(
"UnsupportedProtocol", "Protocol {0} is not supported".format(protocol),
"UnsupportedProtocol", "Protocol {0} is not supported".format(protocol)
)

# HTTPS checks
@ -40,7 +40,7 @@ class FakeBootstrapAction(BaseModel):

class FakeInstance(BaseModel):
def __init__(
self, ec2_instance_id, instance_group, instance_fleet_id=None, instance_id=None,
self, ec2_instance_id, instance_group, instance_fleet_id=None, instance_id=None
):
self.id = instance_id or random_instance_group_id()
self.ec2_instance_id = ec2_instance_id
@ -440,7 +440,7 @@ class ElasticMapReduceBackend(BaseBackend):
)
for instance in response.instances:
instance = FakeInstance(
ec2_instance_id=instance.id, instance_group=instance_group,
ec2_instance_id=instance.id, instance_group=instance_group
)
cluster.add_instance(instance)
@ -101,7 +101,7 @@ class EMRContainersResponse(BaseResponse):
virtual_cluster_id = self._get_param("virtualClusterId")

job = self.emrcontainers_backend.cancel_job_run(
job_id=job_id, virtual_cluster_id=virtual_cluster_id,
job_id=job_id, virtual_cluster_id=virtual_cluster_id
)
return 200, {}, json.dumps(dict(job))

@ -132,7 +132,7 @@ class EMRContainersResponse(BaseResponse):
virtual_cluster_id = self._get_param("virtualClusterId")

job_run = self.emrcontainers_backend.describe_job_run(
job_id=job_id, virtual_cluster_id=virtual_cluster_id,
job_id=job_id, virtual_cluster_id=virtual_cluster_id
)

response = {"jobRun": job_run}
@ -94,7 +94,7 @@ class ElasticsearchServiceResponse(BaseResponse):

def delete_elasticsearch_domain(self):
domain_name = self.path.split("/")[-1]
self.es_backend.delete_elasticsearch_domain(domain_name=domain_name,)
self.es_backend.delete_elasticsearch_domain(domain_name=domain_name)
return 200, {}, json.dumps(dict())

def describe_elasticsearch_domain(self):
@ -102,12 +102,12 @@ class ElasticsearchServiceResponse(BaseResponse):
if not re.match(r"^[a-z][a-z0-9\-]+$", domain_name):
raise InvalidDomainName(domain_name)
domain_status = self.es_backend.describe_elasticsearch_domain(
domain_name=domain_name,
domain_name=domain_name
)
return 200, {}, json.dumps({"DomainStatus": domain_status})

def list_domain_names(self):
params = self._get_params()
engine_type = params.get("EngineType")
domain_names = self.es_backend.list_domain_names(engine_type=engine_type,)
domain_names = self.es_backend.list_domain_names(engine_type=engine_type)
return 200, {}, json.dumps({"DomainNames": domain_names})
@ -70,11 +70,13 @@ class Rule(CloudFormationModel):
else "{}/".format(self.event_bus_name)
)

return "arn:aws:events:{region}:{account_id}:rule/{event_bus_name}{name}".format(
region=self.region_name,
account_id=ACCOUNT_ID,
event_bus_name=event_bus_name,
name=self.name,
return (
"arn:aws:events:{region}:{account_id}:rule/{event_bus_name}{name}".format(
region=self.region_name,
account_id=ACCOUNT_ID,
event_bus_name=event_bus_name,
name=self.name,
)
)

@property
@ -679,7 +681,7 @@ class Replay(BaseModel):

class Connection(BaseModel):
def __init__(
self, name, region_name, description, authorization_type, auth_parameters,
self, name, region_name, description, authorization_type, auth_parameters
):
self.uuid = uuid4()
self.name = name
@ -282,7 +282,7 @@ class FirehoseBackend(BaseBackend):
self.delivery_streams.pop(delivery_stream_name)

def describe_delivery_stream(
self, delivery_stream_name, limit, exclusive_start_destination_id,
self, delivery_stream_name, limit, exclusive_start_destination_id
): # pylint: disable=unused-argument
"""Return description of specified delivery stream and its status.

@ -369,7 +369,7 @@ class FirehoseBackend(BaseBackend):
return result

def list_tags_for_delivery_stream(
self, delivery_stream_name, exclusive_start_tag_key, limit,
self, delivery_stream_name, exclusive_start_tag_key, limit
):
"""Return list of tags."""
result = {"Tags": [], "HasMoreTags": False}
@ -34,7 +34,7 @@ class FirehoseResponse(BaseResponse):
def delete_delivery_stream(self):
"""Prepare arguments and respond to DeleteDeliveryStream request."""
self.firehose_backend.delete_delivery_stream(
self._get_param("DeliveryStreamName"), self._get_param("AllowForceDelete"),
self._get_param("DeliveryStreamName"), self._get_param("AllowForceDelete")
)
return json.dumps({})

@ -82,14 +82,14 @@ class FirehoseResponse(BaseResponse):
def tag_delivery_stream(self):
"""Prepare arguments and respond to TagDeliveryStream request."""
self.firehose_backend.tag_delivery_stream(
self._get_param("DeliveryStreamName"), self._get_param("Tags"),
self._get_param("DeliveryStreamName"), self._get_param("Tags")
)
return json.dumps({})

def untag_delivery_stream(self):
"""Prepare arguments and respond to UntagDeliveryStream()."""
self.firehose_backend.untag_delivery_stream(
self._get_param("DeliveryStreamName"), self._get_param("TagKeys"),
self._get_param("DeliveryStreamName"), self._get_param("TagKeys")
)
return json.dumps({})
@ -402,7 +402,7 @@ class FakeCrawler(BaseModel):

class LastCrawlInfo(BaseModel):
def __init__(
self, error_message, log_group, log_stream, message_prefix, start_time, status,
self, error_message, log_group, log_stream, message_prefix, start_time, status
):
self.error_message = error_message
self.log_group = log_group
@ -123,10 +123,12 @@ class AssumedRoleAccessKey(object):

@property
def arn(self):
return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
account_id=ACCOUNT_ID,
role_name=self._owner_role_name,
session_name=self._session_name,
return (
"arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format(
account_id=ACCOUNT_ID,
role_name=self._owner_role_name,
session_name=self._session_name,
)
)

def create_credentials(self):
@ -94,10 +94,7 @@ class InvalidTagCharacters(RESTError):
code = 400

def __init__(self, tag, param="tags.X.member.key"):
message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(
tag, param
)
message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+"
message = f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy constraint: Member must satisfy regular expression pattern: [\\p{{L}}\\p{{Z}}\\p{{N}}_.:/=+\\-@]+"

super().__init__("ValidationError", message)
@ -421,9 +421,7 @@ class InlinePolicy(CloudFormationModel):
self.user_names = None
self.update(policy_name, policy_document, group_names, role_names, user_names)

def update(
self, policy_name, policy_document, group_names, role_names, user_names,
):
def update(self, policy_name, policy_document, group_names, role_names, user_names):
self.policy_name = policy_name
self.policy_document = (
json.dumps(policy_document)
@ -464,7 +462,7 @@ class InlinePolicy(CloudFormationModel):

@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]

@ -908,11 +906,11 @@ class AccessKey(CloudFormationModel):
user_name = properties.get("UserName")
status = properties.get("Status", "Active")

return iam_backend.create_access_key(user_name, status=status,)
return iam_backend.create_access_key(user_name, status=status)

@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]

@ -1248,7 +1246,7 @@ class User(CloudFormationModel):

@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]

@ -2783,7 +2781,7 @@ class IAMBackend(BaseBackend):
inline_policy = self.get_inline_policy(resource_name)
inline_policy.unapply_policy(self)
inline_policy.update(
policy_name, policy_document, group_names, role_names, user_names,
policy_name, policy_document, group_names, role_names, user_names
)
inline_policy.apply_policy(self)
return inline_policy
@ -340,8 +340,8 @@ class IAMPolicyDocumentValidator:
resource_partitions = resource.partition(":")

if resource_partitions[1] == "":
self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format(
resource=resource
self._resource_error = (
f'Resource {resource} must be in ARN format or "*".'
)
return

@ -387,15 +387,14 @@ class IAMPolicyDocumentValidator:

service = resource_partitions[0]

if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[
2
].startswith(
":"
if (
service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys()
and not resource_partitions[2].startswith(":")
):
self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[
service
].format(
resource=resource
self._resource_error = (
SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format(
resource=resource
)
)
return

@ -514,8 +513,8 @@ class IAMPolicyDocumentValidator:
assert 0 <= int(time_zone_minutes) <= 59
else:
seconds_with_decimal_fraction = time_parts[2]
seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition(
"."
seconds_with_decimal_fraction_partition = (
seconds_with_decimal_fraction.partition(".")
)
seconds = seconds_with_decimal_fraction_partition[0]
assert 0 <= int(seconds) <= 59
@ -401,7 +401,7 @@ class IoTResponse(BaseResponse):
status = self._get_param("status")

cert = self.iot_backend.register_certificate_without_ca(
certificate_pem=certificate_pem, status=status,
certificate_pem=certificate_pem, status=status
)
return json.dumps(
dict(certificateId=cert.certificate_id, certificateArn=cert.arn)
@ -193,10 +193,10 @@ class Stream(CloudFormationModel):
def init_shards(self, shard_count):
self.shard_count = shard_count

step = 2 ** 128 // shard_count
step = 2**128 // shard_count
hash_ranges = itertools.chain(
map(lambda i: (i, i * step, (i + 1) * step - 1), range(shard_count - 1)),
[(shard_count - 1, (shard_count - 1) * step, 2 ** 128)],
[(shard_count - 1, (shard_count - 1) * step, 2**128)],
)
for index, start, end in hash_ranges:
shard = Shard(index, start, end)
@ -361,7 +361,7 @@ class Stream(CloudFormationModel):

key = int(explicit_hash_key)

if key >= 2 ** 128:
if key >= 2**128:
raise InvalidArgumentError("explicit_hash_key")

else:
@ -439,7 +439,7 @@ class Stream(CloudFormationModel):

@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
@ -153,7 +153,7 @@ class KinesisResponse(BaseResponse):
stream_name = self.parameters.get("StreamName")
target_shard_count = self.parameters.get("TargetShardCount")
current_shard_count = self.kinesis_backend.update_shard_count(
stream_name=stream_name, target_shard_count=target_shard_count,
stream_name=stream_name, target_shard_count=target_shard_count
)
return json.dumps(
dict(
@ -204,7 +204,7 @@ class KinesisResponse(BaseResponse):
stream_name = self.parameters.get("StreamName")
shard_level_metrics = self.parameters.get("ShardLevelMetrics")
current, desired = self.kinesis_backend.enable_enhanced_monitoring(
stream_name=stream_name, shard_level_metrics=shard_level_metrics,
stream_name=stream_name, shard_level_metrics=shard_level_metrics
)
return json.dumps(
dict(
@ -218,7 +218,7 @@ class KinesisResponse(BaseResponse):
stream_name = self.parameters.get("StreamName")
shard_level_metrics = self.parameters.get("ShardLevelMetrics")
current, desired = self.kinesis_backend.disable_enhanced_monitoring(
stream_name=stream_name, to_be_disabled=shard_level_metrics,
stream_name=stream_name, to_be_disabled=shard_level_metrics
)
return json.dumps(
dict(
@ -237,7 +237,7 @@ class KinesisResponse(BaseResponse):
stream_arn = self.parameters.get("StreamARN")
consumer_name = self.parameters.get("ConsumerName")
consumer = self.kinesis_backend.register_stream_consumer(
stream_arn=stream_arn, consumer_name=consumer_name,
stream_arn=stream_arn, consumer_name=consumer_name
)
return json.dumps(dict(Consumer=consumer.to_json()))

@ -276,5 +276,5 @@ class KinesisResponse(BaseResponse):

def stop_stream_encryption(self):
stream_name = self.parameters.get("StreamName")
self.kinesis_backend.stop_stream_encryption(stream_name=stream_name,)
self.kinesis_backend.stop_stream_encryption(stream_name=stream_name)
return json.dumps(dict())
@ -17,6 +17,4 @@ class ResourceNotFoundException(KinesisvideoClientError):
class ResourceInUseException(KinesisvideoClientError):
def __init__(self, message):
self.code = 400
super().__init__(
"ResourceInUseException", message,
)
super().__init__("ResourceInUseException", message)
@ -1,9 +1,6 @@
from moto.core import BaseBackend, BaseModel
from datetime import datetime
from .exceptions import (
ResourceNotFoundException,
ResourceInUseException,
)
from .exceptions import ResourceNotFoundException, ResourceInUseException
import random
import string
from moto.core.utils import get_random_hex, BackendDict
@ -31,7 +31,7 @@ class KinesisVideoResponse(BaseResponse):
stream_name = self._get_param("StreamName")
stream_arn = self._get_param("StreamARN")
stream_info = self.kinesisvideo_backend.describe_stream(
stream_name=stream_name, stream_arn=stream_arn,
stream_name=stream_name, stream_arn=stream_arn
)
return json.dumps(dict(StreamInfo=stream_info))

@ -50,7 +50,7 @@ class KinesisVideoResponse(BaseResponse):
stream_arn = self._get_param("StreamARN")
current_version = self._get_param("CurrentVersion")
self.kinesisvideo_backend.delete_stream(
stream_arn=stream_arn, current_version=current_version,
stream_arn=stream_arn, current_version=current_version
)
return json.dumps(dict())

@ -59,6 +59,6 @@ class KinesisVideoResponse(BaseResponse):
stream_arn = self._get_param("StreamARN")
api_name = self._get_param("APIName")
data_endpoint = self.kinesisvideo_backend.get_data_endpoint(
stream_name=stream_name, stream_arn=stream_arn, api_name=api_name,
stream_name=stream_name, stream_arn=stream_arn, api_name=api_name
)
return json.dumps(dict(DataEndpoint=data_endpoint))
@ -22,16 +22,18 @@ class KinesisVideoArchivedMediaResponse(BaseResponse):
max_media_playlist_fragment_results = self._get_param(
"MaxMediaPlaylistFragmentResults"
)
hls_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url(
stream_name=stream_name,
stream_arn=stream_arn,
playback_mode=playback_mode,
hls_fragment_selector=hls_fragment_selector,
container_format=container_format,
discontinuity_mode=discontinuity_mode,
display_fragment_timestamp=display_fragment_timestamp,
expires=expires,
max_media_playlist_fragment_results=max_media_playlist_fragment_results,
hls_streaming_session_url = (
self.kinesisvideoarchivedmedia_backend.get_hls_streaming_session_url(
stream_name=stream_name,
stream_arn=stream_arn,
playback_mode=playback_mode,
hls_fragment_selector=hls_fragment_selector,
container_format=container_format,
discontinuity_mode=discontinuity_mode,
display_fragment_timestamp=display_fragment_timestamp,
expires=expires,
max_media_playlist_fragment_results=max_media_playlist_fragment_results,
)
)
return json.dumps(dict(HLSStreamingSessionURL=hls_streaming_session_url))

@ -44,15 +46,17 @@ class KinesisVideoArchivedMediaResponse(BaseResponse):
dash_fragment_selector = self._get_param("DASHFragmentSelector")
expires = self._get_int_param("Expires")
max_manifest_fragment_results = self._get_param("MaxManifestFragmentResults")
dash_streaming_session_url = self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url(
stream_name=stream_name,
stream_arn=stream_arn,
playback_mode=playback_mode,
display_fragment_timestamp=display_fragment_timestamp,
display_fragment_number=display_fragment_number,
dash_fragment_selector=dash_fragment_selector,
expires=expires,
max_manifest_fragment_results=max_manifest_fragment_results,
dash_streaming_session_url = (
self.kinesisvideoarchivedmedia_backend.get_dash_streaming_session_url(
stream_name=stream_name,
stream_arn=stream_arn,
playback_mode=playback_mode,
display_fragment_timestamp=display_fragment_timestamp,
display_fragment_number=display_fragment_number,
dash_fragment_selector=dash_fragment_selector,
expires=expires,
max_manifest_fragment_results=max_manifest_fragment_results,
)
)
return json.dumps(dict(DASHStreamingSessionURL=dash_streaming_session_url))
@ -180,7 +180,7 @@ class KmsBackend(BaseBackend):
)

def _generate_default_keys(self, alias_name):
"""Creates default kms keys """
"""Creates default kms keys"""
if alias_name in RESERVED_ALIASES:
key = self.create_key(
None,
@ -2,8 +2,7 @@ import uuid

from datetime import datetime, timedelta

from moto import core as moto_core
from moto.core import BaseBackend, BaseModel
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel
from moto.core.models import CloudFormationModel
from moto.core.utils import unix_time_millis, BackendDict
from moto.utilities.paginator import paginate
@ -61,12 +60,7 @@ class LogStream(BaseModel):

def __init__(self, region, log_group, name):
self.region = region
self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
region=region,
id=moto_core.ACCOUNT_ID,
log_group=log_group,
log_stream=name,
)
self.arn = f"arn:aws:logs:{region}:{ACCOUNT_ID}:log-group:{log_group}:log-stream:{name}"
self.creation_time = int(unix_time_millis())
self.first_event_timestamp = None
self.last_event_timestamp = None
@ -281,7 +275,7 @@ class LogGroup(CloudFormationModel):
def __init__(self, region, name, tags, **kwargs):
self.name = name
self.region = region
self.arn = f"arn:aws:logs:{region}:{moto_core.ACCOUNT_ID}:log-group:{name}"
self.arn = f"arn:aws:logs:{region}:{ACCOUNT_ID}:log-group:{name}"
self.creation_time = int(unix_time_millis())
self.tags = tags
self.streams = dict() # {name: LogStream}
@ -16,7 +16,7 @@ def validate_param(
assert constraint_expression(param_value)
except (AssertionError, TypeError):
raise InvalidParameterException(
constraint=constraint, parameter=param_name, value=param_value,
constraint=constraint, parameter=param_name, value=param_value
)
if pattern and param_value:
try:
@ -350,9 +350,7 @@ class ManagedBlockchainInvitation(BaseModel):


class ManagedBlockchainMember(BaseModel):
    def __init__(
        self, member_id, networkid, member_configuration, region,
    ):
    def __init__(self, member_id, networkid, member_configuration, region):
        self.creationdate = datetime.datetime.utcnow()
        self.id = member_id
        self.networkid = networkid
@ -581,9 +579,7 @@ class ManagedBlockchainBackend(BaseBackend):
        )
        return self.networks.get(network_id)

    def create_proposal(
        self, networkid, memberid, actions, description=None,
    ):
    def create_proposal(self, networkid, memberid, actions, description=None):
        # Check if network exists
        if networkid not in self.networks:
            raise ResourceNotFoundException(
@ -779,9 +775,7 @@ class ManagedBlockchainBackend(BaseBackend):
        )
        self.invitations.get(invitationid).reject_invitation()

    def create_member(
        self, invitationid, networkid, member_configuration,
    ):
    def create_member(self, invitationid, networkid, member_configuration):
        # Check if network exists
        if networkid not in self.networks:
            raise ResourceNotFoundException(
@ -985,7 +979,7 @@ class ManagedBlockchainBackend(BaseBackend):
        chkregionpreregex = self.region_name + "[a-z]"
        if re.match(chkregionpreregex, availabilityzone, re.IGNORECASE) is None:
            raise InvalidRequestException(
                "CreateNode", "Availability Zone is not valid",
                "CreateNode", "Availability Zone is not valid"
            )

        node_id = get_node_id()

@ -136,7 +136,7 @@ class ManagedBlockchainResponse(BaseResponse):
        description = json_body.get("Description", None)

        response = self.backend.create_proposal(
            network_id, memberid, actions, description,
            network_id, memberid, actions, description
        )
        return 200, headers, json.dumps(response)

@ -201,9 +201,7 @@ class ManagedBlockchainResponse(BaseResponse):
        votermemberid = json_body["VoterMemberId"]
        vote = json_body["Vote"]

        self.backend.vote_on_proposal(
            network_id, proposal_id, votermemberid, vote,
        )
        self.backend.vote_on_proposal(network_id, proposal_id, votermemberid, vote)
        return 200, headers, ""

    @classmethod
@ -285,7 +283,7 @@ class ManagedBlockchainResponse(BaseResponse):
        member_configuration = json_body["MemberConfiguration"]

        response = self.backend.create_member(
            invitationid, network_id, member_configuration,
            invitationid, network_id, member_configuration
        )
        return 200, headers, json.dumps(response)

@ -324,9 +322,7 @@ class ManagedBlockchainResponse(BaseResponse):

    def _memberid_response_patch(self, network_id, member_id, json_body, headers):
        logpublishingconfiguration = json_body["LogPublishingConfiguration"]
        self.backend.update_member(
            network_id, member_id, logpublishingconfiguration,
        )
        self.backend.update_member(network_id, member_id, logpublishingconfiguration)
        return 200, headers, ""

    def _memberid_response_delete(self, network_id, member_id, headers):
@ -427,7 +423,7 @@ class ManagedBlockchainResponse(BaseResponse):
    ):
        logpublishingconfiguration = json_body
        self.backend.update_node(
            network_id, member_id, node_id, logpublishingconfiguration,
            network_id, member_id, node_id, logpublishingconfiguration
        )
        return 200, headers, ""

@ -3,10 +3,7 @@ import json
from moto.core.responses import BaseResponse
from .models import mediaconnect_backends

try:
    from urllib import unquote
except ImportError:
    from urllib.parse import unquote
from urllib.parse import unquote


class MediaConnectResponse(BaseResponse):
@ -41,42 +38,40 @@ class MediaConnectResponse(BaseResponse):
        max_results = self._get_int_param("maxResults")
        next_token = self._get_param("nextToken")
        flows, next_token = self.mediaconnect_backend.list_flows(
            max_results=max_results, next_token=next_token,
            max_results=max_results, next_token=next_token
        )
        return json.dumps(dict(flows=flows, nextToken=next_token))

    def describe_flow(self):
        flow_arn = unquote(self._get_param("flowArn"))
        flow, messages = self.mediaconnect_backend.describe_flow(flow_arn=flow_arn,)
        flow, messages = self.mediaconnect_backend.describe_flow(flow_arn=flow_arn)
        return json.dumps(dict(flow=flow, messages=messages))

    def delete_flow(self):
        flow_arn = unquote(self._get_param("flowArn"))
        flow_arn, status = self.mediaconnect_backend.delete_flow(flow_arn=flow_arn,)
        flow_arn, status = self.mediaconnect_backend.delete_flow(flow_arn=flow_arn)
        return json.dumps(dict(flowArn=flow_arn, status=status))

    def start_flow(self):
        flow_arn = unquote(self._get_param("flowArn"))
        flow_arn, status = self.mediaconnect_backend.start_flow(flow_arn=flow_arn,)
        flow_arn, status = self.mediaconnect_backend.start_flow(flow_arn=flow_arn)
        return json.dumps(dict(flowArn=flow_arn, status=status))

    def stop_flow(self):
        flow_arn = unquote(self._get_param("flowArn"))
        flow_arn, status = self.mediaconnect_backend.stop_flow(flow_arn=flow_arn,)
        flow_arn, status = self.mediaconnect_backend.stop_flow(flow_arn=flow_arn)
        return json.dumps(dict(flowArn=flow_arn, status=status))

    def tag_resource(self):
        resource_arn = unquote(self._get_param("resourceArn"))
        tags = self._get_param("tags")
        self.mediaconnect_backend.tag_resource(
            resource_arn=resource_arn, tags=tags,
        )
        self.mediaconnect_backend.tag_resource(resource_arn=resource_arn, tags=tags)
        return json.dumps(dict())

    def list_tags_for_resource(self):
        resource_arn = unquote(self._get_param("resourceArn"))
        tags = self.mediaconnect_backend.list_tags_for_resource(
            resource_arn=resource_arn,
            resource_arn=resource_arn
        )
        return json.dumps(dict(tags=tags))

@ -116,5 +111,3 @@ class MediaConnectResponse(BaseResponse):
            flow_arn=flow_arn, output_name=output_name
        )
        return json.dumps(dict(flow_arn=flow_arn, output_name=output_name))

    # add methods from here

@ -46,7 +46,7 @@ class MediaLiveResponse(BaseResponse):
        max_results = self._get_int_param("maxResults")
        next_token = self._get_param("nextToken")
        channels, next_token = self.medialive_backend.list_channels(
            max_results=max_results, next_token=next_token,
            max_results=max_results, next_token=next_token
        )

        return json.dumps(dict(channels=channels, nextToken=next_token))
@ -54,20 +54,20 @@
    def describe_channel(self):
        channel_id = self._get_param("channelId")
        return json.dumps(
            self.medialive_backend.describe_channel(channel_id=channel_id,)
            self.medialive_backend.describe_channel(channel_id=channel_id)
        )

    def delete_channel(self):
        channel_id = self._get_param("channelId")
        return json.dumps(self.medialive_backend.delete_channel(channel_id=channel_id,))
        return json.dumps(self.medialive_backend.delete_channel(channel_id=channel_id))

    def start_channel(self):
        channel_id = self._get_param("channelId")
        return json.dumps(self.medialive_backend.start_channel(channel_id=channel_id,))
        return json.dumps(self.medialive_backend.start_channel(channel_id=channel_id))

    def stop_channel(self):
        channel_id = self._get_param("channelId")
        return json.dumps(self.medialive_backend.stop_channel(channel_id=channel_id,))
        return json.dumps(self.medialive_backend.stop_channel(channel_id=channel_id))

    def update_channel(self):
        channel_id = self._get_param("channelId")
@ -121,20 +121,20 @@ class MediaLiveResponse(BaseResponse):

    def describe_input(self):
        input_id = self._get_param("inputId")
        return json.dumps(self.medialive_backend.describe_input(input_id=input_id,))
        return json.dumps(self.medialive_backend.describe_input(input_id=input_id))

    def list_inputs(self):
        max_results = self._get_int_param("maxResults")
        next_token = self._get_param("nextToken")
        inputs, next_token = self.medialive_backend.list_inputs(
            max_results=max_results, next_token=next_token,
            max_results=max_results, next_token=next_token
        )

        return json.dumps(dict(inputs=inputs, nextToken=next_token))

    def delete_input(self):
        input_id = self._get_param("inputId")
        self.medialive_backend.delete_input(input_id=input_id,)
        self.medialive_backend.delete_input(input_id=input_id)
        return json.dumps({})

    def update_input(self):

@ -15,7 +15,7 @@ class MediaPackageResponse(BaseResponse):
        channel_id = self._get_param("id")
        tags = self._get_param("tags")
        channel = self.mediapackage_backend.create_channel(
            description=description, channel_id=channel_id, tags=tags,
            description=description, channel_id=channel_id, tags=tags
        )
        return json.dumps(channel.to_dict())

@ -31,7 +31,7 @@ class MediaStoreResponse(BaseResponse):
        next_token = self._get_param("NextToken")
        max_results = self._get_int_param("MaxResults")
        containers, next_token = self.mediastore_backend.list_containers(
            next_token=next_token, max_results=max_results,
            next_token=next_token, max_results=max_results
        )
        return json.dumps(dict(dict(Containers=containers), NextToken=next_token))

@ -44,14 +44,14 @@
        container_name = self._get_param("ContainerName")
        lifecycle_policy = self._get_param("LifecyclePolicy")
        policy = self.mediastore_backend.put_lifecycle_policy(
            container_name=container_name, lifecycle_policy=lifecycle_policy,
            container_name=container_name, lifecycle_policy=lifecycle_policy
        )
        return json.dumps(policy)

    def get_lifecycle_policy(self):
        container_name = self._get_param("ContainerName")
        lifecycle_policy = self.mediastore_backend.get_lifecycle_policy(
            container_name=container_name,
            container_name=container_name
        )
        return json.dumps(dict(LifecyclePolicy=lifecycle_policy))

@ -59,14 +59,14 @@
        container_name = self._get_param("ContainerName")
        policy = self._get_param("Policy")
        container_policy = self.mediastore_backend.put_container_policy(
            container_name=container_name, policy=policy,
            container_name=container_name, policy=policy
        )
        return json.dumps(container_policy)

    def get_container_policy(self):
        container_name = self._get_param("ContainerName")
        policy = self.mediastore_backend.get_container_policy(
            container_name=container_name,
            container_name=container_name
        )
        return json.dumps(dict(Policy=policy))

@ -74,14 +74,14 @@
        container_name = self._get_param("ContainerName")
        metric_policy = self._get_param("MetricPolicy")
        self.mediastore_backend.put_metric_policy(
            container_name=container_name, metric_policy=metric_policy,
            container_name=container_name, metric_policy=metric_policy
        )
        return json.dumps(metric_policy)

    def get_metric_policy(self):
        container_name = self._get_param("ContainerName")
        metric_policy = self.mediastore_backend.get_metric_policy(
            container_name=container_name,
            container_name=container_name
        )
        return json.dumps(dict(MetricPolicy=metric_policy))

@ -822,7 +822,7 @@ class OrganizationsBackend(BaseBackend):
        )

        admin = next(
            (admin for admin in self.admins if admin.account.id == account_id), None,
            (admin for admin in self.admins if admin.account.id == account_id), None
        )
        if admin is None:
            account = next(
@ -878,7 +878,7 @@ class OrganizationsBackend(BaseBackend):
            )
        elif re.match(account_id_regex, target_id):
            account = next(
                (account for account in self.accounts if account.id == target_id), None,
                (account for account in self.accounts if account.id == target_id), None
            )
            if account is not None:
                if policy in account.attached_policies:

@ -77,12 +77,12 @@ class PinpointResponse(BaseResponse):

    def delete_app(self):
        application_id = self.path.split("/")[-1]
        app = self.pinpoint_backend.delete_app(application_id=application_id,)
        app = self.pinpoint_backend.delete_app(application_id=application_id)
        return 200, {}, json.dumps(app.to_json())

    def get_app(self):
        application_id = self.path.split("/")[-1]
        app = self.pinpoint_backend.get_app(application_id=application_id,)
        app = self.pinpoint_backend.get_app(application_id=application_id)
        return 200, {}, json.dumps(app.to_json())

    def get_apps(self):
@ -103,7 +103,7 @@ class PinpointResponse(BaseResponse):
    def get_application_settings(self):
        application_id = self.path.split("/")[-2]
        app_settings = self.pinpoint_backend.get_application_settings(
            application_id=application_id,
            application_id=application_id
        )
        app_settings = app_settings.to_json()
        app_settings["ApplicationId"] = application_id
@ -111,22 +111,20 @@ class PinpointResponse(BaseResponse):

    def list_tags_for_resource(self):
        resource_arn = unquote(self.path).split("/tags/")[-1]
        tags = self.pinpoint_backend.list_tags_for_resource(resource_arn=resource_arn,)
        tags = self.pinpoint_backend.list_tags_for_resource(resource_arn=resource_arn)
        return 200, {}, json.dumps(tags)

    def tag_resource(self):
        resource_arn = unquote(self.path).split("/tags/")[-1]
        tags = json.loads(self.body).get("tags", {})
        self.pinpoint_backend.tag_resource(
            resource_arn=resource_arn, tags=tags,
        )
        self.pinpoint_backend.tag_resource(resource_arn=resource_arn, tags=tags)
        return 200, {}, "{}"

    def untag_resource(self):
        resource_arn = unquote(self.path).split("/tags/")[-1]
        tag_keys = self.querystring.get("tagKeys")
        self.pinpoint_backend.untag_resource(
            resource_arn=resource_arn, tag_keys=tag_keys,
            resource_arn=resource_arn, tag_keys=tag_keys
        )
        return 200, {}, "{}"

@ -145,7 +143,7 @@ class PinpointResponse(BaseResponse):
    def get_event_stream(self):
        application_id = self.path.split("/")[-2]
        event_stream = self.pinpoint_backend.get_event_stream(
            application_id=application_id,
            application_id=application_id
        )
        resp = event_stream.to_json()
        resp["ApplicationId"] = application_id
@ -154,7 +152,7 @@ class PinpointResponse(BaseResponse):
    def delete_event_stream(self):
        application_id = self.path.split("/")[-2]
        event_stream = self.pinpoint_backend.delete_event_stream(
            application_id=application_id,
            application_id=application_id
        )
        resp = event_stream.to_json()
        resp["ApplicationId"] = application_id

@ -87,8 +87,10 @@ class ResourceShare(BaseModel):
            )

            if root_id:
                ous = self.organizations_backend.list_organizational_units_for_parent(
                    ParentId=root_id
                ous = (
                    self.organizations_backend.list_organizational_units_for_parent(
                        ParentId=root_id
                    )
                )
                if any(principal == ou["Arn"] for ou in ous["OrganizationalUnits"]):
                    continue
@ -200,8 +202,7 @@ class ResourceAccessManagerBackend(BaseBackend):
        arn = kwargs["resourceShareArn"]

        resource = next(
            (resource for resource in self.resource_shares if arn == resource.arn),
            None,
            (resource for resource in self.resource_shares if arn == resource.arn), None
        )

        if not resource:
@ -217,8 +218,7 @@ class ResourceAccessManagerBackend(BaseBackend):

    def delete_resource_share(self, arn):
        resource = next(
            (resource for resource in self.resource_shares if arn == resource.arn),
            None,
            (resource for resource in self.resource_shares if arn == resource.arn), None
        )

        if not resource:

@ -30,8 +30,7 @@ class DBInstanceNotFoundError(RDSClientError):
class DBSnapshotNotFoundError(RDSClientError):
    def __init__(self, snapshot_identifier):
        super().__init__(
            "DBSnapshotNotFound",
            "DBSnapshot {} not found.".format(snapshot_identifier),
            "DBSnapshotNotFound", f"DBSnapshot {snapshot_identifier} not found."
        )


@ -39,15 +38,14 @@ class DBSecurityGroupNotFoundError(RDSClientError):
    def __init__(self, security_group_name):
        super().__init__(
            "DBSecurityGroupNotFound",
            "Security Group {0} not found.".format(security_group_name),
            f"Security Group {security_group_name} not found.",
        )


class DBSubnetGroupNotFoundError(RDSClientError):
    def __init__(self, subnet_group_name):
        super().__init__(
            "DBSubnetGroupNotFound",
            "Subnet Group {0} not found.".format(subnet_group_name),
            "DBSubnetGroupNotFound", f"Subnet Group {subnet_group_name} not found."
        )


@ -55,7 +53,7 @@ class DBParameterGroupNotFoundError(RDSClientError):
    def __init__(self, db_parameter_group_name):
        super().__init__(
            "DBParameterGroupNotFound",
            "DB Parameter Group {0} not found.".format(db_parameter_group_name),
            f"DB Parameter Group {db_parameter_group_name} not found.",
        )


@ -63,7 +61,7 @@ class OptionGroupNotFoundFaultError(RDSClientError):
    def __init__(self, option_group_name):
        super().__init__(
            "OptionGroupNotFoundFault",
            "Specified OptionGroupName: {0} not found.".format(option_group_name),
            f"Specified OptionGroupName: {option_group_name} not found.",
        )

@ -1284,7 +1284,7 @@ class RDSBackend(BaseBackend):
        return snapshot

    def copy_database_snapshot(
        self, source_snapshot_identifier, target_snapshot_identifier, tags=None,
        self, source_snapshot_identifier, target_snapshot_identifier, tags=None
    ):
        if source_snapshot_identifier not in self.database_snapshots:
            raise DBSnapshotNotFoundError(source_snapshot_identifier)

@ -232,7 +232,7 @@ class RDSResponse(BaseResponse):
        target_snapshot_identifier = self._get_param("TargetDBSnapshotIdentifier")
        tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
        snapshot = self.backend.copy_database_snapshot(
            source_snapshot_identifier, target_snapshot_identifier, tags,
            source_snapshot_identifier, target_snapshot_identifier, tags
        )
        template = self.response_template(COPY_SNAPSHOT_TEMPLATE)
        return template.render(snapshot=snapshot)
@ -542,7 +542,7 @@ class RDSResponse(BaseResponse):
        )
        tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
        snapshot = self.backend.copy_cluster_snapshot(
            source_snapshot_identifier, target_snapshot_identifier, tags,
            source_snapshot_identifier, target_snapshot_identifier, tags
        )
        template = self.response_template(COPY_CLUSTER_SNAPSHOT_TEMPLATE)
        return template.render(snapshot=snapshot)
@ -586,7 +586,7 @@ class RDSResponse(BaseResponse):

    def describe_export_tasks(self):
        export_task_identifier = self._get_param("ExportTaskIdentifier")
        tasks = self.backend.describe_export_tasks(export_task_identifier,)
        tasks = self.backend.describe_export_tasks(export_task_identifier)
        template = self.response_template(DESCRIBE_EXPORT_TASKS_TEMPLATE)
        return template.render(tasks=tasks)

@ -59,9 +59,7 @@ class Statement:


class StatementResult:
    def __init__(
        self, column_metadata, records, total_number_rows, next_token=None,
    ):
    def __init__(self, column_metadata, records, total_number_rows, next_token=None):
        self.column_metadata = column_metadata
        self.records = records
        self.total_number_rows = total_number_rows
@ -91,9 +89,7 @@ class ColumnMetadata:


class Record:
    def __init__(
        self, **kwargs,
    ):
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __iter__(self):
@ -145,7 +141,7 @@ class RedshiftDataAPIServiceBackend(BaseBackend):
        raise ResourceNotFoundException()

    def execute_statement(
        self, cluster_identifier, database, db_user, parameters, secret_arn, sql,
        self, cluster_identifier, database, db_user, parameters, secret_arn, sql
    ):
        """
        Runs an SQL statement

@ -10,13 +10,13 @@ class RedshiftDataAPIServiceResponse(BaseResponse):

    def cancel_statement(self):
        statement_id = self._get_param("Id")
        status = self.redshiftdata_backend.cancel_statement(statement_id=statement_id,)
        status = self.redshiftdata_backend.cancel_statement(statement_id=statement_id)
        return 200, {}, json.dumps({"Status": status})

    def describe_statement(self):
        statement_id = self._get_param("Id")
        statement = self.redshiftdata_backend.describe_statement(
            statement_id=statement_id,
            statement_id=statement_id
        )
        return 200, {}, json.dumps(dict(statement))

@ -54,7 +54,7 @@ class RedshiftDataAPIServiceResponse(BaseResponse):
    def get_statement_result(self):
        statement_id = self._get_param("Id")
        statement_result = self.redshiftdata_backend.get_statement_result(
            statement_id=statement_id,
            statement_id=statement_id
        )

        return 200, {}, json.dumps(dict(statement_result))

@ -570,7 +570,7 @@ class Route53Backend(BaseBackend):

    @staticmethod
    def _validate_arn(region, arn):
        match = re.match(fr"arn:aws:logs:{region}:\d{{12}}:log-group:.+", arn)
        match = re.match(rf"arn:aws:logs:{region}:\d{{12}}:log-group:.+", arn)
        if not arn or not match:
            raise InvalidInput()

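The `fr` to `rf` change swaps the spelling of the raw f-string prefix; the two forms are interchangeable in Python, and lowercase `rf` is the conventional order. Whether Black or a manual pass made this change, the equivalence is easy to verify:

# Both prefixes denote the same raw f-string; only the spelling differs.
region = "us-east-1"
assert fr"arn:aws:logs:{region}:\d{{12}}" == rf"arn:aws:logs:{region}:\d{{12}}"
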
@ -371,7 +371,7 @@ class Route53(BaseResponse):
        caller_reference = root_elem.get("CallerReference")
        hosted_zone_id = root_elem.get("HostedZoneId")
        delegation_set = route53_backend.create_reusable_delegation_set(
            caller_reference=caller_reference, hosted_zone_id=hosted_zone_id,
            caller_reference=caller_reference, hosted_zone_id=hosted_zone_id
        )
        template = self.response_template(CREATE_REUSABLE_DELEGATION_SET_TEMPLATE)
        return (

@ -440,7 +440,7 @@ class Route53ResolverBackend(BaseBackend):
            ]
        )
        errmsg = self.tagger.validate_tags(
            tags or [], limit=ResolverEndpoint.MAX_TAGS_PER_RESOLVER_ENDPOINT,
            tags or [], limit=ResolverEndpoint.MAX_TAGS_PER_RESOLVER_ENDPOINT
        )
        if errmsg:
            raise TagValidationException(errmsg)
@ -501,7 +501,7 @@ class Route53ResolverBackend(BaseBackend):
            ]
        )
        errmsg = self.tagger.validate_tags(
            tags or [], limit=ResolverRule.MAX_TAGS_PER_RESOLVER_RULE,
            tags or [], limit=ResolverRule.MAX_TAGS_PER_RESOLVER_RULE
        )
        if errmsg:
            raise TagValidationException(errmsg)
@ -816,7 +816,7 @@ class Route53ResolverBackend(BaseBackend):
        """Add or overwrite one or more tags for specified resource."""
        self._matched_arn(resource_arn)
        errmsg = self.tagger.validate_tags(
            tags, limit=ResolverEndpoint.MAX_TAGS_PER_RESOLVER_ENDPOINT,
            tags, limit=ResolverEndpoint.MAX_TAGS_PER_RESOLVER_ENDPOINT
        )
        if errmsg:
            raise TagValidationException(errmsg)

@ -21,11 +21,13 @@ class Route53ResolverResponse(BaseResponse):
        resolver_rule_id = self._get_param("ResolverRuleId")
        name = self._get_param("Name")
        vpc_id = self._get_param("VPCId")
        resolver_rule_association = self.route53resolver_backend.associate_resolver_rule(
            region=self.region,
            resolver_rule_id=resolver_rule_id,
            name=name,
            vpc_id=vpc_id,
        resolver_rule_association = (
            self.route53resolver_backend.associate_resolver_rule(
                region=self.region,
                resolver_rule_id=resolver_rule_id,
                name=name,
                vpc_id=vpc_id,
            )
        )
        return json.dumps(
            {"ResolverRuleAssociation": resolver_rule_association.description()}
@ -75,7 +77,7 @@ class Route53ResolverResponse(BaseResponse):
        """Delete a Resolver endpoint."""
        resolver_endpoint_id = self._get_param("ResolverEndpointId")
        resolver_endpoint = self.route53resolver_backend.delete_resolver_endpoint(
            resolver_endpoint_id=resolver_endpoint_id,
            resolver_endpoint_id=resolver_endpoint_id
        )
        return json.dumps({"ResolverEndpoint": resolver_endpoint.description()})

@ -83,7 +85,7 @@ class Route53ResolverResponse(BaseResponse):
        """Delete a Resolver rule."""
        resolver_rule_id = self._get_param("ResolverRuleId")
        resolver_rule = self.route53resolver_backend.delete_resolver_rule(
            resolver_rule_id=resolver_rule_id,
            resolver_rule_id=resolver_rule_id
        )
        return json.dumps({"ResolverRule": resolver_rule.description()})

@ -91,8 +93,10 @@ class Route53ResolverResponse(BaseResponse):
        """Remove the association between a Resolver rule and a VPC."""
        vpc_id = self._get_param("VPCId")
        resolver_rule_id = self._get_param("ResolverRuleId")
        resolver_rule_association = self.route53resolver_backend.disassociate_resolver_rule(
            vpc_id=vpc_id, resolver_rule_id=resolver_rule_id,
        resolver_rule_association = (
            self.route53resolver_backend.disassociate_resolver_rule(
                vpc_id=vpc_id, resolver_rule_id=resolver_rule_id
            )
        )
        return json.dumps(
            {"ResolverRuleAssociation": resolver_rule_association.description()}
@ -102,7 +106,7 @@ class Route53ResolverResponse(BaseResponse):
        """Return info about a specific Resolver endpoint."""
        resolver_endpoint_id = self._get_param("ResolverEndpointId")
        resolver_endpoint = self.route53resolver_backend.get_resolver_endpoint(
            resolver_endpoint_id=resolver_endpoint_id,
            resolver_endpoint_id=resolver_endpoint_id
        )
        return json.dumps({"ResolverEndpoint": resolver_endpoint.description()})

@ -110,15 +114,17 @@ class Route53ResolverResponse(BaseResponse):
        """Return info about a specific Resolver rule."""
        resolver_rule_id = self._get_param("ResolverRuleId")
        resolver_rule = self.route53resolver_backend.get_resolver_rule(
            resolver_rule_id=resolver_rule_id,
            resolver_rule_id=resolver_rule_id
        )
        return json.dumps({"ResolverRule": resolver_rule.description()})

    def get_resolver_rule_association(self):
        """Return info about association between a Resolver rule and a VPC."""
        resolver_rule_association_id = self._get_param("ResolverRuleAssociationId")
        resolver_rule_association = self.route53resolver_backend.get_resolver_rule_association(
            resolver_rule_association_id=resolver_rule_association_id
        resolver_rule_association = (
            self.route53resolver_backend.get_resolver_rule_association(
                resolver_rule_association_id=resolver_rule_association_id
            )
        )
        return json.dumps(
            {"ResolverRuleAssociation": resolver_rule_association.description()}
@ -219,7 +225,7 @@ class Route53ResolverResponse(BaseResponse):
        next_token = self._get_param("NextToken")
        max_results = self._get_param("MaxResults")
        tags, next_token = self.route53resolver_backend.list_tags_for_resource(
            resource_arn=resource_arn, next_token=next_token, max_results=max_results,
            resource_arn=resource_arn, next_token=next_token, max_results=max_results
        )

        response = {"Tags": tags}
@ -248,6 +254,6 @@ class Route53ResolverResponse(BaseResponse):
        resolver_endpoint_id = self._get_param("ResolverEndpointId")
        name = self._get_param("Name")
        resolver_endpoint = self.route53resolver_backend.update_resolver_endpoint(
            resolver_endpoint_id=resolver_endpoint_id, name=name,
            resolver_endpoint_id=resolver_endpoint_id, name=name
        )
        return json.dumps({"ResolverEndpoint": resolver_endpoint.description()})

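The associate/disassociate hunks above show the shape Black 22.x uses when even the first line of a call would exceed the line limit: the whole right-hand side of the assignment is wrapped in parentheses so the call can be indented one level deeper. A sketch with illustrative names (not the real backend API):

# Hypothetical stand-in for a long-named backend method.
def associate_resolver_rule(region=None, rule_id=None, name=None, vpc_id=None):
    return {"region": region, "id": rule_id, "name": name, "vpc": vpc_id}

# Black wraps the over-long right-hand side in parentheses:
resolver_rule_association = (
    associate_resolver_rule(
        region="us-east-1",
        rule_id="rslvr-rr-123",
        name="example",
        vpc_id="vpc-123",
    )
)
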
@ -92,7 +92,7 @@ def validate_name(value):
        return "have length less than or equal to 64"
    name_pattern = r"^(?!^[0-9]+$)([a-zA-Z0-9-_' ']+)$"
    if not re.match(name_pattern, value):
        return fr"satisfy regular expression pattern: {name_pattern}"
        return rf"satisfy regular expression pattern: {name_pattern}"
    return ""

@ -602,9 +602,7 @@ class InvalidTagError(S3ClientError):
    code = 400

    def __init__(self, value, *args, **kwargs):
        super().__init__(
            "InvalidTag", value, *args, **kwargs,
        )
        super().__init__("InvalidTag", value, *args, **kwargs)


class ObjectLockConfigurationNotFoundError(S3ClientError):

@ -578,7 +578,7 @@ class LifecycleAndFilter(BaseModel):

        for key, value in self.tags.items():
            data.append(
                {"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value},}
                {"type": "LifecycleTagPredicate", "tag": {"key": key, "value": value}}
            )

        return data
@ -1189,7 +1189,7 @@ class FakeBucket(CloudFormationModel):

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
        cls, original_resource, new_resource_name, cloudformation_json, region_name
    ):
        properties = cloudformation_json["Properties"]

@ -1725,9 +1725,7 @@ class S3Backend(BaseBackend, CloudWatchMetricProvider):
        if errmsg:
            raise InvalidTagError(errmsg)
        self.tagger.delete_all_tags_for_resource(key.arn)
        self.tagger.tag_resource(
            key.arn, boto_tags_dict,
        )
        self.tagger.tag_resource(key.arn, boto_tags_dict)
        return key

    def get_bucket_tagging(self, bucket_name):
@ -1738,7 +1736,7 @@ class S3Backend(BaseBackend, CloudWatchMetricProvider):
        bucket = self.get_bucket(bucket_name)
        self.tagger.delete_all_tags_for_resource(bucket.arn)
        self.tagger.tag_resource(
            bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()],
            bucket.arn, [{"Key": key, "Value": value} for key, value in tags.items()]
        )

    def put_object_lock_configuration(

@ -5,13 +5,7 @@ from typing import List, Union

from moto import settings
from moto.core.utils import amzn_request_id, str_to_rfc_1123_datetime
from urllib.parse import (
    parse_qs,
    urlparse,
    unquote,
    urlencode,
    urlunparse,
)
from urllib.parse import parse_qs, urlparse, unquote, urlencode, urlunparse

import xmltodict

@ -52,19 +46,8 @@ from .exceptions import (
    InvalidRange,
    LockNotEnabled,
)
from .models import (
    s3_backend,
    get_canned_acl,
    FakeGrantee,
    FakeGrant,
    FakeAcl,
    FakeKey,
)
from .utils import (
    bucket_name_from_url,
    metadata_from_headers,
    parse_region_from_url,
)
from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey
from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url
from xml.dom import minidom


@ -72,7 +55,7 @@ DEFAULT_REGION_NAME = "us-east-1"

ACTION_MAP = {
    "BUCKET": {
        "HEAD": {"DEFAULT": "HeadBucket",},
        "HEAD": {"DEFAULT": "HeadBucket"},
        "GET": {
            "uploads": "ListBucketMultipartUploads",
            "location": "GetBucketLocation",
@ -115,7 +98,7 @@ ACTION_MAP = {
        },
    },
    "KEY": {
        "HEAD": {"DEFAULT": "HeadObject",},
        "HEAD": {"DEFAULT": "HeadObject"},
        "GET": {
            "uploadId": "ListMultipartUploadParts",
            "acl": "GetObjectAcl",
@ -427,7 +410,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
            template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)

            return template.render(
                lock_enabled=lock_enabled, mode=mode, days=days, years=years,
                lock_enabled=lock_enabled, mode=mode, days=days, years=years
            )

        if "uploads" in querystring:
@ -509,8 +492,8 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
            template = self.response_template(S3_BUCKET_CORS_RESPONSE)
            return template.render(cors=cors)
        elif "notification" in querystring:
            notification_configuration = self.backend.get_bucket_notification_configuration(
                bucket_name
            notification_configuration = (
                self.backend.get_bucket_notification_configuration(bucket_name)
            )
            if not notification_configuration:
                return 200, {}, ""

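The import hunks above are the same trailing-comma rule applied to `from ... import (...)` lists: once the magic trailing comma is gone, Black collapses a parenthesized import that fits within the line length onto a single line. Roughly:

# Exploded form, held vertical by the trailing comma:
from urllib.parse import (
    parse_qs,
    urlparse,
)

# Collapsed form once the trailing comma is removed and the line fits:
from urllib.parse import parse_qs, urlparse
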
@ -37,7 +37,7 @@ class S3ControlResponse(BaseResponse):
    def get_public_access_block(self, request):
        account_id = request.headers.get("x-amz-account-id")
        public_block_config = s3control_backend.get_public_access_block(
            account_id=account_id,
            account_id=account_id
        )
        template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
        return 200, {}, template.render(public_block_config=public_block_config)
@ -53,7 +53,7 @@ class S3ControlResponse(BaseResponse):

    def delete_public_access_block(self, request):
        account_id = request.headers.get("x-amz-account-id")
        s3control_backend.delete_public_access_block(account_id=account_id,)
        s3control_backend.delete_public_access_block(account_id=account_id)
        return 204, {}, json.dumps({})

    def _parse_pab_config(self, body):
@ -110,16 +110,14 @@ class S3ControlResponse(BaseResponse):
        account_id, name = self._get_accountid_and_name_from_accesspoint(full_url)

        access_point = s3control_backend.get_access_point(
            account_id=account_id, name=name,
            account_id=account_id, name=name
        )
        template = self.response_template(GET_ACCESS_POINT_TEMPLATE)
        return 200, {}, template.render(access_point=access_point)

    def delete_access_point(self, full_url):
        account_id, name = self._get_accountid_and_name_from_accesspoint(full_url)
        s3control_backend.delete_access_point(
            account_id=account_id, name=name,
        )
        s3control_backend.delete_access_point(account_id=account_id, name=name)
        return 204, {}, ""

    def create_access_point_policy(self, full_url):
@ -137,9 +135,7 @@ class S3ControlResponse(BaseResponse):

    def delete_access_point_policy(self, full_url):
        account_id, name = self._get_accountid_and_name_from_policy(full_url)
        s3control_backend.delete_access_point_policy(
            account_id=account_id, name=name,
        )
        s3control_backend.delete_access_point_policy(account_id=account_id, name=name)
        return 204, {}, ""

    def get_access_point_policy_status(self, full_url):

@ -318,7 +318,7 @@ class FakeEndpoint(BaseObject, CloudFormationModel):

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
        cls, original_resource, new_resource_name, cloudformation_json, region_name
    ):
        # Changes to the Endpoint will not change resource name
        cls.delete_from_cloudformation_json(
@ -510,7 +510,7 @@ class FakeEndpointConfig(BaseObject, CloudFormationModel):

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
        cls, original_resource, new_resource_name, cloudformation_json, region_name
    ):
        # Most changes to the endpoint config will change resource name for EndpointConfigs
        cls.delete_from_cloudformation_json(
@ -624,7 +624,7 @@ class Model(BaseObject, CloudFormationModel):

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
        cls, original_resource, new_resource_name, cloudformation_json, region_name
    ):
        # Most changes to the model will change resource name for Models
        cls.delete_from_cloudformation_json(
@ -839,7 +839,7 @@ class FakeSagemakerNotebookInstance(CloudFormationModel):

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
        cls, original_resource, new_resource_name, cloudformation_json, region_name
    ):
        # Operations keep same resource name so delete old and create new to mimic update
        cls.delete_from_cloudformation_json(
@ -877,8 +877,10 @@ class FakeSageMakerNotebookInstanceLifecycleConfig(BaseObject, CloudFormationMod
        self.creation_time = self.last_modified_time = datetime.now().strftime(
            "%Y-%m-%d %H:%M:%S"
        )
        self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
            self.notebook_instance_lifecycle_config_name, self.region_name
        self.notebook_instance_lifecycle_config_arn = (
            FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
                self.notebook_instance_lifecycle_config_name, self.region_name
            )
        )

    @staticmethod
@ -945,7 +947,7 @@ class FakeSageMakerNotebookInstanceLifecycleConfig(BaseObject, CloudFormationMod

    @classmethod
    def update_from_cloudformation_json(
        cls, original_resource, new_resource_name, cloudformation_json, region_name,
        cls, original_resource, new_resource_name, cloudformation_json, region_name
    ):
        # Operations keep same resource name so delete old and create new to mimic update
        cls.delete_from_cloudformation_json(
@ -1298,9 +1300,7 @@ class SageMakerModelBackend(BaseBackend):
        except RESTError:
            return []

    def create_trial(
        self, trial_name, experiment_name,
    ):
    def create_trial(self, trial_name, experiment_name):
        trial = FakeTrial(
            region_name=self.region_name,
            trial_name=trial_name,
@ -1364,9 +1364,7 @@ class SageMakerModelBackend(BaseBackend):
            if evaluate_filter_expression(trial_data)
        ]

    def create_trial_component(
        self, trial_component_name, trial_name,
    ):
    def create_trial_component(self, trial_component_name, trial_name):
        trial_component = FakeTrialComponent(
            region_name=self.region_name,
            trial_component_name=trial_component_name,
@ -1658,9 +1656,7 @@ class SageMakerModelBackend(BaseBackend):
        )
        raise ValidationError(message=message)

    def create_endpoint(
        self, endpoint_name, endpoint_config_name, tags,
    ):
    def create_endpoint(self, endpoint_name, endpoint_config_name, tags):
        try:
            endpoint_config = self.describe_endpoint_config(endpoint_config_name)
        except KeyError:
@ -2009,9 +2005,7 @@ class SageMakerModelBackend(BaseBackend):


class FakeExperiment(BaseObject):
    def __init__(
        self, region_name, experiment_name, tags,
    ):
    def __init__(self, region_name, experiment_name, tags):
        self.experiment_name = experiment_name
        self.experiment_arn = FakeExperiment.arn_formatter(experiment_name, region_name)
        self.tags = tags
@ -2044,7 +2038,7 @@ class FakeExperiment(BaseObject):

class FakeTrial(BaseObject):
    def __init__(
        self, region_name, trial_name, experiment_name, tags, trial_components,
        self, region_name, trial_name, experiment_name, tags, trial_components
    ):
        self.trial_name = trial_name
        self.trial_arn = FakeTrial.arn_formatter(trial_name, region_name)
@ -2079,9 +2073,7 @@ class FakeTrial(BaseObject):


class FakeTrialComponent(BaseObject):
    def __init__(
        self, region_name, trial_component_name, trial_name, tags,
    ):
    def __init__(self, region_name, trial_component_name, trial_name, tags):
        self.trial_component_name = trial_component_name
        self.trial_component_arn = FakeTrialComponent.arn_formatter(
            trial_component_name, region_name
Some files were not shown because too many files have changed in this diff