Techdebt: Replace string-format with f-strings (for a* dirs) (#5661)
commit b0b943949d
parent df64b7b777
@@ -330,7 +330,7 @@ class CertBundle(BaseModel):
                     "ENCRYPTION", ""
                 ),
                 "Status": self.status,  # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED.
-                "Subject": "CN={0}".format(self.common_name),
+                "Subject": f"CN={self.common_name}",
                 "SubjectAlternativeNames": sans,
                 "Type": self.type,  # One of IMPORTED, AMAZON_ISSUED,
                 "ExtendedKeyUsages": [],
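Every change in this commit follows the pattern in the hunk above: a str.format() call is replaced by an f-string that inlines the same expressions, so the rendered string is byte-for-byte identical. A minimal standalone check of that equivalence, using a hypothetical value:

common_name = "example.com"  # hypothetical input, not from the diff
assert "CN={0}".format(common_name) == f"CN={common_name}" == "CN=example.com"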
@@ -4,6 +4,4 @@ from moto.moto_api._internal import mock_random
 def make_arn_for_certificate(account_id: str, region_name: str) -> str:
     # Example
     # arn:aws:acm:eu-west-2:764371465172:certificate/c4b738b8-56fe-4b3a-b841-1c047654780b
-    return "arn:aws:acm:{0}:{1}:certificate/{2}".format(
-        region_name, account_id, mock_random.uuid4()
-    )
+    return f"arn:aws:acm:{region_name}:{account_id}:certificate/{mock_random.uuid4()}"
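One detail worth noticing in this hunk: the old positional placeholders filled {0} with region_name and {1} with account_id — the reverse of the parameter order — which the f-string now makes explicit at the point of use. A minimal sketch of the rewritten helper, assuming the stdlib uuid.uuid4() as a stand-in for moto's mock_random.uuid4():

import uuid

def make_arn_for_certificate(account_id: str, region_name: str) -> str:
    # Region precedes account in the ARN, despite the parameter order.
    return f"arn:aws:acm:{region_name}:{account_id}:certificate/{uuid.uuid4()}"

print(make_arn_for_certificate("123456789012", "eu-west-2"))
# e.g. arn:aws:acm:eu-west-2:123456789012:certificate/<random uuid>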
@@ -13,4 +13,4 @@ class TypeUnknownParser(IntegrationParser):
         self, request: requests.PreparedRequest, integration: Integration
     ) -> Tuple[int, Union[str, bytes]]:
         _type = integration.integration_type
-        raise NotImplementedError("The {0} type has not been implemented".format(_type))
+        raise NotImplementedError(f"The {_type} type has not been implemented")
@@ -96,7 +96,7 @@ class Deployment(CloudFormationModel):
         cloudformation_json: Dict[str, Any],
         account_id: str,
         region_name: str,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> "Deployment":
         properties = cloudformation_json["Properties"]
         rest_api_id = properties["RestApiId"]
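The **kwargs: Any, hunks here and below are a companion cleanup: Python has accepted a trailing comma after **kwargs in a definition since 3.6, and adding it means a future parameter shows up as a one-line diff. A tiny sketch with a hypothetical signature:

from typing import Any

def hypothetical_factory(  # illustrative signature, not from the diff
    name: str,
    **kwargs: Any,  # trailing comma after **kwargs is valid on Python 3.6+
) -> str:
    return name

print(hypothetical_factory("demo", region="us-east-1"))  # -> demo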
@@ -280,7 +280,7 @@ class Method(CloudFormationModel):
         cloudformation_json: Dict[str, Any],
         account_id: str,
         region_name: str,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> "Method":
         properties = cloudformation_json["Properties"]
         rest_api_id = properties["RestApiId"]
@@ -389,7 +389,7 @@ class Resource(CloudFormationModel):
         cloudformation_json: Dict[str, Any],
         account_id: str,
         region_name: str,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> "Resource":
         properties = cloudformation_json["Properties"]
         api_id = properties["RestApiId"]
@@ -515,7 +515,7 @@ class Authorizer(BaseModel):
         authorizer_id: Optional[str],
         name: Optional[str],
         authorizer_type: Optional[str],
-        **kwargs: Any
+        **kwargs: Any,
     ):
         self.id = authorizer_id
         self.name = name
@@ -569,11 +569,11 @@ class Authorizer(BaseModel):
                 self.name = op["value"]
             elif "/providerARNs" in op["path"]:
                 # TODO: add and remove
-                raise Exception('Patch operation for "%s" not implemented' % op["path"])
+                raise Exception(f'Patch operation for "{op["path"]}" not implemented')
             elif "/type" in op["path"]:
                 self.type = op["value"]
             else:
-                raise Exception('Patch operation "%s" not implemented' % op["op"])
+                raise Exception(f'Patch operation "{op["op"]}" not implemented')
         return self
 
 
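Both rewrites above place a subscript like op["path"] inside the f-string, so the outer literal must use single quotes: before Python 3.12 (PEP 701) an f-string expression may not reuse the f-string's own quote character. A standalone check with a hypothetical patch operation:

op = {"op": "replace", "path": "/providerARNs"}  # hypothetical input

# Outer single quotes, inner double quotes: valid on Python 3.6+.
msg = f'Patch operation for "{op["path"]}" not implemented'
assert msg == 'Patch operation for "/providerARNs" not implemented'
# f'...{op['path']}...' would be a SyntaxError before Python 3.12 (PEP 701).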
@@ -748,7 +748,7 @@ class Stage(BaseModel):
             elif op["op"] == "replace":
                 self.variables[key] = op["value"]
             else:
-                raise Exception('Patch operation "%s" not implemented' % op["op"])
+                raise Exception(f'Patch operation "{op["op"]}" not implemented')
 
 
 class ApiKey(BaseModel):
@@ -801,7 +801,7 @@ class ApiKey(BaseModel):
             elif "/enabled" in op["path"]:
                 self.enabled = self._str2bool(op["value"])
             else:
-                raise Exception('Patch operation "%s" not implemented' % op["op"])
+                raise Exception(f'Patch operation "{op["op"]}" not implemented')
         return self
 
     def _str2bool(self, v: str) -> bool:
@@ -978,7 +978,7 @@ class RestAPI(CloudFormationModel):
         region_name: str,
         name: str,
         description: str,
-        **kwargs: Any
+        **kwargs: Any,
     ):
         self.id = api_id
         self.account_id = account_id
@@ -1064,7 +1064,7 @@ class RestAPI(CloudFormationModel):
             for res_id, res_obj in self.resources.items():
                 if res_obj.path_part == "/" and not res_obj.parent_id:
                     return res_id
-            raise Exception("Unable to find root resource for API %s" % self)
+            raise Exception(f"Unable to find root resource for API {self}")
         raise UnformattedGetAttTemplateException()
 
     @property
@@ -1086,7 +1086,7 @@ class RestAPI(CloudFormationModel):
         cloudformation_json: Dict[str, Any],
         account_id: str,
         region_name: str,
-        **kwargs: Any
+        **kwargs: Any,
     ) -> "RestAPI":
         properties = cloudformation_json["Properties"]
         name = properties["Name"]
@@ -1327,11 +1327,11 @@ class RestAPI(CloudFormationModel):
 class DomainName(BaseModel):
     def __init__(self, domain_name: str, **kwargs: Any):
         self.domain_name = domain_name
-        self.regional_domain_name = "d-%s.execute-api.%s.amazonaws.com" % (
-            create_id(),
-            kwargs.get("region_name") or "us-east-1",
+        region = kwargs.get("region_name") or "us-east-1"
+        self.regional_domain_name = (
+            f"d-{create_id()}.execute-api.{region}.amazonaws.com"
         )
-        self.distribution_domain_name = "d%s.cloudfront.net" % create_id()
+        self.distribution_domain_name = f"d{create_id()}.cloudfront.net"
         self.domain_name_status = "AVAILABLE"
         self.status_message = "Domain Name Available"
         self.regional_hosted_zone_id = "Z2FDTNDATAQYW2"
@@ -249,7 +249,7 @@ class APIGatewayResponse(BaseResponse):
                 function_id, resource_id, method_type, response_code
             )
             return 204, {}, json.dumps(method_response.to_json())  # type: ignore[union-attr]
-        raise Exception('Unexpected HTTP method "%s"' % self.method)
+        raise Exception(f'Unexpected HTTP method "{self.method}"')
 
     def restapis_authorizers(self, request: Any, full_url: str, headers: Dict[str, str]) -> TYPE_RESPONSE:  # type: ignore[return]
         self.setup_class(request, full_url, headers)
@@ -702,7 +702,7 @@ class APIGatewayResponse(BaseResponse):
             self.backend.delete_domain_name(domain_name)
             return 202, {}, json.dumps({})
         else:
-            msg = 'Method "%s" for API GW domain names not implemented' % self.method
+            msg = f'Method "{self.method}" for API GW domain names not implemented'
             return 404, {}, json.dumps({"error": msg})
 
     def models(self, request: Any, full_url: str, headers: Dict[str, str]) -> TYPE_RESPONSE:  # type: ignore[return]
@@ -142,7 +142,7 @@ class ApplicationAutoscalingBackend(BaseBackend):
         _, cluster, service = r_id.split("/")
         result, _ = self.ecs_backend.describe_services(cluster, [service])
         if len(result) != 1:
-            raise AWSValidationException("ECS service doesn't exist: {}".format(r_id))
+            raise AWSValidationException(f"ECS service doesn't exist: {r_id}")
         return True
 
     def _add_scalable_target(
@@ -162,9 +162,7 @@ class ApplicationAutoscalingBackend(BaseBackend):
             del self.targets[dimension][r_id]
         else:
             raise AWSValidationException(
-                "No scalable target found for service namespace: {}, resource ID: {}, scalable dimension: {}".format(
-                    namespace, r_id, dimension
-                )
+                f"No scalable target found for service namespace: {namespace}, resource ID: {r_id}, scalable dimension: {dimension}"
             )
 
     def put_scaling_policy(
@@ -247,9 +245,7 @@ class ApplicationAutoscalingBackend(BaseBackend):
             del self.policies[policy_key]
         else:
             raise AWSValidationException(
-                "No scaling policy found for service namespace: {}, resource ID: {}, scalable dimension: {}, policy name: {}".format(
-                    service_namespace, resource_id, scalable_dimension, policy_name
-                )
+                f"No scaling policy found for service namespace: {service_namespace}, resource ID: {resource_id}, scalable dimension: {scalable_dimension}, policy name: {policy_name}"
             )
 
     def delete_scheduled_action(
@@ -458,7 +454,7 @@ class FakeApplicationAutoscalingPolicy(BaseModel):
             self.target_tracking_scaling_policy_configuration = policy_body
         else:
             raise AWSValidationException(
-                "Unknown policy type {} specified.".format(policy_type)
+                f"Unknown policy type {policy_type} specified."
             )
 
         self._policy_body = policy_body
@@ -468,13 +464,7 @@ class FakeApplicationAutoscalingPolicy(BaseModel):
         self.policy_name = policy_name
         self.policy_type = policy_type
         self._guid = mock_random.uuid4()
-        self.policy_arn = "arn:aws:autoscaling:{}:scalingPolicy:{}:resource/{}/{}:policyName/{}".format(
-            region_name,
-            self._guid,
-            self.service_namespace,
-            self.resource_id,
-            self.policy_name,
-        )
+        self.policy_arn = f"arn:aws:autoscaling:{region_name}:scalingPolicy:{self._guid}:resource/{self.service_namespace}/{self.resource_id}:policyName/{self.policy_name}"
         self.creation_time = time.time()
 
     @staticmethod
@@ -484,8 +474,8 @@ class FakeApplicationAutoscalingPolicy(BaseModel):
         scalable_dimension: str,
         policy_name: str,
     ) -> str:
-        return "{}\t{}\t{}\t{}".format(
-            service_namespace, resource_id, scalable_dimension, policy_name
+        return (
+            f"{service_namespace}\t{resource_id}\t{scalable_dimension}\t{policy_name}"
         )
 
 
@@ -116,22 +116,18 @@ class ApplicationAutoScalingResponse(BaseResponse):
         message = None
         if dimension is not None and dimension not in dimensions:
             messages.append(
-                "Value '{}' at 'scalableDimension' "
-                "failed to satisfy constraint: Member must satisfy enum value set: "
-                "{}".format(dimension, dimensions)
+                f"Value '{dimension}' at 'scalableDimension' failed to satisfy constraint: Member must satisfy enum value set: {dimensions}"
             )
         namespaces = [n.value for n in ServiceNamespaceValueSet]
         if namespace is not None and namespace not in namespaces:
             messages.append(
-                "Value '{}' at 'serviceNamespace' "
-                "failed to satisfy constraint: Member must satisfy enum value set: "
-                "{}".format(namespace, namespaces)
+                f"Value '{namespace}' at 'serviceNamespace' failed to satisfy constraint: Member must satisfy enum value set: {namespaces}"
            )
         if len(messages) == 1:
-            message = "1 validation error detected: {}".format(messages[0])
+            message = f"1 validation error detected: {messages[0]}"
         elif len(messages) > 1:
-            message = "{} validation errors detected: {}".format(
-                len(messages), "; ".join(messages)
+            message = (
+                f'{len(messages)} validation errors detected: {"; ".join(messages)}'
             )
         if message:
             raise AWSValidationException(message)
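The old messages above relied on implicit string-literal concatenation: the three adjacent fragments are joined at compile time into one literal, so the single .format() call fills placeholders that live in different fragments. The f-string collapses this into one literal. A standalone check with hypothetical values:

dimension = "ecs:service:Bogus"            # hypothetical input
dimensions = ["ecs:service:DesiredCount"]  # hypothetical enum value set

# Concatenation happens before .format() runs, so one call fills both slots.
old = (
    "Value '{}' at 'scalableDimension' "
    "failed to satisfy constraint: Member must satisfy enum value set: "
    "{}".format(dimension, dimensions)
)
new = f"Value '{dimension}' at 'scalableDimension' failed to satisfy constraint: Member must satisfy enum value set: {dimensions}"
assert old == new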
@@ -49,7 +49,7 @@ class WorkGroup(TaggableResourceMixin, BaseModel):
         super().__init__(
             athena_backend.account_id,
             self.region_name,
-            "workgroup/{}".format(name),
+            f"workgroup/{name}",
             tags,
         )
         self.athena_backend = athena_backend
@@ -72,7 +72,7 @@ class DataCatalog(TaggableResourceMixin, BaseModel):
         super().__init__(
             athena_backend.account_id,
             self.region_name,
-            "datacatalog/{}".format(name),
+            f"datacatalog/{name}",
             tags,
         )
         self.athena_backend = athena_backend
@@ -17,9 +17,7 @@ class ResourceContentionError(RESTError):
 
 class InvalidInstanceError(AutoscalingClientError):
     def __init__(self, instance_id: str):
-        super().__init__(
-            "ValidationError", "Instance [{0}] is invalid.".format(instance_id)
-        )
+        super().__init__("ValidationError", f"Instance [{instance_id}] is invalid.")
 
 
 class ValidationError(AutoscalingClientError):
@@ -1219,7 +1219,7 @@ class AutoScalingBackend(BaseBackend):
     ) -> FakeLifeCycleHook:
         lifecycle_hook = FakeLifeCycleHook(name, as_name, transition, timeout, result)
 
-        self.lifecycle_hooks["%s_%s" % (as_name, name)] = lifecycle_hook
+        self.lifecycle_hooks[f"{as_name}_{name}"] = lifecycle_hook
         return lifecycle_hook
 
     def describe_lifecycle_hooks(
@@ -1235,7 +1235,7 @@ class AutoScalingBackend(BaseBackend):
         ]
 
     def delete_lifecycle_hook(self, as_name: str, name: str) -> None:
-        self.lifecycle_hooks.pop("%s_%s" % (as_name, name), None)
+        self.lifecycle_hooks.pop(f"{as_name}_{name}", None)
 
     def put_scaling_policy(
         self,
@@ -22,9 +22,7 @@ class InvalidRoleFormat(LambdaClientError):
     pattern = r"arn:(aws[a-zA-Z-]*)?:iam::(\d{12}):role/?[a-zA-Z_0-9+=,.@\-_/]+"
 
     def __init__(self, role: str):
-        message = "1 validation error detected: Value '{0}' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: {1}".format(
-            role, InvalidRoleFormat.pattern
-        )
+        message = f"1 validation error detected: Value '{role}' at 'role' failed to satisfy constraint: Member must satisfy regular expression pattern: {InvalidRoleFormat.pattern}"
         super().__init__("ValidationException", message)
 
 
@@ -445,7 +445,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
         self.code_signing_config_arn = spec.get("CodeSigningConfigArn")
         self.tracing_config = spec.get("TracingConfig") or {"Mode": "PassThrough"}
 
-        self.logs_group_name = "/aws/lambda/{}".format(self.function_name)
+        self.logs_group_name = f"/aws/lambda/{self.function_name}"
 
         # this isn't finished yet. it needs to find out the VpcId value
         self._vpc_config = spec.get(
@@ -466,7 +466,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
 
             # TODO: we should be putting this in a lambda bucket
             self.code["UUID"] = str(random.uuid4())
-            self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
+            self.code["S3Key"] = f"{self.function_name}-{self.code['UUID']}"
         elif "S3Bucket" in self.code:
             key = _validate_s3_bucket_and_key(self.account_id, data=self.code)
             if key:
@@ -531,9 +531,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
         if not all(layer_versions):
             raise ValueError(
                 "InvalidParameterValueException",
-                "One or more LayerVersion does not exist {0}".format(
-                    layers_versions_arns
-                ),
+                f"One or more LayerVersion does not exist {layers_versions_arns}",
             )
         return [{"Arn": lv.arn, "CodeSize": lv.code_size} for lv in layer_versions]
 
@@ -577,9 +575,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
         resp = {"Configuration": self.get_configuration()}
         if "S3Key" in self.code:
             resp["Code"] = {
-                "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/{1}".format(
-                    self.region, self.code["S3Key"]
-                ),
+                "Location": f"s3://awslambda-{self.region}-tasks.s3-{self.region}.amazonaws.com/{self.code['S3Key']}",
                 "RepositoryType": "S3",
             }
         elif "ImageUri" in self.code:
@@ -641,7 +637,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
 
             # TODO: we should be putting this in a lambda bucket
             self.code["UUID"] = str(random.uuid4())
-            self.code["S3Key"] = "{}-{}".format(self.function_name, self.code["UUID"])
+            self.code["S3Key"] = f"{self.function_name}-{self.code['UUID']}"
         elif "S3Bucket" in updated_spec and "S3Key" in updated_spec:
             key = None
             try:
@@ -696,7 +692,7 @@ class LambdaFunction(CloudFormationModel, DockerModel):
         # Should get invoke_id /RequestId from invocation
         env_vars = {
             "_HANDLER": self.handler,
-            "AWS_EXECUTION_ENV": "AWS_Lambda_{}".format(self.run_time),
+            "AWS_EXECUTION_ENV": f"AWS_Lambda_{self.run_time}",
             "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
             "AWS_LAMBDA_FUNCTION_NAME": self.function_name,
             "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
@@ -746,8 +742,8 @@ class LambdaFunction(CloudFormationModel, DockerModel):
                 image_ref,
                 [self.handler, json.dumps(event)],
                 remove=False,
-                mem_limit="{}m".format(self.memory_size),
-                volumes=["{}:/var/task".format(data_vol.name)],
+                mem_limit=f"{self.memory_size}m",
+                volumes=[f"{data_vol.name}:/var/task"],
                 environment=env_vars,
                 detach=True,
                 log_config=log_config,
@@ -783,19 +779,16 @@ class LambdaFunction(CloudFormationModel, DockerModel):
             return resp, invocation_error, logs
         except docker.errors.DockerException as e:
             # Docker itself is probably not running - there will be no Lambda-logs to handle
-            msg = "error running docker: {}".format(e)
+            msg = f"error running docker: {e}"
             self.save_logs(msg)
             return msg, True, ""
 
     def save_logs(self, output: str) -> None:
         # Send output to "logs" backend
         invoke_id = random.uuid4().hex
+        date = datetime.datetime.utcnow()
         log_stream_name = (
-            "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
-                date=datetime.datetime.utcnow(),
-                version=self.version,
-                invoke_id=invoke_id,
-            )
+            f"{date.year}/{date.month:02d}/{date.day:02d}/[{self.version}]{invoke_id}"
         )
         self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)
         log_events = [
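The save_logs rewrite has to hoist the timestamp into a local date, since the old keyword argument date=datetime.datetime.utcnow() has no direct f-string equivalent; a single evaluation also guarantees that year, month, and day come from the same instant. Format specs such as :02d carry over unchanged. A standalone equivalence check with hypothetical stand-ins for self.version and invoke_id:

import datetime

version, invoke_id = "$LATEST", "abc123"  # hypothetical stand-ins
date = datetime.datetime.utcnow()         # evaluated once for all three fields

old = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
    date=date, version=version, invoke_id=invoke_id
)
new = f"{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}"
assert old == new  # e.g. '2022/11/18/[$LATEST]abc123'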
@@ -1059,8 +1052,8 @@ class EventSourceMapping(CloudFormationModel):
         if batch_size is None:
             self._batch_size = batch_size_for_source[0]
         elif batch_size > batch_size_for_source[1]:
-            error_message = "BatchSize {} exceeds the max of {}".format(
-                batch_size, batch_size_for_source[1]
+            error_message = (
+                f"BatchSize {batch_size} exceeds the max of {batch_size_for_source[1]}"
             )
             raise ValueError("InvalidParameterValueException", error_message)
         else:
@@ -1344,7 +1337,7 @@ class LambdaStorage(object):
 
         for function_group in self._functions.values():
             latest = copy.deepcopy(function_group["latest"])
-            latest.function_arn = "{}:$LATEST".format(latest.function_arn)
+            latest.function_arn = f"{latest.function_arn}:$LATEST"
             result.append(latest)
 
         result.extend(function_group["versions"])
@@ -1569,9 +1562,7 @@ class LambdaBackend(BaseBackend):
         required = ["EventSourceArn", "FunctionName"]
         for param in required:
             if not spec.get(param):
-                raise RESTError(
-                    "InvalidParameterValueException", "Missing {}".format(param)
-                )
+                raise RESTError("InvalidParameterValueException", f"Missing {param}")
 
         # Validate function name
         func = self._lambdas.get_function_by_name_or_arn(spec.get("FunctionName", ""))
@@ -1589,8 +1580,7 @@ class LambdaBackend(BaseBackend):
             )
             if queue.fifo_queue:
                 raise RESTError(
-                    "InvalidParameterValueException",
-                    "{} is FIFO".format(queue.queue_arn),
+                    "InvalidParameterValueException", f"{queue.queue_arn} is FIFO"
                 )
             else:
                 spec.update({"FunctionArn": func.function_arn})
@@ -1618,7 +1608,7 @@ class LambdaBackend(BaseBackend):
         required = ["LayerName", "Content"]
         for param in required:
             if not spec.get(param):
-                raise InvalidParameterValueException("Missing {}".format(param))
+                raise InvalidParameterValueException(f"Missing {param}")
         layer_version = LayerVersion(
             spec, account_id=self.account_id, region=self.region_name
         )
|
@ -142,7 +142,7 @@ class LambdaResponse(BaseResponse):
|
||||
elif request.method == "DELETE":
|
||||
return self._untag_resource()
|
||||
else:
|
||||
raise ValueError("Cannot handle {0} request".format(request.method))
|
||||
raise ValueError(f"Cannot handle {request.method} request")
|
||||
|
||||
def policy(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
|
||||
self.setup_class(request, full_url, headers)
|
||||
@@ -153,7 +153,7 @@ class LambdaResponse(BaseResponse):
         elif request.method == "DELETE":
             return self._del_policy(request, self.querystring)
         else:
-            raise ValueError("Cannot handle {0} request".format(request.method))
+            raise ValueError(f"Cannot handle {request.method} request")
 
     def configuration(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
         self.setup_class(request, full_url, headers)
@@ -7,7 +7,7 @@ LAYER_ARN = namedtuple("LAYER_ARN", ["region", "account", "layer_name", "version
 
 
 def make_arn(resource_type: str, region: str, account: str, name: str) -> str:
-    return "arn:aws:lambda:{0}:{1}:{2}:{3}".format(region, account, resource_type, name)
+    return f"arn:aws:lambda:{region}:{account}:{resource_type}:{name}"
 
 
 make_function_arn = partial(make_arn, "function")
@@ -18,7 +18,7 @@ def make_ver_arn(
     resource_type: str, region: str, account: str, name: str, version: str = "1"
 ) -> str:
     arn = make_arn(resource_type, region, account, name)
-    return "{0}:{1}".format(arn, version)
+    return f"{arn}:{version}"
 
 
 make_function_ver_arn = partial(make_ver_arn, "function")
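Usage of the rewritten helpers is unchanged: the partial bindings still prepend the resource type, and the f-string renders the same ARN. A minimal sketch mirroring the code above:

from functools import partial

def make_arn(resource_type: str, region: str, account: str, name: str) -> str:
    # Same body as the rewritten helper above.
    return f"arn:aws:lambda:{region}:{account}:{resource_type}:{name}"

make_function_arn = partial(make_arn, "function")
print(make_function_arn("us-east-1", "123456789012", "my-func"))
# arn:aws:lambda:us-east-1:123456789012:function:my-func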