From 1a8ddc0f2b89672c4bea81a257fa2ec86e0bc789 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 17 Nov 2022 21:41:08 -0100 Subject: [PATCH] Techdebt: Replace string-format with f-strings (for tests dirs) (#5678) --- tests/terraformtests/terraform-provider-aws | 2 +- tests/test_acm/test_acm.py | 4 +- tests/test_apigateway/test_apigateway.py | 16 +- .../test_apigateway_cloudformation.py | 2 +- .../test_apigateway_deployments.py | 8 +- tests/test_apigateway/test_server.py | 34 +- .../test_applicationautoscaling.py | 16 +- .../test_validation.py | 4 +- tests/test_autoscaling/test_autoscaling.py | 10 +- .../test_autoscaling_groups.py | 2 +- tests/test_autoscaling/test_elb.py | 10 +- .../test_launch_configurations.py | 12 +- tests/test_awslambda/test_lambda.py | 82 ++--- .../test_lambda_eventsourcemapping.py | 2 +- tests/test_awslambda/test_lambda_layers.py | 4 +- tests/test_awslambda/test_lambda_tags.py | 6 +- tests/test_awslambda/utilities.py | 13 +- tests/test_batch/test_batch_jobs.py | 4 +- .../test_batch/test_batch_task_definition.py | 4 +- .../test_cloudformation_custom_resources.py | 9 +- .../test_cloudformation_depends_on.py | 4 +- .../test_cloudformation_stack_crud_boto3.py | 30 +- .../test_cloudformation_stack_integration.py | 20 +- .../test_cloudwatch/test_cloudwatch_alarms.py | 10 +- tests/test_codebuild/test_codebuild.py | 66 +--- tests/test_codecommit/test_codecommit.py | 42 +-- tests/test_codepipeline/test_codepipeline.py | 27 +- .../test_cognitoidentity.py | 12 +- tests/test_cognitoidp/test_cognitoidp.py | 24 +- tests/test_config/test_config.py | 90 +++-- tests/test_core/test_auth.py | 43 +-- tests/test_core/test_instance_metadata.py | 10 +- tests/test_core/test_moto_api.py | 4 +- tests/test_core/test_request_mocking.py | 8 +- tests/test_core/test_url_base_regex.py | 4 +- tests/test_datapipeline/test_datapipeline.py | 2 +- tests/test_dynamodb/test_dynamodb.py | 30 +- tests/test_dynamodb/test_dynamodb_executor.py | 4 +- 
.../test_dynamodb_table_with_range_key.py | 6 +- .../test_dynamodb_table_without_range_key.py | 2 +- tests/test_dynamodb_v20111205/test_server.py | 2 +- tests/test_ec2/test_amis.py | 14 +- tests/test_ec2/test_elastic_block_store.py | 4 +- tests/test_ec2/test_instances.py | 10 +- tests/test_ec2/test_network_acls.py | 4 +- tests/test_ec2/test_prefix_lists.py | 4 +- tests/test_ec2/test_regions.py | 8 +- tests/test_ec2/test_route_tables.py | 4 +- tests/test_ec2/test_security_groups.py | 10 +- tests/test_ec2/test_spot_fleet.py | 14 +- tests/test_ec2/test_subnets.py | 28 +- tests/test_ec2/test_tags.py | 2 +- tests/test_ec2/test_transit_gateway.py | 4 +- tests/test_ec2/test_vpc_peering.py | 12 +- tests/test_ec2/test_vpcs.py | 24 +- tests/test_ecr/test_ecr_boto3.py | 4 +- tests/test_ecs/test_ecs_account_settings.py | 6 +- tests/test_ecs/test_ecs_boto3.py | 172 ++++------ tests/test_efs/test_file_system.py | 2 +- tests/test_efs/test_mount_target.py | 10 +- tests/test_efs/test_server.py | 8 +- tests/test_eks/test_eks_utils.py | 9 +- .../test_elastictranscoder.py | 30 +- tests/test_elb/test_elb.py | 2 +- tests/test_elbv2/test_elbv2.py | 6 +- tests/test_elbv2/test_elbv2_cloudformation.py | 8 +- tests/test_elbv2/test_elbv2_target_groups.py | 14 +- tests/test_emr/test_emr_boto3.py | 10 +- tests/test_events/test_events.py | 242 +++++--------- .../test_events/test_events_cloudformation.py | 6 +- tests/test_events/test_events_integration.py | 4 +- .../test_firehose_destination_types.py | 60 +--- tests/test_glue/helpers.py | 12 +- tests/test_glue/test_datacatalog.py | 6 +- tests/test_iam/test_iam.py | 308 ++++++++---------- tests/test_iam/test_iam_cloudformation.py | 28 +- tests/test_iam/test_iam_groups.py | 11 +- tests/test_iam/test_iam_oidc.py | 20 +- .../test_iam/test_iam_server_certificates.py | 6 +- tests/test_iot/test_iot.py | 8 +- tests/test_iot/test_iot_certificates.py | 4 +- tests/test_iot/test_iot_policies.py | 2 +- tests/test_iot/test_server.py | 2 +- 
tests/test_iotdata/test_server.py | 4 +- tests/test_kinesis/test_kinesis.py | 4 +- .../test_kinesisvideoarchivedmedia.py | 8 +- tests/test_kms/test_kms_boto3.py | 14 +- tests/test_logs/test_integration.py | 12 +- tests/test_logs/test_logs.py | 18 +- tests/test_logs/test_logs_filter.py | 4 +- .../test_managedblockchain_members.py | 8 +- .../test_managedblockchain_nodes.py | 6 +- .../test_managedblockchain_proposalvotes.py | 4 +- tests/test_medialive/test_medialive.py | 10 +- tests/test_mediapackage/test_mediapackage.py | 34 +- tests/test_mediastore/test_mediastore.py | 8 +- tests/test_opsworks/test_stack.py | 2 +- .../test_organizations_boto3.py | 8 +- tests/test_ram/test_ram.py | 44 +-- tests/test_rds/test_filters.py | 8 +- tests/test_rds/test_rds.py | 22 +- tests/test_redshift/test_redshift.py | 62 ++-- tests/test_redshiftdata/test_server.py | 5 +- .../test_resourcegroupstaggingapi.py | 8 +- tests/test_route53/test_route53.py | 10 +- .../test_route53_cloudformation.py | 6 +- tests/test_s3/test_s3.py | 30 +- tests/test_s3/test_s3_acl.py | 2 +- tests/test_s3/test_s3_cloudformation.py | 18 +- tests/test_s3/test_s3_config.py | 10 +- tests/test_s3/test_s3_copyobject.py | 10 +- tests/test_s3/test_server.py | 4 +- .../cloudformation_test_configs.py | 12 +- .../test_sagemaker_cloudformation.py | 20 +- .../test_sagemaker/test_sagemaker_endpoint.py | 42 +-- tests/test_sagemaker/test_sagemaker_models.py | 4 +- .../test_sagemaker_notebooks.py | 10 +- .../test_sagemaker_processing.py | 42 ++- .../test_sagemaker/test_sagemaker_training.py | 52 ++- .../test_secretsmanager.py | 4 +- tests/test_ses/test_ses_boto3.py | 8 +- tests/test_ses/test_ses_sns_boto3.py | 2 +- tests/test_sns/test_application_boto3.py | 4 +- tests/test_sns/test_publish_batch.py | 4 +- tests/test_sns/test_publishing_boto3.py | 12 +- tests/test_sns/test_server.py | 4 +- tests/test_sns/test_topics_boto3.py | 35 +- tests/test_sqs/test_server.py | 6 +- tests/test_sqs/test_sqs.py | 23 +- 
tests/test_sqs/test_sqs_cloudformation.py | 4 +- tests/test_ssm/test_ssm_boto3.py | 133 ++++---- .../test_stepfunctions/test_stepfunctions.py | 2 +- .../test_stepfunctions_cloudformation.py | 4 +- tests/test_sts/test_sts.py | 150 +++------ tests/test_sts/test_sts_integration.py | 22 +- tests/test_swf/models/test_domain.py | 2 +- .../test_swf/responses/test_activity_tasks.py | 4 +- .../test_swf/responses/test_decision_tasks.py | 4 +- tests/test_swf/responses/test_domains.py | 2 +- .../responses/test_workflow_executions.py | 4 +- .../test_transcribe/test_transcribe_boto3.py | 30 +- tests/test_wafv2/test_server.py | 4 +- tests/test_wafv2/test_utils.py | 8 +- tests/test_wafv2/test_wafv2.py | 4 +- 144 files changed, 1084 insertions(+), 1786 deletions(-) diff --git a/tests/terraformtests/terraform-provider-aws b/tests/terraformtests/terraform-provider-aws index f9a6db6e3..01a50d074 160000 --- a/tests/terraformtests/terraform-provider-aws +++ b/tests/terraformtests/terraform-provider-aws @@ -1 +1 @@ -Subproject commit f9a6db6e3c3f3299701747972fd6c37ba4af36f4 +Subproject commit 01a50d07400ee7513b31ec10e9832a2d8290b4e2 diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index ff362cfdb..458612c05 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -511,7 +511,7 @@ def test_add_too_many_tags(): with pytest.raises(ClientError) as ex: client.add_tags_to_certificate( CertificateArn=arn, - Tags=[{"Key": "a-%d" % i, "Value": "abcd"} for i in range(1, 52)], + Tags=[{"Key": f"a-{i}", "Value": "abcd"} for i in range(1, 52)], ) ex.value.response["Error"]["Code"].should.equal("TooManyTagsException") ex.value.response["Error"]["Message"].should.contain("contains too many Tags") @@ -520,7 +520,7 @@ def test_add_too_many_tags(): # Add 49 tags first, then try to add 2 more. 
client.add_tags_to_certificate( CertificateArn=arn, - Tags=[{"Key": "p-%d" % i, "Value": "pqrs"} for i in range(1, 50)], + Tags=[{"Key": f"p-{i}", "Value": "pqrs"} for i in range(1, 50)], ) client.list_tags_for_certificate(CertificateArn=arn)["Tags"].should.have.length_of( 49 diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 34c27476e..6cde7cfe3 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -931,12 +931,8 @@ def test_create_authorizer(): response["ResponseMetadata"].pop("HTTPHeaders", None) response["ResponseMetadata"].pop("RetryAttempts", None) - response["items"][0]["id"].should.match( - r"{0}|{1}".format(authorizer_id2, authorizer_id) - ) - response["items"][1]["id"].should.match( - r"{0}|{1}".format(authorizer_id2, authorizer_id) - ) + response["items"][0]["id"].should.match(rf"{authorizer_id2}|{authorizer_id}") + response["items"][1]["id"].should.match(rf"{authorizer_id2}|{authorizer_id}") new_authorizer_name_with_vars = "authorizer_with_vars" response = client.create_authorizer( @@ -1186,9 +1182,7 @@ def test_put_integration_validation(): client.put_integration( restApiId=api_id, resourceId=root_id, - credentials="arn:aws:iam::{}:role/service-role/testfunction-role-oe783psq".format( - ACCOUNT_ID - ), + credentials=f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/testfunction-role-oe783psq", httpMethod="GET", type=_type, uri="arn:aws:apigateway:us-west-2:s3:path/b/k", @@ -1210,9 +1204,7 @@ def test_put_integration_validation(): client.put_integration( restApiId=api_id, resourceId=root_id, - credentials="arn:aws:iam::{}:role/service-role/testfunction-role-oe783psq".format( - ACCOUNT_ID - ), + credentials=f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/testfunction-role-oe783psq", httpMethod="GET", type=_type, uri="arn:aws:apigateway:us-west-2:s3:path/b/k", diff --git a/tests/test_apigateway/test_apigateway_cloudformation.py 
b/tests/test_apigateway/test_apigateway_cloudformation.py index b1d72fa39..8162d2912 100644 --- a/tests/test_apigateway/test_apigateway_cloudformation.py +++ b/tests/test_apigateway/test_apigateway_cloudformation.py @@ -367,7 +367,7 @@ def test_simple_apigateway_with_lambda_proxy(): statement = policy["Statement"][0] statement["FunctionName"].should.contain(fn_name) statement["Condition"]["ArnLike"]["AWS:SourceArn"].should.equal( - "arn:aws:execute-api:us-east-1:123456789012:{}/*/*".format(api_id) + f"arn:aws:execute-api:us-east-1:123456789012:{api_id}/*/*" ) diff --git a/tests/test_apigateway/test_apigateway_deployments.py b/tests/test_apigateway/test_apigateway_deployments.py index 641e7bc25..dd151d198 100644 --- a/tests/test_apigateway/test_apigateway_deployments.py +++ b/tests/test_apigateway/test_apigateway_deployments.py @@ -143,12 +143,8 @@ def test_create_multiple_deployments(): response = client.get_deployments(restApiId=api_id) - response["items"][0]["id"].should.match( - r"{0}|{1}".format(deployment_id2, deployment_id) - ) - response["items"][1]["id"].should.match( - r"{0}|{1}".format(deployment_id2, deployment_id) - ) + response["items"][0]["id"].should.match(rf"{deployment_id2}|{deployment_id}") + response["items"][1]["id"].should.match(rf"{deployment_id2}|{deployment_id}") @mock_apigateway diff --git a/tests/test_apigateway/test_server.py b/tests/test_apigateway/test_server.py index e9ead18b5..32ac93a11 100644 --- a/tests/test_apigateway/test_server.py +++ b/tests/test_apigateway/test_server.py @@ -34,16 +34,16 @@ def test_usage_plans_apis(): json.loads(res.data)["item"].should.have.length_of(1) # Get single usage plan - res = test_client.get("/usageplans/{0}".format(created_plan["id"])) + res = test_client.get(f"/usageplans/{created_plan['id']}") fetched_plan = json.loads(res.data) fetched_plan.should.equal(created_plan) # Not existing usage plan - res = test_client.get("/usageplans/{0}".format("not_existing")) + res = 
test_client.get("/usageplans/not_existing") res.status_code.should.equal(404) # Delete usage plan - res = test_client.delete("/usageplans/{0}".format(created_plan["id"])) + res = test_client.delete(f"/usageplans/{created_plan['id']}") res.data.should.equal(b"{}") # List usage plans (expect empty again) @@ -61,53 +61,45 @@ def test_usage_plans_keys(): created_api_key = json.loads(res.data) # List usage plans keys (expect empty) - res = test_client.get("/usageplans/{0}/keys".format(usage_plan_id)) + res = test_client.get(f"/usageplans/{usage_plan_id}/keys") json.loads(res.data)["item"].should.have.length_of(0) # Invalid api key (does not exists at all) - res = test_client.get( - "/usageplans/{0}/keys/{1}".format(usage_plan_id, "not_existing") - ) + res = test_client.get(f"/usageplans/{usage_plan_id}/keys/not_existing") res.status_code.should.equal(404) # not existing usage plan with existing api key - res = test_client.get( - "/usageplans/{0}/keys/{1}".format("not_existing", created_api_key["id"]) - ) + res = test_client.get(f"/usageplans/not_existing/keys/{created_api_key['id']}") res.status_code.should.equal(404) # not jet added api key - res = test_client.get( - "/usageplans/{0}/keys/{1}".format(usage_plan_id, created_api_key["id"]) - ) + res = test_client.get(f"/usageplans/{usage_plan_id}/keys/{created_api_key['id']}") res.status_code.should.equal(404) # Create usage plan key res = test_client.post( - "/usageplans/{0}/keys".format(usage_plan_id), + f"/usageplans/{usage_plan_id}/keys", data=json.dumps({"keyId": created_api_key["id"], "keyType": "API_KEY"}), ) created_usage_plan_key = json.loads(res.data) # List usage plans keys (expect 1 key) - res = test_client.get("/usageplans/{0}/keys".format(usage_plan_id)) + res = test_client.get(f"/usageplans/{usage_plan_id}/keys") json.loads(res.data)["item"].should.have.length_of(1) # Get single usage plan key - res = test_client.get( - "/usageplans/{0}/keys/{1}".format(usage_plan_id, created_api_key["id"]) - ) + res = 
test_client.get(f"/usageplans/{usage_plan_id}/keys/{created_api_key['id']}") fetched_plan_key = json.loads(res.data) fetched_plan_key.should.equal(created_usage_plan_key) # Delete usage plan key res = test_client.delete( - "/usageplans/{0}/keys/{1}".format(usage_plan_id, created_api_key["id"]) + f"/usageplans/{usage_plan_id}/keys/{created_api_key['id']}" ) res.data.should.equal(b"{}") # List usage plans keys (expect to be empty again) - res = test_client.get("/usageplans/{0}/keys".format(usage_plan_id)) + res = test_client.get(f"/usageplans/{usage_plan_id}/keys") json.loads(res.data)["item"].should.have.length_of(0) @@ -118,7 +110,7 @@ def test_create_usage_plans_key_non_existent_api_key(): # Create usage plan key with non-existent api key res = test_client.post( - "/usageplans/{0}/keys".format(usage_plan_id), + f"/usageplans/{usage_plan_id}/keys", data=json.dumps({"keyId": "non-existent", "keyType": "API_KEY"}), ) res.status_code.should.equal(404) diff --git a/tests/test_applicationautoscaling/test_applicationautoscaling.py b/tests/test_applicationautoscaling/test_applicationautoscaling.py index 7782ae98d..15eccd542 100644 --- a/tests/test_applicationautoscaling/test_applicationautoscaling.py +++ b/tests/test_applicationautoscaling/test_applicationautoscaling.py @@ -9,7 +9,7 @@ DEFAULT_ECS_CLUSTER = "default" DEFAULT_ECS_TASK = "test_ecs_task" DEFAULT_ECS_SERVICE = "sample-webapp" DEFAULT_SERVICE_NAMESPACE = "ecs" -DEFAULT_RESOURCE_ID = "service/{}/{}".format(DEFAULT_ECS_CLUSTER, DEFAULT_ECS_SERVICE) +DEFAULT_RESOURCE_ID = f"service/{DEFAULT_ECS_CLUSTER}/{DEFAULT_ECS_SERVICE}" DEFAULT_SCALABLE_DIMENSION = "ecs:service:DesiredCount" DEFAULT_MIN_CAPACITY = 1 DEFAULT_MAX_CAPACITY = 1 @@ -122,12 +122,12 @@ def test_describe_scalable_targets_only_return_ecs_targets(): register_scalable_target( client, ServiceNamespace="ecs", - ResourceId="service/{}/test1".format(DEFAULT_ECS_CLUSTER), + ResourceId=f"service/{DEFAULT_ECS_CLUSTER}/test1", ) register_scalable_target( 
client, ServiceNamespace="ecs", - ResourceId="service/{}/test2".format(DEFAULT_ECS_CLUSTER), + ResourceId=f"service/{DEFAULT_ECS_CLUSTER}/test2", ) register_scalable_target( client, @@ -158,7 +158,7 @@ def test_describe_scalable_targets_next_token_success(): register_scalable_target( client, ServiceNamespace="ecs", - ResourceId="service/{}/{}".format(DEFAULT_ECS_CLUSTER, i), + ResourceId=f"service/{DEFAULT_ECS_CLUSTER}/{i}", ) response = client.describe_scalable_targets( ServiceNamespace=DEFAULT_SERVICE_NAMESPACE @@ -379,9 +379,7 @@ def test_put_scaling_policy(policy_type, policy_body_kwargs): ) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) response["PolicyARN"].should.match( - r"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{}/{}:policyName/{}".format( - namespace, resource_id, policy_name - ) + rf"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{namespace}/{resource_id}:policyName/{policy_name}" ) @@ -434,9 +432,7 @@ def test_describe_scaling_policies(): policy["PolicyType"].should.equal(policy_type) policy["TargetTrackingScalingPolicyConfiguration"].should.equal(policy_body) policy["PolicyARN"].should.match( - r"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{}/{}:policyName/{}".format( - namespace, resource_id, policy_name - ) + rf"arn:aws:autoscaling:.*1:scalingPolicy:.*:resource/{namespace}/{resource_id}:policyName/{policy_name}" ) policy.should.have.key("CreationTime").which.should.be.a("datetime.datetime") diff --git a/tests/test_applicationautoscaling/test_validation.py b/tests/test_applicationautoscaling/test_validation.py index a3734a450..1f126d54f 100644 --- a/tests/test_applicationautoscaling/test_validation.py +++ b/tests/test_applicationautoscaling/test_validation.py @@ -12,7 +12,7 @@ DEFAULT_ECS_CLUSTER = "default" DEFAULT_ECS_TASK = "test_ecs_task" DEFAULT_ECS_SERVICE = "sample-webapp" DEFAULT_SERVICE_NAMESPACE = "ecs" -DEFAULT_RESOURCE_ID = "service/{}/{}".format(DEFAULT_ECS_CLUSTER, DEFAULT_ECS_SERVICE) 
+DEFAULT_RESOURCE_ID = f"service/{DEFAULT_ECS_CLUSTER}/{DEFAULT_ECS_SERVICE}" DEFAULT_SCALABLE_DIMENSION = "ecs:service:DesiredCount" DEFAULT_MIN_CAPACITY = 1 DEFAULT_MAX_CAPACITY = 1 @@ -71,7 +71,7 @@ def test_describe_scalable_targets_with_multiple_invalid_parameters_should_retur @mock_applicationautoscaling def test_register_scalable_target_ecs_with_non_existent_service_should_return_clusternotfound_exception(): client = boto3.client("application-autoscaling", region_name=DEFAULT_REGION) - resource_id = "service/{}/foo".format(DEFAULT_ECS_CLUSTER) + resource_id = f"service/{DEFAULT_ECS_CLUSTER}/foo" with pytest.raises(ClientError) as ex: register_scalable_target(client, ServiceNamespace="ecs", ResourceId=resource_id) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 2ea05c872..ed88a577f 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -99,7 +99,7 @@ def test_create_autoscaling_group_from_invalid_instance_id(): ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.value.response["Error"]["Code"].should.equal("ValidationError") ex.value.response["Error"]["Message"].should.equal( - "Instance [{0}] is invalid.".format(invalid_instance_id) + f"Instance [{invalid_instance_id}] is invalid." 
) @@ -518,9 +518,7 @@ def test_update_autoscaling_group_launch_config(): AutoScalingGroupName="test_asg", LaunchConfigurationName="test_launch_configuration_new", MinSize=1, - VPCZoneIdentifier="{subnet1},{subnet2}".format( - subnet1=mocked_networking["subnet1"], subnet2=mocked_networking["subnet2"] - ), + VPCZoneIdentifier=f"{mocked_networking['subnet1']},{mocked_networking['subnet2']}", NewInstancesProtectedFromScaleIn=False, ) @@ -566,9 +564,7 @@ def test_update_autoscaling_group_launch_template(): "Version": "1", }, MinSize=1, - VPCZoneIdentifier="{subnet1},{subnet2}".format( - subnet1=mocked_networking["subnet1"], subnet2=mocked_networking["subnet2"] - ), + VPCZoneIdentifier=f"{mocked_networking['subnet1']},{mocked_networking['subnet2']}", NewInstancesProtectedFromScaleIn=False, ) diff --git a/tests/test_autoscaling/test_autoscaling_groups.py b/tests/test_autoscaling/test_autoscaling_groups.py index 3be7c5d01..db0e535b2 100644 --- a/tests/test_autoscaling/test_autoscaling_groups.py +++ b/tests/test_autoscaling/test_autoscaling_groups.py @@ -61,7 +61,7 @@ class TestAutoScalingGroup(TestCase): def test_list_many_autoscaling_groups(self): for i in range(51): - self._create_group("TestGroup%d" % i) + self._create_group(f"TestGroup{i}") response = self.as_client.describe_auto_scaling_groups() groups = response["AutoScalingGroups"] diff --git a/tests/test_autoscaling/test_elb.py b/tests/test_autoscaling/test_elb.py index 15e632789..268ac21a1 100644 --- a/tests/test_autoscaling/test_elb.py +++ b/tests/test_autoscaling/test_elb.py @@ -203,10 +203,7 @@ class TestAutoScalingELB(TestCase): } ], TerminationPolicies=["OldestInstance", "NewestInstance"], - VPCZoneIdentifier="{subnet1},{subnet2}".format( - subnet1=self.mocked_networking["subnet1"], - subnet2=self.mocked_networking["subnet2"], - ), + VPCZoneIdentifier=f"{self.mocked_networking['subnet1']},{self.mocked_networking['subnet2']}", ) self.as_client.put_scheduled_update_group_action( @@ -231,10 +228,7 @@ class 
TestAutoScalingELB(TestCase): group["MinSize"].should.equal(INSTANCE_COUNT_GROUP) group["Instances"].should.have.length_of(INSTANCE_COUNT_GROUP) group["VPCZoneIdentifier"].should.equal( - "{subnet1},{subnet2}".format( - subnet1=self.mocked_networking["subnet1"], - subnet2=self.mocked_networking["subnet2"], - ) + f"{self.mocked_networking['subnet1']},{self.mocked_networking['subnet2']}" ) group["LaunchConfigurationName"].should.equal(self.lc_name) group["DefaultCooldown"].should.equal(60) diff --git a/tests/test_autoscaling/test_launch_configurations.py b/tests/test_autoscaling/test_launch_configurations.py index 39bd85e6e..0ef4aae75 100644 --- a/tests/test_autoscaling/test_launch_configurations.py +++ b/tests/test_autoscaling/test_launch_configurations.py @@ -21,9 +21,7 @@ def test_create_launch_configuration(): SecurityGroups=["default", "default2"], UserData="This is some user_data", InstanceMonitoring={"Enabled": True}, - IamInstanceProfile="arn:aws:iam::{}:instance-profile/testing".format( - ACCOUNT_ID - ), + IamInstanceProfile=f"arn:aws:iam::{ACCOUNT_ID}:instance-profile/testing", SpotPrice="0.1", ) @@ -39,7 +37,7 @@ def test_create_launch_configuration(): userdata.should.equal(b"This is some user_data") launch_config["InstanceMonitoring"].should.equal({"Enabled": True}) launch_config["IamInstanceProfile"].should.equal( - "arn:aws:iam::{}:instance-profile/testing".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:instance-profile/testing" ) launch_config["SpotPrice"].should.equal("0.1") launch_config["BlockDeviceMappings"].should.equal([]) @@ -56,9 +54,7 @@ def test_create_launch_configuration_with_block_device_mappings(): SecurityGroups=["default", "default2"], UserData="This is some user_data", InstanceMonitoring={"Enabled": True}, - IamInstanceProfile="arn:aws:iam::{}:instance-profile/testing".format( - ACCOUNT_ID - ), + IamInstanceProfile=f"arn:aws:iam::{ACCOUNT_ID}:instance-profile/testing", SpotPrice="0.1", BlockDeviceMappings=[ {"DeviceName": 
"/dev/xvdb", "VirtualName": "ephemeral0"}, @@ -236,7 +232,7 @@ def test_launch_configuration_describe_paginated(): conn = boto3.client("autoscaling", region_name="us-east-1") for i in range(51): conn.create_launch_configuration( - LaunchConfigurationName="TestLC%d" % i, + LaunchConfigurationName=f"TestLC{i}", ImageId=EXAMPLE_AMI_ID, InstanceType="t2.medium", ) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index ecb25030a..f1e9c1b35 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -60,17 +60,13 @@ def test_list_functions(): v1 = [f for f in our_functions if f["Version"] == "1"][0] v1["Description"].should.equal("v2") v1["FunctionArn"].should.equal( - "arn:aws:lambda:{}:{}:function:{}:1".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}:1" ) latest = [f for f in our_functions if f["Version"] == "$LATEST"][0] latest["Description"].should.equal("") latest["FunctionArn"].should.equal( - "arn:aws:lambda:{}:{}:function:{}:$LATEST".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}:$LATEST" ) @@ -125,9 +121,7 @@ def test_create_function_from_aws_bucket(): result.should.have.key("FunctionName").equals(function_name) result.should.have.key("FunctionArn").equals( - "arn:aws:lambda:{}:{}:function:{}".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}" ) result.should.have.key("Runtime").equals("python2.7") result.should.have.key("Handler").equals("lambda_function.lambda_handler") @@ -163,9 +157,7 @@ def test_create_function_from_zipfile(): result.should.equal( { "FunctionName": function_name, - "FunctionArn": "arn:aws:lambda:{}:{}:function:{}".format( - _lambda_region, ACCOUNT_ID, function_name - ), + "FunctionArn": 
f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}", "Runtime": "python2.7", "Role": result["Role"], "Handler": "lambda_function.lambda_handler", @@ -281,7 +273,7 @@ def test_get_function(): result["Configuration"].pop("LastModified") result["Code"]["Location"].should.equal( - "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region) + f"s3://awslambda-{_lambda_region}-tasks.s3-{_lambda_region}.amazonaws.com/test.zip" ) result["Code"]["RepositoryType"].should.equal("S3") @@ -309,9 +301,7 @@ def test_get_function(): result = conn.get_function(FunctionName=function_name, Qualifier="$LATEST") result["Configuration"]["Version"].should.equal("$LATEST") result["Configuration"]["FunctionArn"].should.equal( - "arn:aws:lambda:us-west-2:{}:function:{}:$LATEST".format( - ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:us-west-2:{ACCOUNT_ID}:function:{function_name}:$LATEST" ) # Test get function when can't find function name @@ -376,9 +366,7 @@ def test_get_function_configuration(key): ) result["Version"].should.equal("$LATEST") result["FunctionArn"].should.equal( - "arn:aws:lambda:{}:{}:function:{}:$LATEST".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}:$LATEST" ) # Test get function when can't find function name @@ -591,7 +579,7 @@ def test_publish(): # #SetComprehension ;-) published_arn = list({f["FunctionArn"] for f in our_functions} - {latest_arn})[0] - published_arn.should.contain("{}:1".format(function_name)) + published_arn.should.contain(f"{function_name}:1") conn.delete_function(FunctionName=function_name, Qualifier="1") @@ -639,9 +627,7 @@ def test_list_create_list_get_delete_list(): ) expected_function_result = { "Code": { - "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format( - _lambda_region - ), + "Location": f"s3://awslambda-{_lambda_region}-tasks.s3-{_lambda_region}.amazonaws.com/test.zip", "RepositoryType": 
"S3", }, "Configuration": { @@ -673,9 +659,7 @@ def test_list_create_list_get_delete_list(): f["FunctionArn"] for f in functions if f["FunctionName"] == function_name ][0] func_arn.should.equal( - "arn:aws:lambda:{}:{}:function:{}".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}" ) functions = conn.list_functions(FunctionVersion="ALL")["Functions"] our_functions = [f for f in functions if f["FunctionName"] == function_name] @@ -683,9 +667,7 @@ def test_list_create_list_get_delete_list(): latest = [f for f in our_functions if f["Version"] == "$LATEST"][0] latest["FunctionArn"].should.equal( - "arn:aws:lambda:{}:{}:function:{}:$LATEST".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}:$LATEST" ) latest.pop("FunctionArn") latest.pop("LastModified") @@ -694,17 +676,13 @@ def test_list_create_list_get_delete_list(): published = [f for f in our_functions if f["Version"] != "$LATEST"][0] published["Version"].should.equal("1") published["FunctionArn"].should.equal( - "arn:aws:lambda:{}:{}:function:{}:1".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}:1" ) func = conn.get_function(FunctionName=function_name) func["Configuration"]["FunctionArn"].should.equal( - "arn:aws:lambda:{}:{}:function:{}".format( - _lambda_region, ACCOUNT_ID, function_name - ) + f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:function:{function_name}" ) # this is hard to match against, so remove it @@ -746,7 +724,7 @@ def test_get_function_created_with_zipfile(): assert len(response["Code"]) == 2 assert response["Code"]["RepositoryType"] == "S3" assert response["Code"]["Location"].startswith( - "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com".format(_lambda_region) + f"s3://awslambda-{_lambda_region}-tasks.s3-{_lambda_region}.amazonaws.com" ) 
response.should.have.key("Configuration") config = response["Configuration"] @@ -801,17 +779,18 @@ def test_list_versions_by_function(): assert res["ResponseMetadata"]["HTTPStatusCode"] == 201 versions = conn.list_versions_by_function(FunctionName=function_name) assert len(versions["Versions"]) == 3 - assert versions["Versions"][0][ - "FunctionArn" - ] == "arn:aws:lambda:us-west-2:{}:function:{}:$LATEST".format( - ACCOUNT_ID, function_name + assert ( + versions["Versions"][0]["FunctionArn"] + == f"arn:aws:lambda:us-west-2:{ACCOUNT_ID}:function:{function_name}:$LATEST" + ) + assert ( + versions["Versions"][1]["FunctionArn"] + == f"arn:aws:lambda:us-west-2:{ACCOUNT_ID}:function:{function_name}:1" + ) + assert ( + versions["Versions"][2]["FunctionArn"] + == f"arn:aws:lambda:us-west-2:{ACCOUNT_ID}:function:{function_name}:2" ) - assert versions["Versions"][1][ - "FunctionArn" - ] == "arn:aws:lambda:us-west-2:{}:function:{}:1".format(ACCOUNT_ID, function_name) - assert versions["Versions"][2][ - "FunctionArn" - ] == "arn:aws:lambda:us-west-2:{}:function:{}:2".format(ACCOUNT_ID, function_name) conn.create_function( FunctionName="testFunction_2", @@ -826,10 +805,9 @@ def test_list_versions_by_function(): ) versions = conn.list_versions_by_function(FunctionName="testFunction_2") assert len(versions["Versions"]) == 1 - assert versions["Versions"][0][ - "FunctionArn" - ] == "arn:aws:lambda:us-west-2:{}:function:testFunction_2:$LATEST".format( - ACCOUNT_ID + assert ( + versions["Versions"][0]["FunctionArn"] + == f"arn:aws:lambda:us-west-2:{ACCOUNT_ID}:function:testFunction_2:$LATEST" ) @@ -980,7 +958,7 @@ def test_update_function_zip(key): assert len(response["Code"]) == 2 assert response["Code"]["RepositoryType"] == "S3" assert response["Code"]["Location"].startswith( - "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com".format(_lambda_region) + f"s3://awslambda-{_lambda_region}-tasks.s3-{_lambda_region}.amazonaws.com" ) config = response["Configuration"] @@ -1038,7 +1016,7 @@ 
def test_update_function_s3(): assert len(response["Code"]) == 2 assert response["Code"]["RepositoryType"] == "S3" assert response["Code"]["Location"].startswith( - "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com".format(_lambda_region) + f"s3://awslambda-{_lambda_region}-tasks.s3-{_lambda_region}.amazonaws.com" ) config = response["Configuration"] diff --git a/tests/test_awslambda/test_lambda_eventsourcemapping.py b/tests/test_awslambda/test_lambda_eventsourcemapping.py index c8eae48e1..b3d2181a0 100644 --- a/tests/test_awslambda/test_lambda_eventsourcemapping.py +++ b/tests/test_awslambda/test_lambda_eventsourcemapping.py @@ -238,7 +238,7 @@ def test_invoke_function_from_sqs_exception(): entries = [] for i in range(3): - body = {"uuid": str(uuid.uuid4()), "test": "test_{}".format(i)} + body = {"uuid": str(uuid.uuid4()), "test": f"test_{i}"} entry = {"Id": str(i), "MessageBody": json.dumps(body)} entries.append(entry) diff --git a/tests/test_awslambda/test_lambda_layers.py b/tests/test_awslambda/test_lambda_layers.py index 7404b9259..e015ee8ff 100644 --- a/tests/test_awslambda/test_lambda_layers.py +++ b/tests/test_awslambda/test_lambda_layers.py @@ -65,9 +65,7 @@ def test_get_lambda_layers(): for version in result["LayerVersions"]: version.pop("CreatedDate") result["LayerVersions"].sort(key=lambda x: x["Version"]) - expected_arn = "arn:aws:lambda:{0}:{1}:layer:{2}:".format( - _lambda_region, ACCOUNT_ID, layer_name - ) + expected_arn = f"arn:aws:lambda:{_lambda_region}:{ACCOUNT_ID}:layer:{layer_name}:" result["LayerVersions"].should.equal( [ { diff --git a/tests/test_awslambda/test_lambda_tags.py b/tests/test_awslambda/test_lambda_tags.py index 916f6ad7d..fa6cbcaaf 100644 --- a/tests/test_awslambda/test_lambda_tags.py +++ b/tests/test_awslambda/test_lambda_tags.py @@ -106,15 +106,15 @@ def test_tags_not_found(): """ conn = boto3.client("lambda", _lambda_region) conn.list_tags.when.called_with( - Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID) + 
Resource=f"arn:aws:lambda:{ACCOUNT_ID}:function:not-found" ).should.throw(botocore.client.ClientError) conn.tag_resource.when.called_with( - Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID), + Resource=f"arn:aws:lambda:{ACCOUNT_ID}:function:not-found", Tags=dict(spam="eggs"), ).should.throw(botocore.client.ClientError) conn.untag_resource.when.called_with( - Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID), + Resource=f"arn:aws:lambda:{ACCOUNT_ID}:function:not-found", TagKeys=["spam"], ).should.throw(botocore.client.ClientError) diff --git a/tests/test_awslambda/utilities.py b/tests/test_awslambda/utilities.py index 8c0f35764..e20ab5103 100644 --- a/tests/test_awslambda/utilities.py +++ b/tests/test_awslambda/utilities.py @@ -30,7 +30,12 @@ def lambda_handler(event, context): def get_test_zip_file2(): - func_str = """ + base_url = ( + "motoserver:5000" + if settings.TEST_SERVER_MODE + else "ec2.us-west-2.amazonaws.com" + ) + func_str = f""" import boto3 def lambda_handler(event, context): @@ -40,11 +45,7 @@ def lambda_handler(event, context): vol = ec2.Volume(volume_id) return {{'id': vol.id, 'state': vol.state, 'size': vol.size}} -""".format( - base_url="motoserver:5000" - if settings.TEST_SERVER_MODE - else "ec2.us-west-2.amazonaws.com" - ) +""" return _process_lambda(func_str) diff --git a/tests/test_batch/test_batch_jobs.py b/tests/test_batch/test_batch_jobs.py index b6178db38..72db8bffa 100644 --- a/tests/test_batch/test_batch_jobs.py +++ b/tests/test_batch/test_batch_jobs.py @@ -385,9 +385,7 @@ def _wait_for_job_statuses(client, job_id, statuses, seconds_to_wait=30): time.sleep(0.1) else: raise RuntimeError( - "Time out waiting for job status {status}!\n Last status: {last_status}".format( - status=statuses, last_status=last_job_status - ) + f"Time out waiting for job status {statuses}!\n Last status: {last_job_status}" ) diff --git a/tests/test_batch/test_batch_task_definition.py 
b/tests/test_batch/test_batch_task_definition.py index ecfa58835..a3235f750 100644 --- a/tests/test_batch/test_batch_task_definition.py +++ b/tests/test_batch/test_batch_task_definition.py @@ -18,7 +18,7 @@ def test_register_task_definition(use_resource_reqs): resp.should.contain("revision") assert resp["jobDefinitionArn"].endswith( - "{0}:{1}".format(resp["jobDefinitionName"], resp["revision"]) + f"{resp['jobDefinitionName']}:{resp['revision']}" ) @@ -113,7 +113,7 @@ def test_reregister_task_definition(use_resource_reqs): resp1.should.contain("revision") assert resp1["jobDefinitionArn"].endswith( - "{0}:{1}".format(resp1["jobDefinitionName"], resp1["revision"]) + f"{resp1['jobDefinitionName']}:{resp1['revision']}" ) resp1["revision"].should.equal(1) diff --git a/tests/test_cloudformation/test_cloudformation_custom_resources.py b/tests/test_cloudformation/test_cloudformation_custom_resources.py index 872e6b893..a7dfa3dca 100644 --- a/tests/test_cloudformation/test_cloudformation_custom_resources.py +++ b/tests/test_cloudformation/test_cloudformation_custom_resources.py @@ -12,7 +12,7 @@ from .fixtures.custom_lambda import get_template def get_lambda_code(): - pfunc = """ + return f""" def lambda_handler(event, context): # Need to print this, one of the tests verifies the correct input print(event) @@ -21,17 +21,14 @@ def lambda_handler(event, context): response["StackId"] = event["StackId"] response["RequestId"] = event["RequestId"] response["LogicalResourceId"] = event["LogicalResourceId"] - response["PhysicalResourceId"] = "{resource_id}" + response["PhysicalResourceId"] = "CustomResource{str(uuid4())[0:6]}" response_data = dict() response_data["info_value"] = "special value" if event["RequestType"] == "Create": response["Data"] = response_data import cfnresponse cfnresponse.send(event, context, cfnresponse.SUCCESS, response_data) -""".format( - resource_id=f"CustomResource{str(uuid4())[0:6]}" - ) - return pfunc +""" @mock_cloudformation diff --git 
a/tests/test_cloudformation/test_cloudformation_depends_on.py b/tests/test_cloudformation/test_cloudformation_depends_on.py index 3e6328d07..c593cf594 100644 --- a/tests/test_cloudformation/test_cloudformation_depends_on.py +++ b/tests/test_cloudformation/test_cloudformation_depends_on.py @@ -112,9 +112,7 @@ def test_create_stack_with_depends_on(): ecs = boto3.client("ecs", region_name="us-east-1") cluster_arn = ecs.list_clusters()["clusterArns"][0] - assert cluster_arn == "arn:aws:ecs:us-east-1:{}:cluster/test-cluster".format( - ACCOUNT_ID - ) + assert cluster_arn == f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test-cluster" @mock_cloudformation diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 4fe403d6a..bac16975e 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1075,10 +1075,10 @@ def test_creating_stacks_across_regions(): list(west2_cf.stacks.all()).should.have.length_of(1) list(west1_cf.stacks.all())[0].stack_id.should.contain( - "arn:aws:cloudformation:us-west-1:{}:stack/test_stack/".format(ACCOUNT_ID) + f"arn:aws:cloudformation:us-west-1:{ACCOUNT_ID}:stack/test_stack/" ) list(west2_cf.stacks.all())[0].stack_id.should.contain( - "arn:aws:cloudformation:us-west-2:{}:stack/test_stack/".format(ACCOUNT_ID) + f"arn:aws:cloudformation:us-west-2:{ACCOUNT_ID}:stack/test_stack/" ) @@ -1110,7 +1110,7 @@ def test_create_stack_with_notification_arn(): messages.should.have.length_of(1) msg = json.loads(messages[0].body) msg["Subject"].should.equal("AWS CloudFormation Notification") - msg["Message"].should.contain("StackId='{}'\n".format(stack.stack_id)) + msg["Message"].should.contain(f"StackId='{stack.stack_id}'\n") msg["Message"].should.contain("LogicalResourceId='test_stack_with_notifications'\n") 
msg["Message"].should.contain("ResourceStatus='CREATE_IN_PROGRESS'\n") msg["Message"].should.contain("ResourceStatusReason='User Initiated'\n") @@ -1128,7 +1128,7 @@ def test_create_stack_with_notification_arn(): messages = queue.receive_messages() messages.should.have.length_of(1) msg = json.loads(messages[0].body) - msg["Message"].should.contain("StackId='{}'\n".format(stack.stack_id)) + msg["Message"].should.contain(f"StackId='{stack.stack_id}'\n") msg["Message"].should.contain("LogicalResourceId='test_stack_with_notifications'\n") msg["Message"].should.contain("ResourceStatus='CREATE_COMPLETE'\n") msg["Message"].should.contain("ResourceStatusReason='None'\n") @@ -1150,10 +1150,10 @@ def test_create_stack_with_role_arn(): cf.create_stack( StackName="test_stack_with_notifications", TemplateBody=dummy_template_json, - RoleARN="arn:aws:iam::{}:role/moto".format(ACCOUNT_ID), + RoleARN=f"arn:aws:iam::{ACCOUNT_ID}:role/moto", ) stack = list(cf.stacks.all())[0] - stack.role_arn.should.equal("arn:aws:iam::{}:role/moto".format(ACCOUNT_ID)) + stack.role_arn.should.equal(f"arn:aws:iam::{ACCOUNT_ID}:role/moto") @mock_cloudformation @@ -1380,11 +1380,11 @@ def test_create_change_set_from_s3_url(): Tags=[{"Key": "tag-key", "Value": "tag-value"}], ) assert ( - "arn:aws:cloudformation:us-west-1:{}:changeSet/NewChangeSet/".format(ACCOUNT_ID) + f"arn:aws:cloudformation:us-west-1:{ACCOUNT_ID}:changeSet/NewChangeSet/" in response["Id"] ) assert ( - "arn:aws:cloudformation:us-west-1:{}:stack/NewStack".format(ACCOUNT_ID) + f"arn:aws:cloudformation:us-west-1:{ACCOUNT_ID}:stack/NewStack" in response["StackId"] ) @@ -1503,9 +1503,7 @@ def test_execute_change_set_w_name(): def test_describe_stack_pagination(): conn = boto3.client("cloudformation", region_name="us-east-1") for i in range(100): - conn.create_stack( - StackName="test_stack_{}".format(i), TemplateBody=dummy_template_json - ) + conn.create_stack(StackName=f"test_stack_{i}", TemplateBody=dummy_template_json) resp = 
conn.describe_stacks() stacks = resp["Stacks"] @@ -1789,7 +1787,7 @@ def test_describe_updated_stack(): cf_conn.update_stack( StackName="test_stack", - RoleARN="arn:aws:iam::{}:role/moto".format(ACCOUNT_ID), + RoleARN=f"arn:aws:iam::{ACCOUNT_ID}:role/moto", TemplateBody=dummy_update_template_json, Tags=[{"Key": "foo", "Value": "baz"}], Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}], @@ -1801,7 +1799,7 @@ def test_describe_updated_stack(): stack_by_id["StackId"].should.equal(stack["StackId"]) stack_by_id["StackName"].should.equal("test_stack") stack_by_id["StackStatus"].should.equal("UPDATE_COMPLETE") - stack_by_id["RoleARN"].should.equal("arn:aws:iam::{}:role/moto".format(ACCOUNT_ID)) + stack_by_id["RoleARN"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:role/moto") stack_by_id["Tags"].should.equal([{"Key": "foo", "Value": "baz"}]) # Verify the updated template is persisted @@ -1937,9 +1935,7 @@ def test_update_stack_when_rolled_back(): err = ex.value.response["Error"] err.should.have.key("Code").being.equal("ValidationError") err.should.have.key("Message").match( - r"Stack:arn:aws:cloudformation:us-east-1:{}:stack/test_stack/[a-z0-9-]+ is in ROLLBACK_COMPLETE state and can not be updated.".format( - ACCOUNT_ID - ) + rf"Stack:arn:aws:cloudformation:us-east-1:{ACCOUNT_ID}:stack/test_stack/[a-z0-9-]+ is in ROLLBACK_COMPLETE state and can not be updated." 
) @@ -2121,7 +2117,7 @@ def test_list_exports_with_token(): # Add index to ensure name is unique dummy_output_template["Outputs"]["StackVPC"]["Export"]["Name"] += str(i) cf.create_stack( - StackName="test_stack_{}".format(i), + StackName=f"test_stack_{i}", TemplateBody=json.dumps(dummy_output_template), ) exports = cf.list_exports() diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index ff8aaa177..a284ab101 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -52,7 +52,7 @@ def test_fn_join_boto3(): stack = cf.describe_stacks()["Stacks"][0] fn_join_output = stack["Outputs"][0] - fn_join_output["OutputValue"].should.equal("test eip:{0}".format(eip["PublicIp"])) + fn_join_output["OutputValue"].should.equal(f"test eip:{eip['PublicIp']}") @mock_cloudformation @@ -301,9 +301,7 @@ def lambda_handler(event, context): [ { "Version": 1, - "LayerVersionArn": "arn:aws:lambda:{}:{}:layer:{}:1".format( - region, ACCOUNT_ID, layer_name - ), + "LayerVersionArn": f"arn:aws:lambda:{region}:{ACCOUNT_ID}:layer:{layer_name}:1", "CompatibleRuntimes": ["python2.7", "python3.6"], "Description": "Test Layer", "LicenseInfo": "MIT", @@ -424,7 +422,7 @@ def test_stack_spot_fleet(): "Type": "AWS::EC2::SpotFleet", "Properties": { "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID), + "IamFleetRole": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet", "SpotPrice": "0.12", "TargetCapacity": 6, "AllocationStrategy": "diversified", @@ -445,9 +443,7 @@ def test_stack_spot_fleet(): "SecurityGroups": [{"GroupId": "sg-123"}], "SubnetId": subnet_id, "IamInstanceProfile": { - "Arn": "arn:aws:iam::{}:role/fleet".format( - ACCOUNT_ID - ) + "Arn": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet" }, "WeightedCapacity": "4", "SpotPrice": "10.00", @@ -480,7 +476,7 @@ def 
test_stack_spot_fleet(): spot_fleet_config["SpotPrice"].should.equal("0.12") spot_fleet_config["TargetCapacity"].should.equal(6) spot_fleet_config["IamFleetRole"].should.equal( - "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:role/fleet" ) spot_fleet_config["AllocationStrategy"].should.equal("diversified") spot_fleet_config["FulfilledCapacity"].should.equal(6.0) @@ -513,7 +509,7 @@ def test_stack_spot_fleet_should_figure_out_default_price(): "Type": "AWS::EC2::SpotFleet", "Properties": { "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID), + "IamFleetRole": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet", "TargetCapacity": 6, "AllocationStrategy": "diversified", "LaunchSpecifications": [ @@ -532,9 +528,7 @@ def test_stack_spot_fleet_should_figure_out_default_price(): "SecurityGroups": [{"GroupId": "sg-123"}], "SubnetId": subnet_id, "IamInstanceProfile": { - "Arn": "arn:aws:iam::{}:role/fleet".format( - ACCOUNT_ID - ) + "Arn": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet" }, "WeightedCapacity": "4", }, diff --git a/tests/test_cloudwatch/test_cloudwatch_alarms.py b/tests/test_cloudwatch/test_cloudwatch_alarms.py index 46e454486..e4ea033a7 100644 --- a/tests/test_cloudwatch/test_cloudwatch_alarms.py +++ b/tests/test_cloudwatch/test_cloudwatch_alarms.py @@ -19,8 +19,8 @@ def test_create_alarm(): Dimensions=[{"Name": "InstanceId", "Value": "i-0123457"}], EvaluationPeriods=5, InsufficientDataActions=["arn:insufficient"], - Namespace="{0}_namespace".format(name), - MetricName="{0}_metric".format(name), + Namespace=f"{name}_namespace", + MetricName=f"{name}_metric", OKActions=["arn:ok"], Period=60, Statistic="Average", @@ -48,7 +48,7 @@ def test_create_alarm(): alarm.should.have.key("InsufficientDataActions").equal(["arn:insufficient"]) alarm.should.have.key("Unit").equal("Seconds") alarm.should.have.key("AlarmArn").equal( - "arn:aws:cloudwatch:{}:{}:alarm:{}".format(region, ACCOUNT_ID, name) + 
f"arn:aws:cloudwatch:{region}:{ACCOUNT_ID}:alarm:{name}" ) # default value should be True alarm.should.have.key("ActionsEnabled").equal(True) @@ -70,8 +70,8 @@ def test_delete_alarm(): Dimensions=[{"Name": "InstanceId", "Value": "i-0123457"}], EvaluationPeriods=5, InsufficientDataActions=["arn:insufficient"], - Namespace="{0}_namespace".format(name), - MetricName="{0}_metric".format(name), + Namespace=f"{name}_namespace", + MetricName=f"{name}_metric", OKActions=["arn:ok"], Period=60, Statistic="Average", diff --git a/tests/test_codebuild/test_codebuild.py b/tests/test_codebuild/test_codebuild.py index 60905952c..05471c077 100644 --- a/tests/test_codebuild/test_codebuild.py +++ b/tests/test_codebuild/test_codebuild.py @@ -27,9 +27,7 @@ def test_codebuild_create_project_s3_artifacts(): environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) response = client.create_project( @@ -78,9 +76,7 @@ def test_codebuild_create_project_no_artifacts(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) response = client.create_project( @@ -127,9 +123,7 @@ def test_codebuild_create_project_with_invalid_name(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) with pytest.raises(client.exceptions.from_code("InvalidInputException")) as err: @@ -160,9 +154,7 @@ def test_codebuild_create_project_with_invalid_name_length(): environment["image"] = 
"contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) with pytest.raises(client.exceptions.from_code("InvalidInputException")) as err: @@ -192,9 +184,7 @@ def test_codebuild_create_project_when_exists(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -234,9 +224,7 @@ def test_codebuild_list_projects(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -275,9 +263,7 @@ def test_codebuild_list_builds_for_project_no_history(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -309,9 +295,7 @@ def test_codebuild_list_builds_for_project_with_history(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -392,9 +376,7 @@ def test_codebuild_start_build_no_overrides(): environment["image"] = "contents_not_validated" environment["computeType"] = 
"BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -426,9 +408,7 @@ def test_codebuild_start_build_multiple_times(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -463,9 +443,7 @@ def test_codebuild_start_build_with_overrides(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) branch_override = "fix/testing" @@ -504,9 +482,7 @@ def test_codebuild_batch_get_builds_1_project(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -543,9 +519,7 @@ def test_codebuild_batch_get_builds_2_projects(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -584,7 +558,7 @@ def test_codebuild_batch_get_builds_invalid_build_id(): client = boto3.client("codebuild", region_name="eu-central-1") with pytest.raises(client.exceptions.InvalidInputException) as err: - 
client.batch_get_builds(ids=["some_project{}".format(uuid1())]) + client.batch_get_builds(ids=[f"some_project{uuid1()}"]) err.value.response["Error"]["Code"].should.equal("InvalidInputException") @@ -613,9 +587,7 @@ def test_codebuild_delete_project(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -653,9 +625,7 @@ def test_codebuild_stop_build(): environment["image"] = "contents_not_validated" environment["computeType"] = "BUILD_GENERAL1_SMALL" service_role = ( - "arn:aws:iam::{0}:role/service-role/my-codebuild-service-role".format( - ACCOUNT_ID - ) + f"arn:aws:iam::{ACCOUNT_ID}:role/service-role/my-codebuild-service-role" ) client.create_project( @@ -678,7 +648,7 @@ def test_codebuild_stop_build_no_build(): client = boto3.client("codebuild", region_name="eu-central-1") with pytest.raises(client.exceptions.ResourceNotFoundException) as err: - client.stop_build(id="some_project:{0}".format(uuid1())) + client.stop_build(id=f"some_project:{uuid1()}") err.value.response["Error"]["Code"].should.equal("ResourceNotFoundException") @@ -687,5 +657,5 @@ def test_codebuild_stop_build_bad_uid(): client = boto3.client("codebuild", region_name="eu-central-1") with pytest.raises(client.exceptions.InvalidInputException) as err: - client.stop_build(id="some_project{0}".format(uuid1())) + client.stop_build(id=f"some_project{uuid1()}") err.value.response["Error"]["Code"].should.equal("InvalidInputException") diff --git a/tests/test_codecommit/test_codecommit.py b/tests/test_codecommit/test_codecommit.py index cf918b34c..6d6bdc0e6 100644 --- a/tests/test_codecommit/test_codecommit.py +++ b/tests/test_codecommit/test_codecommit.py @@ -24,19 +24,13 @@ def test_create_repository(): "description repo one" ) 
response["repositoryMetadata"]["cloneUrlSsh"].should.equal( - "ssh://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format( - "eu-central-1", "repository_one" - ) + "ssh://git-codecommit.eu-central-1.amazonaws.com/v1/repos/repository_one" ) response["repositoryMetadata"]["cloneUrlHttp"].should.equal( - "https://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format( - "eu-central-1", "repository_one" - ) + "https://git-codecommit.eu-central-1.amazonaws.com/v1/repos/repository_one" ) response["repositoryMetadata"]["Arn"].should.equal( - "arn:aws:codecommit:{0}:{1}:{2}".format( - "eu-central-1", ACCOUNT_ID, "repository_one" - ) + f"arn:aws:codecommit:eu-central-1:{ACCOUNT_ID}:repository_one" ) response["repositoryMetadata"]["accountId"].should.equal(ACCOUNT_ID) @@ -58,19 +52,13 @@ def test_create_repository_without_description(): response["repositoryMetadata"]["lastModifiedDate"].should_not.be.none response["repositoryMetadata"]["repositoryId"].should_not.be.empty response["repositoryMetadata"]["cloneUrlSsh"].should.equal( - "ssh://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format( - "eu-central-1", "repository_two" - ) + "ssh://git-codecommit.eu-central-1.amazonaws.com/v1/repos/repository_two" ) response["repositoryMetadata"]["cloneUrlHttp"].should.equal( - "https://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format( - "eu-central-1", "repository_two" - ) + "https://git-codecommit.eu-central-1.amazonaws.com/v1/repos/repository_two" ) response["repositoryMetadata"]["Arn"].should.equal( - "arn:aws:codecommit:{0}:{1}:{2}".format( - "eu-central-1", ACCOUNT_ID, "repository_two" - ) + f"arn:aws:codecommit:eu-central-1:{ACCOUNT_ID}:repository_two" ) response["repositoryMetadata"]["accountId"].should.equal(ACCOUNT_ID) @@ -91,7 +79,7 @@ def test_create_repository_repository_name_exists(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("RepositoryNameExistsException") 
ex.response["Error"]["Message"].should.equal( - "Repository named {0} already exists".format("repository_two") + "Repository named repository_two already exists" ) @@ -138,19 +126,13 @@ def test_get_repository(): "description repo one" ) response.get("repositoryMetadata").get("cloneUrlSsh").should.equal( - "ssh://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format( - "eu-central-1", "repository_one" - ) + "ssh://git-codecommit.eu-central-1.amazonaws.com/v1/repos/repository_one" ) response.get("repositoryMetadata").get("cloneUrlHttp").should.equal( - "https://git-codecommit.{0}.amazonaws.com/v1/repos/{1}".format( - "eu-central-1", "repository_one" - ) + "https://git-codecommit.eu-central-1.amazonaws.com/v1/repos/repository_one" ) response.get("repositoryMetadata").get("Arn").should.equal( - "arn:aws:codecommit:{0}:{1}:{2}".format( - "eu-central-1", ACCOUNT_ID, "repository_one" - ) + f"arn:aws:codecommit:eu-central-1:{ACCOUNT_ID}:repository_one" ) response.get("repositoryMetadata").get("accountId").should.equal(ACCOUNT_ID) @@ -162,9 +144,7 @@ def test_get_repository(): ex.operation_name.should.equal("GetRepository") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("RepositoryDoesNotExistException") - ex.response["Error"]["Message"].should.equal( - "{0} does not exist".format(repository_name) - ) + ex.response["Error"]["Message"].should.equal(f"{repository_name} does not exist") @mock_codecommit diff --git a/tests/test_codepipeline/test_codepipeline.py b/tests/test_codepipeline/test_codepipeline.py index fc2698a06..213755c79 100644 --- a/tests/test_codepipeline/test_codepipeline.py +++ b/tests/test_codepipeline/test_codepipeline.py @@ -459,7 +459,7 @@ def test_list_tags_for_resource(): create_basic_codepipeline(client, name) response = client.list_tags_for_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name) + 
resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}" ) response["tags"].should.equal([{"key": "key", "value": "value"}]) @@ -488,12 +488,12 @@ def test_tag_resource(): create_basic_codepipeline(client, name) client.tag_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}", tags=[{"key": "key-2", "value": "value-2"}], ) response = client.list_tags_for_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name) + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}" ) response["tags"].should.equal( [{"key": "key", "value": "value"}, {"key": "key-2", "value": "value-2"}] @@ -521,7 +521,7 @@ def test_tag_resource_errors(): with pytest.raises(ClientError) as e: client.tag_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}", tags=[{"key": "aws:key", "value": "value"}], ) ex = e.value @@ -536,20 +536,15 @@ def test_tag_resource_errors(): with pytest.raises(ClientError) as e: client.tag_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), - tags=[ - {"key": "key-{}".format(i), "value": "value-{}".format(i)} - for i in range(50) - ], + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}", + tags=[{"key": f"key-{i}", "value": f"value-{i}"} for i in range(50)], ) ex = e.value ex.operation_name.should.equal("TagResource") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("TooManyTagsException") ex.response["Error"]["Message"].should.equal( - "Tag limit exceeded for resource [arn:aws:codepipeline:us-east-1:123456789012:{}].".format( - name - ) + f"Tag limit exceeded for resource [arn:aws:codepipeline:us-east-1:123456789012:{name}]." 
) @@ -560,23 +555,23 @@ def test_untag_resource(): create_basic_codepipeline(client, name) response = client.list_tags_for_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name) + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}" ) response["tags"].should.equal([{"key": "key", "value": "value"}]) client.untag_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}", tagKeys=["key"], ) response = client.list_tags_for_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name) + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}" ) response["tags"].should.have.length_of(0) # removing a not existing tag should raise no exception client.untag_resource( - resourceArn="arn:aws:codepipeline:us-east-1:123456789012:{}".format(name), + resourceArn=f"arn:aws:codepipeline:us-east-1:123456789012:{name}", tagKeys=["key"], ) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index c61e4e01e..5e9d1e424 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -44,9 +44,7 @@ def test_create_identity_pool(): AllowUnauthenticatedIdentities=False, SupportedLoginProviders={"graph.facebook.com": "123456789012345"}, DeveloperProviderName="devname", - OpenIdConnectProviderARNs=[ - "arn:aws:rds:eu-west-2:{}:db:mysql-db".format(ACCOUNT_ID) - ], + OpenIdConnectProviderARNs=[f"arn:aws:rds:eu-west-2:{ACCOUNT_ID}:db:mysql-db"], CognitoIdentityProviders=[ { "ProviderName": "testprovider", @@ -54,7 +52,7 @@ def test_create_identity_pool(): "ServerSideTokenCheck": True, } ], - SamlProviderARNs=["arn:aws:rds:eu-west-2:{}:db:mysql-db".format(ACCOUNT_ID)], + SamlProviderARNs=[f"arn:aws:rds:eu-west-2:{ACCOUNT_ID}:db:mysql-db"], ) assert result["IdentityPoolId"] != "" @@ -68,9 +66,7 @@ def 
test_describe_identity_pool(): AllowUnauthenticatedIdentities=False, SupportedLoginProviders={"graph.facebook.com": "123456789012345"}, DeveloperProviderName="devname", - OpenIdConnectProviderARNs=[ - "arn:aws:rds:eu-west-2:{}:db:mysql-db".format(ACCOUNT_ID) - ], + OpenIdConnectProviderARNs=[f"arn:aws:rds:eu-west-2:{ACCOUNT_ID}:db:mysql-db"], CognitoIdentityProviders=[ { "ProviderName": "testprovider", @@ -78,7 +74,7 @@ def test_describe_identity_pool(): "ServerSideTokenCheck": True, } ], - SamlProviderARNs=["arn:aws:rds:eu-west-2:{}:db:mysql-db".format(ACCOUNT_ID)], + SamlProviderARNs=[f"arn:aws:rds:eu-west-2:{ACCOUNT_ID}:db:mysql-db"], ) result = conn.describe_identity_pool(IdentityPoolId=res["IdentityPoolId"]) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 3ec1fd496..630fefa2b 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -35,9 +35,7 @@ def test_create_user_pool(): result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") result["UserPool"]["Arn"].should.equal( - "arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format( - ACCOUNT_ID, result["UserPool"]["Id"] - ) + f"arn:aws:cognito-idp:us-west-2:{ACCOUNT_ID}:userpool/{result['UserPool']['Id']}" ) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) @@ -847,9 +845,7 @@ def test_create_user_pool_domain_custom_domain_config(): domain = str(uuid.uuid4()) custom_domain_config = { - "CertificateArn": "arn:aws:acm:us-east-1:{}:certificate/123456789012".format( - ACCOUNT_ID - ) + "CertificateArn": f"arn:aws:acm:us-east-1:{ACCOUNT_ID}:certificate/123456789012" } user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] result = conn.create_user_pool_domain( @@ -896,9 +892,7 @@ def test_update_user_pool_domain(): domain = str(uuid.uuid4()) custom_domain_config = { - "CertificateArn": 
"arn:aws:acm:us-east-1:{}:certificate/123456789012".format( - ACCOUNT_ID - ) + "CertificateArn": f"arn:aws:acm:us-east-1:{ACCOUNT_ID}:certificate/123456789012" } user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) @@ -2395,7 +2389,7 @@ def test_list_users_inherent_attributes(): for name, filter_value, response_field, response_field_expected_value in filters: result = conn.list_users( - UserPoolId=user_pool_id, Filter='{}="{}"'.format(name, filter_value) + UserPoolId=user_pool_id, Filter=f'{name}="{filter_value}"' ) result["Users"].should.have.length_of(1) result["Users"][0][response_field].should.equal(response_field_expected_value) @@ -2839,8 +2833,8 @@ def test_token_legitimacy(): access_token = outputs["access_token"] client_id = outputs["client_id"] username = outputs["username"] - issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format( - outputs["user_pool_id"] + issuer = ( + f"https://cognito-idp.us-west-2.amazonaws.com/{outputs['user_pool_id']}" ) id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) id_claims["iss"].should.equal(issuer) @@ -3451,7 +3445,7 @@ def test_resource_server(): ex.value.operation_name.should.equal("CreateResourceServer") ex.value.response["Error"]["Code"].should.equal("InvalidParameterException") ex.value.response["Error"]["Message"].should.equal( - "%s already exists in user pool %s." % (identifier, user_pool_id) + f"{identifier} already exists in user pool {user_pool_id}." 
) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) @@ -4308,8 +4302,6 @@ def verify_kid_header(token): def fetch_public_keys(): - keys_url = "https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format( - "us-west-2", "someuserpoolid" - ) + keys_url = "https://cognito-idp.us-west-2.amazonaws.com/someuserpoolid/.well-known/jwks.json" response = requests.get(keys_url).json() return response["keys"] diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py index 86a576815..0b8e9d8f8 100644 --- a/tests/test_config/test_config.py +++ b/tests/test_config/test_config.py @@ -289,9 +289,7 @@ def test_put_configuration_aggregator(): AccountAggregationSources=[ {"AccountIds": ["012345678910"], "AllAwsRegions": True} ], - Tags=[ - {"Key": "{}".format(x), "Value": "{}".format(x)} for x in range(0, 51) - ], + Tags=[{"Key": f"{x}", "Value": f"{x}"} for x in range(0, 51)], ) assert ( "Member must have length less than or equal to 50" @@ -399,9 +397,7 @@ def test_put_configuration_aggregator(): account_aggregation_source ] assert ( - "arn:aws:config:us-west-2:{}:config-aggregator/config-aggregator-".format( - ACCOUNT_ID - ) + f"arn:aws:config:us-west-2:{ACCOUNT_ID}:config-aggregator/config-aggregator-" in result["ConfigurationAggregator"]["ConfigurationAggregatorArn"] ) assert ( @@ -455,7 +451,7 @@ def test_describe_configuration_aggregators(): # Make 10 config aggregators: for x in range(0, 10): client.put_configuration_aggregator( - ConfigurationAggregatorName="testing{}".format(x), + ConfigurationAggregatorName=f"testing{x}", AccountAggregationSources=[ {"AccountIds": ["012345678910"], "AllAwsRegions": True} ], @@ -509,19 +505,19 @@ def test_describe_configuration_aggregators(): assert result["NextToken"] == "testing4" assert [ agg["ConfigurationAggregatorName"] for agg in result["ConfigurationAggregators"] - ] == ["testing{}".format(x) for x in range(0, 4)] + ] == [f"testing{x}" for x in range(0, 4)] result = 
client.describe_configuration_aggregators(Limit=4, NextToken="testing4") assert len(result["ConfigurationAggregators"]) == 4 assert result["NextToken"] == "testing8" assert [ agg["ConfigurationAggregatorName"] for agg in result["ConfigurationAggregators"] - ] == ["testing{}".format(x) for x in range(4, 8)] + ] == [f"testing{x}" for x in range(4, 8)] result = client.describe_configuration_aggregators(Limit=4, NextToken="testing8") assert len(result["ConfigurationAggregators"]) == 2 assert not result.get("NextToken") assert [ agg["ConfigurationAggregatorName"] for agg in result["ConfigurationAggregators"] - ] == ["testing{}".format(x) for x in range(8, 10)] + ] == [f"testing{x}" for x in range(8, 10)] # Test Pagination with Filtering: result = client.describe_configuration_aggregators( @@ -560,9 +556,7 @@ def test_put_aggregation_authorization(): client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-west-2", - Tags=[ - {"Key": "{}".format(x), "Value": "{}".format(x)} for x in range(0, 51) - ], + Tags=[{"Key": f"{x}", "Value": f"{x}"} for x in range(0, 51)], ) assert ( "Member must have length less than or equal to 50" @@ -626,10 +620,9 @@ def test_put_aggregation_authorization(): Tags=[{"Key": "tag", "Value": "a"}], ) - assert result["AggregationAuthorization"][ - "AggregationAuthorizationArn" - ] == "arn:aws:config:us-west-2:{}:aggregation-authorization/012345678910/us-east-1".format( - ACCOUNT_ID + assert ( + result["AggregationAuthorization"]["AggregationAuthorizationArn"] + == f"arn:aws:config:us-west-2:{ACCOUNT_ID}:aggregation-authorization/012345678910/us-east-1" ) assert result["AggregationAuthorization"]["AuthorizedAccountId"] == "012345678910" assert result["AggregationAuthorization"]["AuthorizedAwsRegion"] == "us-east-1" @@ -641,10 +634,9 @@ def test_put_aggregation_authorization(): result = client.put_aggregation_authorization( AuthorizedAccountId="012345678910", AuthorizedAwsRegion="us-east-1" ) - assert 
result["AggregationAuthorization"][ - "AggregationAuthorizationArn" - ] == "arn:aws:config:us-west-2:{}:aggregation-authorization/012345678910/us-east-1".format( - ACCOUNT_ID + assert ( + result["AggregationAuthorization"]["AggregationAuthorizationArn"] + == f"arn:aws:config:us-west-2:{ACCOUNT_ID}:aggregation-authorization/012345678910/us-east-1" ) assert result["AggregationAuthorization"]["AuthorizedAccountId"] == "012345678910" assert result["AggregationAuthorization"]["AuthorizedAwsRegion"] == "us-east-1" @@ -661,7 +653,7 @@ def test_describe_aggregation_authorizations(): # Make 10 account authorizations: for i in range(0, 10): client.put_aggregation_authorization( - AuthorizedAccountId="{}".format(str(i) * 12), + AuthorizedAccountId=f"{str(i) * 12}", AuthorizedAwsRegion="us-west-2", ) @@ -679,7 +671,7 @@ def test_describe_aggregation_authorizations(): assert result["NextToken"] == ("4" * 12) + "/us-west-2" assert [ auth["AuthorizedAccountId"] for auth in result["AggregationAuthorizations"] - ] == ["{}".format(str(x) * 12) for x in range(0, 4)] + ] == [f"{str(x) * 12}" for x in range(0, 4)] result = client.describe_aggregation_authorizations( Limit=4, NextToken=("4" * 12) + "/us-west-2" @@ -688,7 +680,7 @@ def test_describe_aggregation_authorizations(): assert result["NextToken"] == ("8" * 12) + "/us-west-2" assert [ auth["AuthorizedAccountId"] for auth in result["AggregationAuthorizations"] - ] == ["{}".format(str(x) * 12) for x in range(4, 8)] + ] == [f"{str(x) * 12}" for x in range(4, 8)] result = client.describe_aggregation_authorizations( Limit=4, NextToken=("8" * 12) + "/us-west-2" @@ -697,7 +689,7 @@ def test_describe_aggregation_authorizations(): assert not result.get("NextToken") assert [ auth["AuthorizedAccountId"] for auth in result["AggregationAuthorizations"] - ] == ["{}".format(str(x) * 12) for x in range(8, 10)] + ] == [f"{str(x) * 12}" for x in range(8, 10)] # Test with an invalid filter: with pytest.raises(ClientError) as ce: @@ -1252,7 +1244,7 
@@ def test_list_discovered_resource(): s3_client = boto3.client("s3", region_name="us-west-2") for x in range(0, 10): s3_client.create_bucket( - Bucket="bucket{}".format(x), + Bucket=f"bucket{x}", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, ) @@ -1269,8 +1261,8 @@ def test_list_discovered_resource(): for x in range(0, 10): assert result["resourceIdentifiers"][x] == { "resourceType": "AWS::S3::Bucket", - "resourceId": "bucket{}".format(x), - "resourceName": "bucket{}".format(x), + "resourceId": f"bucket{x}", + "resourceName": f"bucket{x}", } assert not result.get("nextToken") @@ -1330,7 +1322,7 @@ def test_list_discovered_resource(): ) # More than 20 resourceIds: - resource_ids = ["{}".format(x) for x in range(0, 21)] + resource_ids = [f"{x}" for x in range(0, 21)] with pytest.raises(ClientError) as ce: client.list_discovered_resources( resourceType="AWS::S3::Bucket", resourceIds=resource_ids @@ -1378,14 +1370,14 @@ def test_list_aggregate_discovered_resource(): s3_client = boto3.client("s3", region_name="us-west-2") for x in range(0, 10): s3_client.create_bucket( - Bucket="bucket{}".format(x), + Bucket=f"bucket{x}", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, ) s3_client_eu = boto3.client("s3", region_name="eu-west-1") for x in range(10, 12): s3_client_eu.create_bucket( - Bucket="eu-bucket{}".format(x), + Bucket=f"eu-bucket{x}", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}, ) @@ -1398,16 +1390,16 @@ def test_list_aggregate_discovered_resource(): assert result["ResourceIdentifiers"][x] == { "SourceAccountId": ACCOUNT_ID, "ResourceType": "AWS::S3::Bucket", - "ResourceId": "bucket{}".format(x), - "ResourceName": "bucket{}".format(x), + "ResourceId": f"bucket{x}", + "ResourceName": f"bucket{x}", "SourceRegion": "us-west-2", } for x in range(11, 12): assert result["ResourceIdentifiers"][x] == { "SourceAccountId": ACCOUNT_ID, "ResourceType": "AWS::S3::Bucket", - "ResourceId": "eu-bucket{}".format(x), - 
"ResourceName": "eu-bucket{}".format(x), + "ResourceId": f"eu-bucket{x}", + "ResourceName": f"eu-bucket{x}", "SourceRegion": "eu-west-1", } @@ -1519,7 +1511,7 @@ def test_get_resource_config_history(): s3_client = boto3.client("s3", region_name="us-west-2") for x in range(0, 10): s3_client.create_bucket( - Bucket="bucket{}".format(x), + Bucket=f"bucket{x}", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, ) @@ -1580,18 +1572,18 @@ def test_batch_get_resource_config(): s3_client = boto3.client("s3", region_name="us-west-2") for x in range(0, 10): s3_client.create_bucket( - Bucket="bucket{}".format(x), + Bucket=f"bucket{x}", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, ) # Get them all: keys = [ - {"resourceType": "AWS::S3::Bucket", "resourceId": "bucket{}".format(x)} + {"resourceType": "AWS::S3::Bucket", "resourceId": f"bucket{x}"} for x in range(0, 10) ] result = client.batch_get_resource_config(resourceKeys=keys) assert len(result["baseConfigurationItems"]) == 10 - buckets_missing = ["bucket{}".format(x) for x in range(0, 10)] + buckets_missing = [f"bucket{x}" for x in range(0, 10)] for r in result["baseConfigurationItems"]: buckets_missing.remove(r["resourceName"]) @@ -1656,22 +1648,22 @@ def test_batch_get_aggregate_resource_config(): s3_client = boto3.client("s3", region_name="us-west-2") for x in range(0, 10): s3_client.create_bucket( - Bucket="bucket{}".format(x), + Bucket=f"bucket{x}", CreateBucketConfiguration={"LocationConstraint": "us-west-2"}, ) s3_client.put_bucket_tagging( - Bucket="bucket{}".format(x), + Bucket=f"bucket{x}", Tagging={"TagSet": [{"Key": "Some", "Value": "Tag"}]}, ) s3_client_eu = boto3.client("s3", region_name="eu-west-1") for x in range(10, 12): s3_client_eu.create_bucket( - Bucket="eu-bucket{}".format(x), + Bucket=f"eu-bucket{x}", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"}, ) s3_client.put_bucket_tagging( - Bucket="eu-bucket{}".format(x), + Bucket=f"eu-bucket{x}", Tagging={"TagSet": 
[{"Key": "Some", "Value": "Tag"}]}, ) @@ -1681,7 +1673,7 @@ def test_batch_get_aggregate_resource_config(): "SourceAccountId": ACCOUNT_ID, "SourceRegion": "us-west-2", "ResourceType": "AWS::S3::Bucket", - "ResourceId": "bucket{}".format(x), + "ResourceId": f"bucket{x}", } for x in range(0, 10) ] @@ -1690,7 +1682,7 @@ def test_batch_get_aggregate_resource_config(): "SourceAccountId": ACCOUNT_ID, "SourceRegion": "eu-west-1", "ResourceType": "AWS::S3::Bucket", - "ResourceId": "eu-bucket{}".format(x), + "ResourceId": f"eu-bucket{x}", } for x in range(10, 12) ] @@ -1704,8 +1696,8 @@ def test_batch_get_aggregate_resource_config(): # Verify all the buckets are there: assert len(result["BaseConfigurationItems"]) == 12 - missing_buckets = ["bucket{}".format(x) for x in range(0, 10)] + [ - "eu-bucket{}".format(x) for x in range(10, 12) + missing_buckets = [f"bucket{x}" for x in range(0, 10)] + [ + f"eu-bucket{x}" for x in range(10, 12) ] for r in result["BaseConfigurationItems"]: @@ -2063,7 +2055,7 @@ def test_get_organization_conformance_pack_detailed_status(): status = response["OrganizationConformancePackDetailedStatuses"][0] status["AccountId"].should.equal(ACCOUNT_ID) status["ConformancePackName"].should.equal( - "OrgConformsPack-{}".format(arn[arn.rfind("/") + 1 :]) + f"OrgConformsPack-{arn[arn.rfind('/') + 1 :]}" ) status["Status"].should.equal("CREATE_SUCCESSFUL") update_time = status["LastUpdateTime"] @@ -2085,7 +2077,7 @@ def test_get_organization_conformance_pack_detailed_status(): status = response["OrganizationConformancePackDetailedStatuses"][0] status["AccountId"].should.equal(ACCOUNT_ID) status["ConformancePackName"].should.equal( - "OrgConformsPack-{}".format(arn[arn.rfind("/") + 1 :]) + f"OrgConformsPack-{arn[arn.rfind('/') + 1 :]}" ) status["Status"].should.equal("UPDATE_SUCCESSFUL") status["LastUpdateTime"].should.be.greater_than(update_time) diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py index 6966a8f6d..baf8c7faf 100644 --- 
a/tests/test_core/test_auth.py +++ b/tests/test_core/test_auth.py @@ -258,11 +258,7 @@ def test_access_denied_with_no_policy(): ex.value.response["Error"]["Code"].should.equal("AccessDenied") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( - "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, - user_name=user_name, - operation="ec2:DescribeInstances", - ) + f"User: arn:aws:iam::{ACCOUNT_ID}:user/{user_name} is not authorized to perform: ec2:DescribeInstances" ) @@ -288,11 +284,7 @@ def test_access_denied_with_not_allowing_policy(): ex.value.response["Error"]["Code"].should.equal("AccessDenied") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( - "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, - user_name=user_name, - operation="ec2:DescribeInstances", - ) + f"User: arn:aws:iam::{ACCOUNT_ID}:user/{user_name} is not authorized to perform: ec2:DescribeInstances" ) @@ -324,9 +316,7 @@ def test_access_denied_for_run_instances(): ex.value.response["Error"]["Code"].should.equal("AccessDenied") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( - "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:RunInstances" - ) + f"User: arn:aws:iam::{ACCOUNT_ID}:user/{user_name} is not authorized to perform: ec2:RunInstances" ) @@ -355,9 +345,7 @@ def test_access_denied_with_denying_policy(): ex.value.response["Error"]["Code"].should.equal("AccessDenied") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( - "User: 
arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateVpc" - ) + f"User: arn:aws:iam::{ACCOUNT_ID}:user/{user_name} is not authorized to perform: ec2:CreateVpc" ) @@ -535,9 +523,7 @@ def test_access_denied_with_many_irrelevant_policies(): ex.value.response["Error"]["Code"].should.equal("AccessDenied") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( - "User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, user_name=user_name, operation="ec2:CreateKeyPair" - ) + f"User: arn:aws:iam::{ACCOUNT_ID}:user/{user_name} is not authorized to perform: ec2:CreateKeyPair" ) @@ -552,9 +538,7 @@ def test_allowed_with_temporary_credentials(): "Version": "2012-10-17", "Statement": { "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID) - }, + "Principal": {"AWS": f"arn:aws:iam::{ACCOUNT_ID}:root"}, "Action": "sts:AssumeRole", }, } @@ -607,9 +591,7 @@ def test_access_denied_with_temporary_credentials(): "Version": "2012-10-17", "Statement": { "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID) - }, + "Principal": {"AWS": f"arn:aws:iam::{ACCOUNT_ID}:root"}, "Action": "sts:AssumeRole", }, } @@ -638,12 +620,7 @@ def test_access_denied_with_temporary_credentials(): ex.value.response["Error"]["Code"].should.equal("AccessDenied") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) ex.value.response["Error"]["Message"].should.equal( - "User: arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name} is not authorized to perform: {operation}".format( - account_id=ACCOUNT_ID, - role_name=role_name, - session_name=session_name, - operation="rds:CreateDBInstance", - ) + f"User: 
arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{session_name} is not authorized to perform: rds:CreateDBInstance" ) @@ -753,9 +730,7 @@ def test_s3_invalid_token_with_temporary_credentials(): "Version": "2012-10-17", "Statement": { "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID) - }, + "Principal": {"AWS": f"arn:aws:iam::{ACCOUNT_ID}:root"}, "Action": "sts:AssumeRole", }, } diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index f928cd914..9ab504a43 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -11,13 +11,13 @@ else: @mock_ec2 def test_latest_meta_data(): - res = requests.get("{0}/latest/meta-data/".format(BASE_URL)) + res = requests.get(f"{BASE_URL}/latest/meta-data/") res.content.should.equal(b"iam") @mock_ec2 def test_meta_data_iam(): - res = requests.get("{0}/latest/meta-data/iam".format(BASE_URL)) + res = requests.get(f"{BASE_URL}/latest/meta-data/iam") json_response = res.json() default_role = json_response["security-credentials"]["default-role"] default_role.should.contain("AccessKeyId") @@ -28,16 +28,14 @@ def test_meta_data_iam(): @mock_ec2 def test_meta_data_security_credentials(): - res = requests.get( - "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL) - ) + res = requests.get(f"{BASE_URL}/latest/meta-data/iam/security-credentials/") res.content.should.equal(b"default-role") @mock_ec2 def test_meta_data_default_role(): res = requests.get( - "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL) + f"{BASE_URL}/latest/meta-data/iam/security-credentials/default-role" ) json_response = res.json() json_response.should.contain("AccessKeyId") diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index 107b80e53..3d69ba23b 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -21,7 +21,7 @@ 
def test_reset_api(): conn.create_queue(QueueName="queue1") conn.list_queues()["QueueUrls"].should.have.length_of(1) - res = requests.post("{base_url}/moto-api/reset".format(base_url=base_url)) + res = requests.post(f"{base_url}/moto-api/reset") res.content.should.equal(b'{"status": "ok"}') conn.list_queues().shouldnt.contain("QueueUrls") # No more queues @@ -32,7 +32,7 @@ def test_data_api(): conn = boto3.client("sqs", region_name="us-west-1") conn.create_queue(QueueName="queue1") - res = requests.post("{base_url}/moto-api/data.json".format(base_url=base_url)) + res = requests.post(f"{base_url}/moto-api/data.json") queues = res.json()["sqs"]["Queue"] len(queues).should.equal(1) queue = queues[0] diff --git a/tests/test_core/test_request_mocking.py b/tests/test_core/test_request_mocking.py index dbe07ea77..17855d75a 100644 --- a/tests/test_core/test_request_mocking.py +++ b/tests/test_core/test_request_mocking.py @@ -8,7 +8,7 @@ from moto import mock_s3, mock_sts, mock_sqs, settings @mock_sqs @pytest.mark.network -def test_passthrough_requests(): +def test_passthrough_requests() -> None: conn = boto3.client("sqs", region_name="us-west-1") conn.create_queue(QueueName="queue1") @@ -19,7 +19,7 @@ def test_passthrough_requests(): if not settings.TEST_SERVER_MODE: @mock_sqs - def test_requests_to_amazon_subdomains_dont_work(): + def test_requests_to_amazon_subdomains_dont_work() -> None: res = requests.get("https://fakeservice.amazonaws.com/foo/bar") assert res.content == b"The method is not implemented" assert res.status_code == 400 @@ -27,7 +27,7 @@ if not settings.TEST_SERVER_MODE: @mock_sts @mock_s3 -def test_decorator_ordering(): +def test_decorator_ordering() -> None: """ https://github.com/spulec/moto/issues/3790#issuecomment-803979809 """ @@ -49,4 +49,4 @@ def test_decorator_ordering(): ) resp = requests.get(presigned_url) - resp.status_code.should.equal(200) + resp.status_code.should.equal(200) # type: ignore[attr-defined] diff --git 
a/tests/test_core/test_url_base_regex.py b/tests/test_core/test_url_base_regex.py index 18fa2e339..54d9d9070 100644 --- a/tests/test_core/test_url_base_regex.py +++ b/tests/test_core/test_url_base_regex.py @@ -19,11 +19,11 @@ class TestMockBucketStartingWithServiceName: @pytest.mark.parametrize("service_name,decorator", service_names) def test_bucketname_starting_with_service_name(self, service_name, decorator): - decorator = getattr(moto, "mock_{}".format(service_name)) + decorator = getattr(moto, f"mock_{service_name}") with decorator(): with mock_s3(): s3_client = boto3.client("s3", "eu-west-1") - bucket_name = "{}-bucket".format(service_name) + bucket_name = f"{service_name}-bucket" s3_client.create_bucket( ACL="private", Bucket=bucket_name, diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index b9eeeaee0..166568773 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -157,7 +157,7 @@ def test_listing_pipelines_boto3(): def test_listing_paginated_pipelines_boto3(): conn = boto3.client("datapipeline", region_name="us-west-2") for i in range(100): - conn.create_pipeline(name="mypipeline%d" % i, uniqueId="some-unique-id%d" % i) + conn.create_pipeline(name=f"mypipeline{i}", uniqueId=f"some-unique-id{i}") response = conn.list_pipelines() diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index d45078eba..f25bba2b9 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -145,7 +145,7 @@ def test_list_table_tags_paginated(): table_description = conn.describe_table(TableName=name) arn = table_description["Table"]["TableArn"] for i in range(11): - tags = [{"Key": "TestTag%d" % i, "Value": "TestValue"}] + tags = [{"Key": f"TestTag{i}", "Value": "TestValue"}] conn.tag_resource(ResourceArn=arn, Tags=tags) resp = conn.list_tags_of_resource(ResourceArn=arn) assert len(resp["Tags"]) == 
10 @@ -3861,7 +3861,7 @@ def test_transact_write_items_put_conditional_expressions(): { "Put": { "Item": { - "id": {"S": "foo{}".format(str(i))}, + "id": {"S": f"foo{i}"}, "foo": {"S": "bar"}, }, "TableName": "test-table", @@ -4285,9 +4285,7 @@ def assert_correct_client_error( braces = braces or ["{", "}"] assert client_error.response["Error"]["Code"] == code if message_values is not None: - values_string = "{open_brace}(?P.*){close_brace}".format( - open_brace=braces[0], close_brace=braces[1] - ) + values_string = f"{braces[0]}(?P.*){braces[1]}" re_msg = re.compile(message_template.format(values=values_string)) match_result = re_msg.match(client_error.response["Error"]["Message"]) assert match_result is not None @@ -4886,7 +4884,7 @@ def test_set_attribute_is_dropped_if_empty_after_update_expression(attr_name): client.update_item( TableName=table_name, Key={"customer": {"S": item_key}}, - UpdateExpression="ADD {} :order".format(attr_name), + UpdateExpression=f"ADD {attr_name} :order", ExpressionAttributeNames=expression_attribute_names, ExpressionAttributeValues={":order": {"SS": [set_item]}}, ) @@ -4898,7 +4896,7 @@ def test_set_attribute_is_dropped_if_empty_after_update_expression(attr_name): client.update_item( TableName=table_name, Key={"customer": {"S": item_key}}, - UpdateExpression="DELETE {} :order".format(attr_name), + UpdateExpression=f"DELETE {attr_name} :order", ExpressionAttributeNames=expression_attribute_names, ExpressionAttributeValues={":order": {"SS": [set_item]}}, ) @@ -5193,7 +5191,7 @@ def test_describe_backup_for_non_existent_backup_raises_error(): client.describe_backup(BackupArn=non_existent_arn) error = ex.value.response["Error"] error["Code"].should.equal("BackupNotFoundException") - error["Message"].should.equal("Backup not found: {}".format(non_existent_arn)) + error["Message"].should.equal(f"Backup not found: {non_existent_arn}") @mock_dynamodb @@ -5280,7 +5278,7 @@ def test_restore_table_from_non_existent_backup_raises_error(): ) 
error = ex.value.response["Error"] error["Code"].should.equal("BackupNotFoundException") - error["Message"].should.equal("Backup not found: {}".format(non_existent_arn)) + error["Message"].should.equal(f"Backup not found: {non_existent_arn}") @mock_dynamodb @@ -5301,7 +5299,7 @@ def test_restore_table_from_backup_raises_error_when_table_already_exists(): ) error = ex.value.response["Error"] error["Code"].should.equal("TableAlreadyExistsException") - error["Message"].should.equal("Table already exists: {}".format(table_name)) + error["Message"].should.equal(f"Table already exists: {table_name}") @mock_dynamodb @@ -5316,7 +5314,7 @@ def test_restore_table_from_backup(): ) table = resp.get("TableDescription") for i in range(5): - client.put_item(TableName=table_name, Item={"id": {"S": "item %d" % i}}) + client.put_item(TableName=table_name, Item={"id": {"S": f"item {i}"}}) backup_arn = ( client.create_backup(TableName=table_name, BackupName="backup") @@ -5356,7 +5354,7 @@ def test_restore_table_to_point_in_time(): ) table = resp.get("TableDescription") for i in range(5): - client.put_item(TableName=table_name, Item={"id": {"S": "item %d" % i}}) + client.put_item(TableName=table_name, Item={"id": {"S": f"item {i}"}}) restored_table_name = "restored-from-pit" restored = client.restore_table_to_point_in_time( @@ -5385,7 +5383,7 @@ def test_restore_table_to_point_in_time_raises_error_when_source_not_exist(): ) error = ex.value.response["Error"] error["Code"].should.equal("SourceTableNotFoundException") - error["Message"].should.equal("Source table not found: %s" % table_name) + error["Message"].should.equal(f"Source table not found: {table_name}") @mock_dynamodb @@ -5411,7 +5409,7 @@ def test_restore_table_to_point_in_time_raises_error_when_dest_exist(): ) error = ex.value.response["Error"] error["Code"].should.equal("TableAlreadyExistsException") - error["Message"].should.equal("Table already exists: %s" % restored_table_name) + error["Message"].should.equal(f"Table 
already exists: {restored_table_name}") @mock_dynamodb @@ -5422,7 +5420,7 @@ def test_delete_non_existent_backup_raises_error(): client.delete_backup(BackupArn=non_existent_arn) error = ex.value.response["Error"] error["Code"].should.equal("BackupNotFoundException") - error["Message"].should.equal("Backup not found: {}".format(non_existent_arn)) + error["Message"].should.equal(f"Backup not found: {non_existent_arn}") @mock_dynamodb @@ -5511,7 +5509,7 @@ def test_describe_endpoints(region): res.should.equal( [ { - "Address": "dynamodb.{}.amazonaws.com".format(region), + "Address": f"dynamodb.{region}.amazonaws.com", "CachePeriodInMinutes": 1440, }, ] diff --git a/tests/test_dynamodb/test_dynamodb_executor.py b/tests/test_dynamodb/test_dynamodb_executor.py index 7641445df..5578a5bb3 100644 --- a/tests/test_dynamodb/test_dynamodb_executor.py +++ b/tests/test_dynamodb/test_dynamodb_executor.py @@ -253,7 +253,7 @@ def test_execution_of_remove_in_list(table): @pytest.mark.parametrize("attr_name", ["s", "#placeholder"]) def test_execution_of_delete_element_from_set(table, attr_name): expression_attribute_names = {"#placeholder": "s"} - update_expression = "delete {} :value".format(attr_name) + update_expression = f"delete {attr_name} :value" update_expression_ast = UpdateExpressionParser.make(update_expression) item = Item( hash_key=DynamoType({"S": "id"}), @@ -276,7 +276,7 @@ def test_execution_of_delete_element_from_set(table, attr_name): assert expected_item == item # delete last elements - update_expression = "delete {} :value".format(attr_name) + update_expression = f"delete {attr_name} :value" update_expression_ast = UpdateExpressionParser.make(update_expression) validated_ast = UpdateExpressionValidator( update_expression_ast, diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index bad4e3ab3..80beeb812 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ 
b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -192,9 +192,7 @@ def test_boto3_conditions_ignorecase(): for expr in between_expressions: results = dynamodb.query( TableName="users", - KeyConditionExpression="forum_name = :forum_name and subject {}".format( - expr - ), + KeyConditionExpression=f"forum_name = :forum_name and subject {expr}", ExpressionAttributeValues={ ":forum_name": {"S": "the-key"}, ":start": {"S": "100"}, @@ -1023,7 +1021,7 @@ def test_query_pagination(): table.put_item( Item={ "forum_name": "the-key", - "subject": "{0}".format(i), + "subject": f"{i}", "username": "johndoe", "created": Decimal("3"), } diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index a15509baa..caadf8bd6 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -505,7 +505,7 @@ def test_update_settype_item_with_conditions(): def test_scan_pagination(): table = _create_user_table() - expected_usernames = ["user{0}".format(i) for i in range(10)] + expected_usernames = [f"user{i}" for i in range(10)] for u in expected_usernames: table.put_item(Item={"username": u}) diff --git a/tests/test_dynamodb_v20111205/test_server.py b/tests/test_dynamodb_v20111205/test_server.py index c5bbfa2ec..af2ab454d 100644 --- a/tests/test_dynamodb_v20111205/test_server.py +++ b/tests/test_dynamodb_v20111205/test_server.py @@ -1319,7 +1319,7 @@ def create_table(test_client, name=None, region=None, use_range_key=True): "Content-Type": "application/x-amz-json-1.0", } if region: - headers["Host"] = "dynamodb.{}.amazonaws.com".format(region) + headers["Host"] = f"dynamodb.{region}.amazonaws.com" request_body = { "TableName": name, "KeySchema": { diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index 68b3b8f1b..6a6410431 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ 
-599,8 +599,8 @@ def test_ami_attribute_user_permissions(): ] permissions.should.equal([]) - USER1 = "".join(["{}".format(random.randint(0, 9)) for _ in range(0, 12)]) - USER2 = "".join(["{}".format(random.randint(0, 9)) for _ in range(0, 12)]) + USER1 = "".join([f"{random.randint(0, 9)}" for _ in range(0, 12)]) + USER2 = "".join([f"{random.randint(0, 9)}" for _ in range(0, 12)]) ADD_USERS_ARGS = { "ImageId": image.id, @@ -673,7 +673,7 @@ def test_ami_describe_executable_users(): instance_id = response["Reservations"][0]["Instances"][0]["InstanceId"] image_id = conn.create_image(InstanceId=instance_id, Name="TestImage")["ImageId"] - USER1 = "".join(["{}".format(random.randint(0, 9)) for _ in range(0, 12)]) + USER1 = "".join([f"{random.randint(0, 9)}" for _ in range(0, 12)]) ADD_USER_ARGS = { "ImageId": image_id, @@ -706,8 +706,8 @@ def test_ami_describe_executable_users_negative(): instance_id = response["Reservations"][0]["Instances"][0]["InstanceId"] image_id = conn.create_image(InstanceId=instance_id, Name="TestImage")["ImageId"] - USER1 = "".join(["{}".format(random.randint(0, 9)) for _ in range(0, 12)]) - USER2 = "".join(["{}".format(random.randint(0, 9)) for _ in range(0, 12)]) + USER1 = "".join([f"{random.randint(0, 9)}" for _ in range(0, 12)]) + USER2 = "".join([f"{random.randint(0, 9)}" for _ in range(0, 12)]) ADD_USER_ARGS = { "ImageId": image_id, @@ -742,7 +742,7 @@ def test_ami_describe_executable_users_and_filter(): "ImageId" ] - USER1 = "".join(["{}".format(random.randint(0, 9)) for _ in range(0, 12)]) + USER1 = "".join([f"{random.randint(0, 9)}" for _ in range(0, 12)]) ADD_USER_ARGS = { "ImageId": image_id, @@ -1127,7 +1127,7 @@ def test_ami_filter_by_empty_tag(): for i in range(10): image = client.create_image( InstanceId=instance.instance_id, - Name="MyAMI{}".format(i), + Name=f"MyAMI{i}", Description="Test", ) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index e80d0bb62..d17dbc999 100644 --- 
a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -107,9 +107,7 @@ def test_delete_attached_volume(): ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.value.response["Error"]["Code"].should.equal("VolumeInUse") ex.value.response["Error"]["Message"].should.equal( - "Volume {0} is currently attached to {1}".format( - volume.id, instance["InstanceId"] - ) + f"Volume {volume.id} is currently attached to {instance['InstanceId']}" ) volume.detach_from_instance(InstanceId=instance["InstanceId"]) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 7fc203a45..a1e67456a 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -228,9 +228,7 @@ def test_instance_detach_volume_wrong_path(): ex.value.response["Error"]["Code"].should.equal("InvalidAttachment.NotFound") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.value.response["Error"]["Message"].should.equal( - "The volume {0} is not attached to instance {1} as device {2}".format( - volume.volume_id, instance.instance_id, "/dev/sdf" - ) + f"The volume {volume.volume_id} is not attached to instance {instance.instance_id} as device /dev/sdf" ) @@ -2120,9 +2118,7 @@ def test_describe_instance_attribute(): ) ex.value.response["Error"]["Code"].should.equal("InvalidParameterValue") ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - message = "Value ({invalid_instance_attribute}) for parameter attribute is invalid. Unknown attribute.".format( - invalid_instance_attribute=invalid_instance_attribute - ) + message = f"Value ({invalid_instance_attribute}) for parameter attribute is invalid. Unknown attribute." 
ex.value.response["Error"]["Message"].should.equal(message) @@ -2227,7 +2223,7 @@ def test_instance_termination_protection(): error = ex.value.response["Error"] error["Code"].should.equal("OperationNotPermitted") ex.value.response["Error"]["Message"].should.match( - r"The instance '{}' may not be terminated.*$".format(instance_id) + rf"The instance '{instance_id}' may not be terminated.*$" ) # Use alternate request syntax for setting attribute. diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 9904f32e2..a40f897a9 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -304,9 +304,7 @@ def test_duplicate_network_acl_entry(): ) str(ex.value).should.equal( "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " - "operation: The network acl entry identified by {} already exists.".format( - rule_number - ) + f"operation: The network acl entry identified by {rule_number} already exists." 
) diff --git a/tests/test_ec2/test_prefix_lists.py b/tests/test_ec2/test_prefix_lists.py index a31a5106a..9611ebcb7 100644 --- a/tests/test_ec2/test_prefix_lists.py +++ b/tests/test_ec2/test_prefix_lists.py @@ -15,9 +15,7 @@ def test_create(): prefix_list.should.have.key("AddressFamily").equals("?") prefix_list.should.have.key("State").equals("create-complete") prefix_list.should.have.key("PrefixListArn").equals( - "arn:aws:ec2:us-west-1:{}:prefix-list/{}".format( - ACCOUNT_ID, prefix_list["PrefixListId"] - ) + f"arn:aws:ec2:us-west-1:{ACCOUNT_ID}:prefix-list/{prefix_list['PrefixListId']}" ) prefix_list.should.have.key("PrefixListName").equals("examplelist") prefix_list.should.have.key("MaxEntries").equals(2) diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index c89b1eecf..d26e9658f 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -63,10 +63,10 @@ def test_add_servers_to_multiple_regions_boto3(): def test_create_autoscaling_group_boto3(): regions = [("us-east-1", "c"), ("ap-northeast-1", "a")] for region, zone in regions: - a_zone = "{}{}".format(region, zone) - asg_name = "{}_tester_group_{}".format(region, str(uuid4())[0:6]) - lb_name = "{}_lb_{}".format(region, str(uuid4())[0:6]) - config_name = "{}_tester_{}".format(region, str(uuid4())[0:6]) + a_zone = f"{region}{zone}" + asg_name = f"{region}_tester_group_{str(uuid4())[0:6]}" + lb_name = f"{region}_lb_{str(uuid4())[0:6]}" + config_name = f"{region}_tester_{str(uuid4())[0:6]}" elb_client = boto3.client("elb", region_name=region) elb_client.create_load_balancer( diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 8e8d56ec8..9a9ec4531 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -818,9 +818,7 @@ def test_create_route_with_invalid_destination_cidr_block_parameter(): ) str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the 
CreateRoute " - "operation: Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format( - destination_cidr_block - ) + f"operation: Value ({destination_cidr_block}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block." ) route_table.create_route( diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 3f9969545..dba97645a 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -589,7 +589,7 @@ def test_sec_group_rule_limit(use_vpc): ip_permissions = [ { "IpProtocol": "-1", - "IpRanges": [{"CidrIp": "{}.0.0.0/0".format(i)} for i in range(110)], + "IpRanges": [{"CidrIp": f"{i}.0.0.0/0"} for i in range(110)], } ] client.authorize_security_group_ingress( @@ -621,7 +621,7 @@ def test_sec_group_rule_limit(use_vpc): permissions = [ { "IpProtocol": "-1", - "IpRanges": [{"CidrIp": "{}.0.0.0/0".format(i)} for i in range(limit - 1)], + "IpRanges": [{"CidrIp": f"{i}.0.0.0/0"} for i in range(limit - 1)], } ] client.authorize_security_group_ingress(GroupId=sg.id, IpPermissions=permissions) @@ -654,9 +654,7 @@ def test_sec_group_rule_limit(use_vpc): permissions = [ { "IpProtocol": "-1", - "IpRanges": [ - {"CidrIp": "{}.0.0.0/0".format(i)} for i in range(1, limit - 1) - ], + "IpRanges": [{"CidrIp": f"{i}.0.0.0/0"} for i in range(1, limit - 1)], } ] client.authorize_security_group_egress(GroupId=sg.id, IpPermissions=permissions) @@ -1187,7 +1185,7 @@ def test_update_security_group_rule_descriptions_ingress(): def test_non_existent_security_group_raises_error_on_authorize(): client = boto3.client("ec2", "us-east-1") non_existent_sg = "sg-123abc" - expected_error = "The security group '{}' does not exist".format(non_existent_sg) + expected_error = f"The security group '{non_existent_sg}' does not exist" authorize_funcs = [ client.authorize_security_group_egress, client.authorize_security_group_ingress, diff --git 
a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 5c2c32b1d..4cd370c85 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -22,7 +22,7 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): "ClientToken": "string", "SpotPrice": "0.12", "TargetCapacity": 6, - "IamFleetRole": "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID), + "IamFleetRole": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet", "LaunchSpecifications": [ { "ImageId": EXAMPLE_AMI_ID, @@ -47,9 +47,7 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): ], "Monitoring": {"Enabled": True}, "SubnetId": subnet_id, - "IamInstanceProfile": { - "Arn": "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID) - }, + "IamInstanceProfile": {"Arn": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet"}, "EbsOptimized": False, "WeightedCapacity": 2.0, "SpotPrice": "0.13", @@ -62,9 +60,7 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): "InstanceType": "t2.large", "Monitoring": {"Enabled": True}, "SubnetId": subnet_id, - "IamInstanceProfile": { - "Arn": "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID) - }, + "IamInstanceProfile": {"Arn": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet"}, "EbsOptimized": False, "WeightedCapacity": 4.0, "SpotPrice": "10.00", @@ -96,7 +92,7 @@ def test_create_spot_fleet_with_lowest_price(): spot_fleet_config["SpotPrice"].should.equal("0.12") spot_fleet_config["TargetCapacity"].should.equal(6) spot_fleet_config["IamFleetRole"].should.equal( - "arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:role/fleet" ) spot_fleet_config["AllocationStrategy"].should.equal("lowestPrice") spot_fleet_config["FulfilledCapacity"].should.equal(6.0) @@ -107,7 +103,7 @@ def test_create_spot_fleet_with_lowest_price(): launch_spec["EbsOptimized"].should.equal(False) launch_spec["SecurityGroups"].should.equal([{"GroupId": "sg-123"}]) launch_spec["IamInstanceProfile"].should.equal( - {"Arn": 
"arn:aws:iam::{}:role/fleet".format(ACCOUNT_ID)} + {"Arn": f"arn:aws:iam::{ACCOUNT_ID}:role/fleet"} ) launch_spec["ImageId"].should.equal(EXAMPLE_AMI_ID) launch_spec["InstanceType"].should.equal("t2.small") diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 53ee8a7eb..09a41f697 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -356,11 +356,7 @@ def test_create_subnet_response_fields(): subnet.should.have.key("AssignIpv6AddressOnCreation").which.should.equal(False) subnet.should.have.key("Ipv6Native").which.should.equal(False) - subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format( - region=subnet["AvailabilityZone"][0:-1], - owner_id=subnet["OwnerId"], - subnet_id=subnet["SubnetId"], - ) + subnet_arn = f"arn:aws:ec2:{subnet['AvailabilityZone'][0:-1]}:{subnet['OwnerId']}:subnet/{subnet['SubnetId']}" subnet.should.have.key("SubnetArn").which.should.equal(subnet_arn) subnet.should.have.key("Ipv6CidrBlockAssociationSet").which.should.equal([]) @@ -393,11 +389,7 @@ def test_describe_subnet_response_fields(): subnet.should.have.key("AssignIpv6AddressOnCreation").which.should.equal(False) subnet.should.have.key("Ipv6Native").which.should.equal(False) - subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format( - region=subnet["AvailabilityZone"][0:-1], - owner_id=subnet["OwnerId"], - subnet_id=subnet["SubnetId"], - ) + subnet_arn = f"arn:aws:ec2:{subnet['AvailabilityZone'][0:-1]}:{subnet['OwnerId']}:subnet/{subnet['SubnetId']}" subnet.should.have.key("SubnetArn").which.should.equal(subnet_arn) subnet.should.have.key("Ipv6CidrBlockAssociationSet").which.should.equal([]) @@ -418,9 +410,7 @@ def test_create_subnet_with_invalid_availability_zone(): ) assert str(ex.value).startswith( "An error occurred (InvalidParameterValue) when calling the CreateSubnet " - "operation: Value ({}) for parameter availabilityZone is invalid. 
Subnets can currently only be created in the following availability zones: ".format( - subnet_availability_zone - ) + f"operation: Value ({subnet_availability_zone}) for parameter availabilityZone is invalid. Subnets can currently only be created in the following availability zones: " ) @@ -437,7 +427,7 @@ def test_create_subnet_with_invalid_cidr_range(): ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.value).should.equal( "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " - "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block) + f"operation: The CIDR '{subnet_cidr_block}' is invalid." ) @@ -455,7 +445,7 @@ def test_create_subnet_with_invalid_cidr_range_multiple_vpc_cidr_blocks(): ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.value).should.equal( "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " - "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block) + f"operation: The CIDR '{subnet_cidr_block}' is invalid." ) @@ -472,9 +462,7 @@ def test_create_subnet_with_invalid_cidr_block_parameter(): ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateSubnet " - "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format( - subnet_cidr_block - ) + f"operation: Value ({subnet_cidr_block}) for parameter cidrBlock is invalid. This is not a valid CIDR block." 
) @@ -532,9 +520,7 @@ def test_create_subnets_with_overlapping_cidr_blocks(): ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) str(ex.value).should.equal( "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet " - "operation: The CIDR '{}' conflicts with another subnet".format( - subnet_cidr_block - ) + f"operation: The CIDR '{subnet_cidr_block}' conflicts with another subnet" ) diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index c767b66fa..39b497aaf 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -131,7 +131,7 @@ def test_tag_limit_exceeded(): instance = ec2.create_instances(ImageId=EXAMPLE_AMI_ID, MinCount=1, MaxCount=1)[0] tag_list = [] for i in range(51): - tag_list.append({"Key": "{0:02d}".format(i + 1), "Value": ""}) + tag_list.append({"Key": f"{i+1:02d}", "Value": ""}) with pytest.raises(ClientError) as ex: client.create_tags(Resources=[instance.id], Tags=tag_list) diff --git a/tests/test_ec2/test_transit_gateway.py b/tests/test_ec2/test_transit_gateway.py index dcfc39d36..ccb40ed67 100644 --- a/tests/test_ec2/test_transit_gateway.py +++ b/tests/test_ec2/test_transit_gateway.py @@ -48,9 +48,7 @@ def test_create_transit_gateway(): gateways.should.have.length_of(1) gateways[0].should.have.key("CreationTime") gateways[0].should.have.key("TransitGatewayArn").equal( - "arn:aws:ec2:us-east-1:{}:transit-gateway/{}".format( - ACCOUNT_ID, gateway["TransitGatewayId"] - ) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/{gateway['TransitGatewayId']}" ) gateways[0]["Options"].should.have.key("AssociationDefaultRouteTableId").equal( gateways[0]["Options"]["PropagationDefaultRouteTableId"] diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 1ab7f08ff..f9b211d18 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -399,9 +399,9 @@ def test_vpc_peering_connections_cross_region_accept_wrong_region(): 
ec2_usw1.accept_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id) cm.value.response["Error"]["Code"].should.equal("OperationNotPermitted") exp_msg = ( - "Incorrect region ({0}) specified for this request.VPC " - "peering connection {1} must be " - "accepted in region {2}".format("us-west-1", vpc_pcx_usw1.id, "ap-northeast-1") + "Incorrect region (us-west-1) specified for this request.VPC " + f"peering connection {vpc_pcx_usw1.id} must be " + "accepted in region ap-northeast-1" ) cm.value.response["Error"]["Message"].should.equal(exp_msg) @@ -424,9 +424,9 @@ def test_vpc_peering_connections_cross_region_reject_wrong_region(): ec2_usw1.reject_vpc_peering_connection(VpcPeeringConnectionId=vpc_pcx_usw1.id) cm.value.response["Error"]["Code"].should.equal("OperationNotPermitted") exp_msg = ( - "Incorrect region ({0}) specified for this request.VPC " - "peering connection {1} must be accepted or " - "rejected in region {2}".format("us-west-1", vpc_pcx_usw1.id, "ap-northeast-1") + "Incorrect region (us-west-1) specified for this request.VPC " + f"peering connection {vpc_pcx_usw1.id} must be accepted or " + "rejected in region ap-northeast-1" ) cm.value.response["Error"]["Message"].should.equal(exp_msg) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 2dc27528c..1c7966e45 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -561,14 +561,12 @@ def test_associate_vpc_ipv4_cidr_block(): # Associate/Extend vpc CIDR range up to 5 ciders for i in range(43, 47): response = ec2.meta.client.associate_vpc_cidr_block( - VpcId=vpc.id, CidrBlock="10.10.{}.0/24".format(i) + VpcId=vpc.id, CidrBlock=f"10.10.{i}.0/24" ) response["CidrBlockAssociation"]["CidrBlockState"]["State"].should.equal( "associating" ) - response["CidrBlockAssociation"]["CidrBlock"].should.equal( - "10.10.{}.0/24".format(i) - ) + response["CidrBlockAssociation"]["CidrBlock"].should.equal(f"10.10.{i}.0/24") 
response["CidrBlockAssociation"]["AssociationId"].should.contain( "vpc-cidr-assoc" ) @@ -590,9 +588,7 @@ def test_associate_vpc_ipv4_cidr_block(): ) str(ex.value).should.equal( "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format( - vpc.id - ) + f"operation: This network '{vpc.id}' has met its maximum number of allowed CIDRs: 5" ) @@ -657,8 +653,8 @@ def test_disassociate_vpc_ipv4_cidr_block(): ) str(ex.value).should.equal( "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " - "The vpc CIDR block with association ID {} may not be disassociated. It is the primary " - "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id) + f"The vpc CIDR block with association ID {vpc_base_cidr_assoc_id} may not be disassociated. It is the primary " + "IPv4 CIDR block of the VPC" ) @@ -738,9 +734,7 @@ def test_vpc_associate_ipv6_cidr_block(): ) str(ex.value).should.equal( "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format( - vpc.id - ) + f"operation: This network '{vpc.id}' has met its maximum number of allowed CIDRs: 1" ) # Test associate ipv6 cidr block after vpc created @@ -848,9 +842,7 @@ def test_create_vpc_with_invalid_cidr_block_parameter(): ec2.create_vpc(CidrBlock=vpc_cidr_block) str(ex.value).should.equal( "An error occurred (InvalidParameterValue) when calling the CreateVpc " - "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format( - vpc_cidr_block - ) + f"operation: Value ({vpc_cidr_block}) for parameter cidrBlock is invalid. This is not a valid CIDR block." 
) @@ -863,7 +855,7 @@ def test_create_vpc_with_invalid_cidr_range(): ec2.create_vpc(CidrBlock=vpc_cidr_block) str(ex.value).should.equal( "An error occurred (InvalidVpc.Range) when calling the CreateVpc " - "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block) + f"operation: The CIDR '{vpc_cidr_block}' is invalid." ) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 7573fd713..a5c5e2536 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -22,8 +22,8 @@ from moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID def _create_image_digest(contents=None): if not contents: - contents = "docker_image{0}".format(int(random() * 10**6)) - return "sha256:%s" % hashlib.sha256(contents.encode("utf-8")).hexdigest() + contents = f"docker_image{int(random() * 10**6)}" + return "sha256:" + hashlib.sha256(contents.encode("utf-8")).hexdigest() def _create_image_manifest(): diff --git a/tests/test_ecs/test_ecs_account_settings.py b/tests/test_ecs/test_ecs_account_settings.py index 0f2dbe406..caacc0296 100644 --- a/tests/test_ecs/test_ecs_account_settings.py +++ b/tests/test_ecs/test_ecs_account_settings.py @@ -130,7 +130,7 @@ def test_put_account_setting_changes_service_arn(): response = client.list_services(cluster="dummy-cluster", launchType="FARGATE") service_arn = response["serviceArns"][0] service_arn.should.equal( - "arn:aws:ecs:eu-west-1:{}:service/test-ecs-service".format(ACCOUNT_ID) + f"arn:aws:ecs:eu-west-1:{ACCOUNT_ID}:service/test-ecs-service" ) # Second invocation returns long ARN's by default, after deleting the preference @@ -138,9 +138,7 @@ def test_put_account_setting_changes_service_arn(): response = client.list_services(cluster="dummy-cluster", launchType="FARGATE") service_arn = response["serviceArns"][0] service_arn.should.equal( - "arn:aws:ecs:eu-west-1:{}:service/dummy-cluster/test-ecs-service".format( - ACCOUNT_ID - ) + 
f"arn:aws:ecs:eu-west-1:{ACCOUNT_ID}:service/dummy-cluster/test-ecs-service" ) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index bd1513a27..4c9b259b5 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -30,7 +30,7 @@ def test_create_cluster(): response = client.create_cluster(clusterName="test_ecs_cluster") response["cluster"]["clusterName"].should.equal("test_ecs_cluster") response["cluster"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["cluster"]["status"].should.equal("ACTIVE") response["cluster"]["registeredContainerInstancesCount"].should.equal(0) @@ -60,10 +60,10 @@ def test_list_clusters(): _ = client.create_cluster(clusterName="test_cluster1") response = client.list_clusters() response["clusterArns"].should.contain( - "arn:aws:ecs:us-east-2:{}:cluster/test_cluster0".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-2:{ACCOUNT_ID}:cluster/test_cluster0" ) response["clusterArns"].should.contain( - "arn:aws:ecs:us-east-2:{}:cluster/test_cluster1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-2:{ACCOUNT_ID}:cluster/test_cluster1" ) @@ -104,7 +104,7 @@ def test_describe_clusters_missing(): response = client.describe_clusters(clusters=["some-cluster"]) response["failures"].should.contain( { - "arn": "arn:aws:ecs:us-east-1:{}:cluster/some-cluster".format(ACCOUNT_ID), + "arn": f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/some-cluster", "reason": "MISSING", } ) @@ -117,7 +117,7 @@ def test_delete_cluster(): response = client.delete_cluster(cluster="test_ecs_cluster") response["cluster"]["clusterName"].should.equal("test_ecs_cluster") response["cluster"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["cluster"]["status"].should.equal("ACTIVE") 
response["cluster"]["registeredContainerInstancesCount"].should.equal(0) @@ -154,7 +154,7 @@ def test_register_task_definition(): response["taskDefinition"]["family"].should.equal("test_ecs_task") response["taskDefinition"]["revision"].should.equal(1) response["taskDefinition"]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["taskDefinition"]["networkMode"].should.equal("bridge") response["taskDefinition"]["volumes"].should.equal([]) @@ -192,7 +192,7 @@ def test_register_task_definition(): response["taskDefinition"]["revision"].should.equal(2) response["taskDefinition"]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:2".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:2" ) # Registering with optional top-level params @@ -293,10 +293,10 @@ def test_list_task_definitions(): response = client.list_task_definitions() len(response["taskDefinitionArns"]).should.equal(2) response["taskDefinitionArns"][0].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["taskDefinitionArns"][1].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:2".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:2" ) @@ -356,10 +356,10 @@ def test_list_task_definitions_with_family_prefix(): filtered_response = client.list_task_definitions(familyPrefix="test_ecs_task_a") len(filtered_response["taskDefinitionArns"]).should.equal(2) filtered_response["taskDefinitionArns"][0].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task_a:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task_a:1" ) 
filtered_response["taskDefinitionArns"][1].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task_a:2".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task_a:2" ) @@ -419,12 +419,12 @@ def test_describe_task_definitions(): ) response = client.describe_task_definition(taskDefinition="test_ecs_task") response["taskDefinition"]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:3".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:3" ) response = client.describe_task_definition(taskDefinition="test_ecs_task:2") response["taskDefinition"]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:2".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:2" ) response["taskDefinition"]["taskRoleArn"].should.equal("my-task-role-arn") response["taskDefinition"]["executionRoleArn"].should.equal("my-execution-role-arn") @@ -458,7 +458,7 @@ def test_deregister_task_definition_1(): type(response["taskDefinition"]).should.be(dict) response["taskDefinition"]["status"].should.equal("INACTIVE") response["taskDefinition"]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["taskDefinition"]["containerDefinitions"][0]["name"].should.equal( "hello_world" @@ -526,7 +526,7 @@ def test_create_service(): desiredCount=2, ) response["service"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["service"]["desiredCount"].should.equal(2) len(response["service"]["events"]).should.equal(0) @@ -534,14 +534,12 @@ def test_create_service(): response["service"]["pendingCount"].should.equal(0) 
response["service"]["runningCount"].should.equal(0) response["service"]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service" ) response["service"]["serviceName"].should.equal("test_ecs_service") response["service"]["status"].should.equal("ACTIVE") response["service"]["taskDefinition"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["service"]["schedulingStrategy"].should.equal("REPLICA") response["service"]["launchType"].should.equal("EC2") @@ -618,7 +616,7 @@ def test_create_service_scheduling_strategy(): schedulingStrategy="DAEMON", ) response["service"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["service"]["desiredCount"].should.equal(2) len(response["service"]["events"]).should.equal(0) @@ -626,14 +624,12 @@ def test_create_service_scheduling_strategy(): response["service"]["pendingCount"].should.equal(0) response["service"]["runningCount"].should.equal(0) response["service"]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service" ) response["service"]["serviceName"].should.equal("test_ecs_service") response["service"]["status"].should.equal("ACTIVE") response["service"]["taskDefinition"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["service"]["schedulingStrategy"].should.equal("DAEMON") @@ -684,16 +680,8 @@ def test_list_services(): desiredCount=2, ) - test_ecs_service1_arn = 
( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster1/test_ecs_service1".format( - ACCOUNT_ID - ) - ) - test_ecs_service2_arn = ( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster1/test_ecs_service2".format( - ACCOUNT_ID - ) - ) + test_ecs_service1_arn = f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster1/test_ecs_service1" + test_ecs_service2_arn = f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster1/test_ecs_service2" cluster1_services = client.list_services(cluster="test_ecs_cluster1") len(cluster1_services["serviceArns"]).should.equal(2) @@ -766,22 +754,16 @@ def test_describe_services(): cluster="test_ecs_cluster", services=[ "test_ecs_service1", - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service2".format( - ACCOUNT_ID - ), + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service2", ], ) len(response["services"]).should.equal(2) response["services"][0]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service1".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service1" ) response["services"][0]["serviceName"].should.equal("test_ecs_service1") response["services"][1]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service2".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service2" ) response["services"][1]["serviceName"].should.equal("test_ecs_service2") @@ -802,9 +784,7 @@ def test_describe_services(): cluster="test_ecs_cluster", services=[ "test_ecs_service1", - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service2".format( - ACCOUNT_ID - ), + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service2", ], include=["TAGS"], ) @@ -840,9 +820,7 @@ def test_describe_services_new_arn(): cluster="test_ecs_cluster", services=["test_ecs_service1"] ) 
response["services"][0]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service1".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service1" ) @@ -889,23 +867,17 @@ def test_describe_services_scheduling_strategy(): cluster="test_ecs_cluster", services=[ "test_ecs_service1", - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service2".format( - ACCOUNT_ID - ), + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service2", "test_ecs_service3", ], ) len(response["services"]).should.equal(3) response["services"][0]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service1".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service1" ) response["services"][0]["serviceName"].should.equal("test_ecs_service1") response["services"][1]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service2".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service2" ) response["services"][1]["serviceName"].should.equal("test_ecs_service2") @@ -971,7 +943,7 @@ def test_describe_services_with_known_unknown_services(): service_name, "unknown", service_arn, - "arn:aws:ecs:eu-central-1:{}:service/unknown-2".format(ACCOUNT_ID), + f"arn:aws:ecs:eu-central-1:{ACCOUNT_ID}:service/unknown-2", ], ) @@ -987,13 +959,11 @@ def test_describe_services_with_known_unknown_services(): sorted(failures, key=lambda item: item["arn"]).should.equal( [ { - "arn": "arn:aws:ecs:eu-central-1:{}:service/unknown".format(ACCOUNT_ID), + "arn": f"arn:aws:ecs:eu-central-1:{ACCOUNT_ID}:service/unknown", "reason": "MISSING", }, { - "arn": "arn:aws:ecs:eu-central-1:{}:service/unknown-2".format( - ACCOUNT_ID - ), + "arn": f"arn:aws:ecs:eu-central-1:{ACCOUNT_ID}:service/unknown-2", "reason": "MISSING", }, ] @@ -1093,7 +1063,7 @@ def 
test_delete_service(): cluster="test_ecs_cluster", service="test_ecs_service" ) response["service"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["service"]["desiredCount"].should.equal(0) len(response["service"]["events"]).should.equal(0) @@ -1101,15 +1071,13 @@ def test_delete_service(): response["service"]["pendingCount"].should.equal(0) response["service"]["runningCount"].should.equal(0) response["service"]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service" ) response["service"]["serviceName"].should.equal("test_ecs_service") response["service"]["status"].should.equal("ACTIVE") response["service"]["schedulingStrategy"].should.equal("REPLICA") response["service"]["taskDefinition"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) @@ -1146,7 +1114,7 @@ def test_delete_service__using_arns(): ) response = client.delete_service(cluster=cluster_arn, service=service_arn) response["service"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) @@ -1180,22 +1148,20 @@ def test_delete_service_force(): cluster="test_ecs_cluster", service="test_ecs_service", force=True ) response["service"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) len(response["service"]["events"]).should.equal(0) len(response["service"]["loadBalancers"]).should.equal(0) response["service"]["pendingCount"].should.equal(0) response["service"]["runningCount"].should.equal(0) 
response["service"]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service" ) response["service"]["serviceName"].should.equal("test_ecs_service") response["service"]["status"].should.equal("ACTIVE") response["service"]["schedulingStrategy"].should.equal("REPLICA") response["service"]["taskDefinition"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) @@ -1277,9 +1243,7 @@ def test_register_container_instance(): response["containerInstance"]["ec2InstanceId"].should.equal(test_instance.id) full_arn = response["containerInstance"]["containerInstanceArn"] arn_part = full_arn.split("/") - arn_part[0].should.equal( - "arn:aws:ecs:us-east-1:{}:container-instance".format(ACCOUNT_ID) - ) + arn_part[0].should.equal(f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:container-instance") arn_part[1].should.equal("test_ecs_cluster") arn_part[2].should.equal(str(UUID(arn_part[2]))) response["containerInstance"]["status"].should.equal("ACTIVE") @@ -1686,16 +1650,16 @@ def test_run_task(): ) len(response["tasks"]).should.equal(2) response["tasks"][0]["taskArn"].should.contain( - "arn:aws:ecs:us-east-1:{}:task/".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task/" ) response["tasks"][0]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["tasks"][0]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["tasks"][0]["containerInstanceArn"].should.contain( - "arn:aws:ecs:us-east-1:{}:container-instance/".format(ACCOUNT_ID) + 
f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:container-instance/" ) response["tasks"][0]["overrides"].should.equal({}) response["tasks"][0]["lastStatus"].should.equal("RUNNING") @@ -1835,16 +1799,16 @@ def test_run_task_default_cluster(): len(response["tasks"]).should.equal(2) response["tasks"][0].should.have.key("launchType").equals("FARGATE") response["tasks"][0]["taskArn"].should.match( - "arn:aws:ecs:us-east-1:{}:task/default/[a-z0-9-]+$".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task/default/[a-z0-9-]+$" ) response["tasks"][0]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/default".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/default" ) response["tasks"][0]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["tasks"][0]["containerInstanceArn"].should.contain( - "arn:aws:ecs:us-east-1:{}:container-instance/".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:container-instance/" ) response["tasks"][0]["overrides"].should.equal({}) response["tasks"][0]["lastStatus"].should.equal("RUNNING") @@ -1975,18 +1939,16 @@ def test_start_task(): len(response["tasks"]).should.equal(1) response["tasks"][0]["taskArn"].should.contain( - "arn:aws:ecs:us-east-1:{}:task/".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task/" ) response["tasks"][0]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["tasks"][0]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["tasks"][0]["containerInstanceArn"].should.equal( - "arn:aws:ecs:us-east-1:{0}:container-instance/test_ecs_cluster/{1}".format( - ACCOUNT_ID, 
container_instance_id - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:container-instance/test_ecs_cluster/{container_instance_id}" ) response["tasks"][0]["overrides"].should.equal({}) response["tasks"][0]["lastStatus"].should.equal("RUNNING") @@ -2190,7 +2152,7 @@ def test_describe_task_definition_by_family(): ) ) task["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) task["volumes"].should.equal([]) task["status"].should.equal("ACTIVE") @@ -2508,16 +2470,16 @@ def test_task_definitions_with_port_clash(): ) len(response["tasks"]).should.equal(1) response["tasks"][0]["taskArn"].should.contain( - "arn:aws:ecs:us-east-1:{}:task/".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task/" ) response["tasks"][0]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["tasks"][0]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) response["tasks"][0]["containerInstanceArn"].should.contain( - "arn:aws:ecs:us-east-1:{}:container-instance/".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:container-instance/" ) response["tasks"][0]["overrides"].should.equal({}) response["tasks"][0]["lastStatus"].should.equal("RUNNING") @@ -2857,7 +2819,7 @@ def test_create_service_load_balancing(): ], ) response["service"]["clusterArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:cluster/test_ecs_cluster".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:cluster/test_ecs_cluster" ) response["service"]["desiredCount"].should.equal(2) len(response["service"]["events"]).should.equal(0) @@ -2875,14 +2837,12 @@ def test_create_service_load_balancing(): 
response["service"]["pendingCount"].should.equal(0) response["service"]["runningCount"].should.equal(0) response["service"]["serviceArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:service/test_ecs_cluster/test_ecs_service".format( - ACCOUNT_ID - ) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:service/test_ecs_cluster/test_ecs_service" ) response["service"]["serviceName"].should.equal("test_ecs_service") response["service"]["status"].should.equal("ACTIVE") response["service"]["taskDefinition"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) @@ -2912,7 +2872,7 @@ def test_list_tags_for_resource(): type(response["taskDefinition"]).should.be(dict) response["taskDefinition"]["revision"].should.equal(1) response["taskDefinition"]["taskDefinitionArn"].should.equal( - "arn:aws:ecs:us-east-1:{}:task-definition/test_ecs_task:1".format(ACCOUNT_ID) + f"arn:aws:ecs:us-east-1:{ACCOUNT_ID}:task-definition/test_ecs_task:1" ) task_definition_arn = response["taskDefinition"]["taskDefinitionArn"] @@ -3255,7 +3215,7 @@ def test_create_task_set(): )["services"][0]["serviceArn"] task_set["clusterArn"].should.equal(cluster_arn) task_set["serviceArn"].should.equal(service_arn) - task_set["taskDefinition"].should.match("{0}:1$".format(task_def_name)) + task_set["taskDefinition"].should.match(f"{task_def_name}:1$") task_set["scale"].should.equal({"value": 100.0, "unit": "PERCENT"}) task_set["loadBalancers"][0]["targetGroupArn"].should.equal( "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/" @@ -3384,12 +3344,12 @@ def test_describe_task_sets(): task_sets[0].should.have.key("tags") task_sets.should.have.length_of(1) - task_sets[0]["taskDefinition"].should.match("{0}:1$".format(task_def_name)) + task_sets[0]["taskDefinition"].should.match(f"{task_def_name}:1$") task_sets[0]["clusterArn"].should.equal(cluster_arn) task_sets[0]["serviceArn"].should.equal(service_arn) 
- task_sets[0]["serviceArn"].should.match("{0}$".format(service_name)) + task_sets[0]["serviceArn"].should.match(f"{service_name}$") task_sets[0]["scale"].should.equal({"value": 100.0, "unit": "PERCENT"}) - task_sets[0]["taskSetArn"].should.match("{0}$".format(task_sets[0]["id"])) + task_sets[0]["taskSetArn"].should.match(f"{task_sets[0]['id']}$") task_sets[0]["loadBalancers"][0]["targetGroupArn"].should.equal( "arn:aws:elasticloadbalancing:us-east-1:01234567890:targetgroup/" "c26b93c1bc35466ba792d5b08fe6a5bc/ec39113f8831453a" diff --git a/tests/test_efs/test_file_system.py b/tests/test_efs/test_file_system.py index 88325bb16..cf9af1576 100644 --- a/tests/test_efs/test_file_system.py +++ b/tests/test_efs/test_file_system.py @@ -262,7 +262,7 @@ def test_describe_file_systems_aws_create_sample_2(efs): def test_describe_file_systems_paging(efs): # Create several file systems. for i in range(10): - efs.create_file_system(CreationToken="foobar_{}".format(i)) + efs.create_file_system(CreationToken=f"foobar_{i}") # First call (Start) # ------------------ diff --git a/tests/test_efs/test_mount_target.py b/tests/test_efs/test_mount_target.py index d3550df77..385f3b6ff 100644 --- a/tests/test_efs/test_mount_target.py +++ b/tests/test_efs/test_mount_target.py @@ -91,9 +91,9 @@ def test_create_mount_target_aws_sample_2(efs, ec2, file_system, subnet): ip_addr = ip_addr_obj.exploded break else: - assert False, "Could not generate an IP address from CIDR block: {}".format( - subnet["CidrBlock"] - ) + assert ( + False + ), f"Could not generate an IP address from CIDR block: {subnet['CidrBlock']}" desc_sg_resp = ec2.describe_security_groups() security_group = desc_sg_resp["SecurityGroups"][0] security_group_id = security_group["GroupId"] @@ -216,8 +216,8 @@ def test_create_mount_target_too_many_security_groups(efs, ec2, file_system, sub for i in range(6): sg_info = ec2.create_security_group( VpcId=subnet["VpcId"], - GroupName="sg-{}".format(i), - Description="SG-{} protects us from 
the Goa'uld.".format(i), + GroupName=f"sg-{i}", + Description=f"SG-{i} protects us from the Goa'uld.", ) sg_id_list.append(sg_info["GroupId"]) with pytest.raises(ClientError) as exc_info: diff --git a/tests/test_efs/test_server.py b/tests/test_efs/test_server.py index a6fb98a29..4a97c852e 100644 --- a/tests/test_efs/test_server.py +++ b/tests/test_efs/test_server.py @@ -61,7 +61,7 @@ def test_efs_file_system_describe(efs_client): def test_efs_file_system_delete(file_system_id, efs_client): - res = efs_client.delete("/2015-02-01/file-systems/{}".format(file_system_id)) + res = efs_client.delete(f"/2015-02-01/file-systems/{file_system_id}") assert res.status_code == 204 @@ -84,12 +84,10 @@ def test_efs_mount_target_delete(file_system_id, subnet_id, efs_client): json={"FileSystemId": file_system_id, "SubnetId": subnet_id}, ) mt_id = create_res.json["MountTargetId"] - res = efs_client.delete("/2015-02-01/mount-targets/{}".format(mt_id)) + res = efs_client.delete(f"/2015-02-01/mount-targets/{mt_id}") assert res.status_code == 204 def test_efs_describe_backup_policy(file_system_id, efs_client): - res = efs_client.get( - "/2015-02-01/file-systems/{}/backup-policy".format(file_system_id) - ) + res = efs_client.get(f"/2015-02-01/file-systems/{file_system_id}/backup-policy") assert res.status_code == 200 diff --git a/tests/test_eks/test_eks_utils.py b/tests/test_eks/test_eks_utils.py index 6354c9ddb..8c9b85b94 100644 --- a/tests/test_eks/test_eks_utils.py +++ b/tests/test_eks/test_eks_utils.py @@ -60,7 +60,7 @@ def generate_fargate_profiles(client, cluster_name, num_profiles, minimal): client.create_fargate_profile( fargateProfileName=generate_random_name(), clusterName=cluster_name, - **_input_builder(FargateProfileInputs, minimal) + **_input_builder(FargateProfileInputs, minimal), )[ResponseAttributes.FARGATE_PROFILE][ FargateProfileAttributes.FARGATE_PROFILE_NAME ] @@ -78,17 +78,14 @@ def generate_nodegroups(client, cluster_name, num_nodegroups, minimal): 
client.create_nodegroup( nodegroupName=generate_random_name(), clusterName=cluster_name, - **_input_builder(NodegroupInputs, minimal) + **_input_builder(NodegroupInputs, minimal), )[ResponseAttributes.NODEGROUP][NodegroupAttributes.NODEGROUP_NAME] for _ in range(num_nodegroups) ] def generate_dict(prefix, count): - return { - "{prefix}_{count}".format(prefix=prefix, count=_count): str(_count) - for _count in range(count) - } + return {f"{prefix}_{_count}": str(_count) for _count in range(count)} def is_valid_uri(value): diff --git a/tests/test_elastictranscoder/test_elastictranscoder.py b/tests/test_elastictranscoder/test_elastictranscoder.py index 36b0f631c..c611c761e 100644 --- a/tests/test_elastictranscoder/test_elastictranscoder.py +++ b/tests/test_elastictranscoder/test_elastictranscoder.py @@ -24,9 +24,7 @@ def test_create_simple_pipeline(): pipeline.should.have.key("Id") pipeline.should.have.key("Name").being.equal("testpipeline") pipeline.should.have.key("Arn").being.equal( - "arn:aws:elastictranscoder:{}:{}:pipeline/{}".format( - region, ACCOUNT_ID, pipeline["Id"] - ) + f"arn:aws:elastictranscoder:{region}:{ACCOUNT_ID}:pipeline/{pipeline['Id']}" ) pipeline.should.have.key("Status").being.equal("Active") pipeline.should.have.key("InputBucket").being.equal("inputtest") @@ -64,9 +62,7 @@ def test_create_pipeline_with_content_config(): pipeline.should.have.key("Id") pipeline.should.have.key("Name").being.equal("testpipeline") pipeline.should.have.key("Arn").being.equal( - "arn:aws:elastictranscoder:{}:{}:pipeline/{}".format( - region, ACCOUNT_ID, pipeline["Id"] - ) + f"arn:aws:elastictranscoder:{region}:{ACCOUNT_ID}:pipeline/{pipeline['Id']}" ) pipeline.should.have.key("Status").being.equal("Active") pipeline.should.have.key("InputBucket").being.equal("inputtest") @@ -186,9 +182,7 @@ def test_list_pipelines(): pipeline.should.have.key("Id") pipeline.should.have.key("Name").being.equal("testpipeline") pipeline.should.have.key("Arn").being.equal( - 
"arn:aws:elastictranscoder:{}:{}:pipeline/{}".format( - region, ACCOUNT_ID, pipeline["Id"] - ) + f"arn:aws:elastictranscoder:{region}:{ACCOUNT_ID}:pipeline/{pipeline['Id']}" ) pipeline.should.have.key("Status").being.equal("Active") pipeline.should.have.key("InputBucket").being.equal("inputtest") @@ -227,9 +221,7 @@ def test_read_pipeline(): pipeline.should.have.key("Id") pipeline.should.have.key("Name").being.equal("testpipeline") pipeline.should.have.key("Arn").being.equal( - "arn:aws:elastictranscoder:{}:{}:pipeline/{}".format( - region, ACCOUNT_ID, pipeline["Id"] - ) + f"arn:aws:elastictranscoder:{region}:{ACCOUNT_ID}:pipeline/{pipeline['Id']}" ) pipeline.should.have.key("Status").being.equal("Active") pipeline.should.have.key("InputBucket").being.equal("inputtest") @@ -271,9 +263,7 @@ def test_read_nonexisting_pipeline_format(): err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal( - "The specified pipeline was not found: account={}, pipelineId={}.".format( - ACCOUNT_ID, pipeline_id - ) + f"The specified pipeline was not found: account={ACCOUNT_ID}, pipelineId={pipeline_id}." 
) @@ -298,9 +288,7 @@ def test_update_pipeline_name(): pipeline.should.have.key("Id") pipeline.should.have.key("Name").being.equal("newtestpipeline") pipeline.should.have.key("Arn").being.equal( - "arn:aws:elastictranscoder:{}:{}:pipeline/{}".format( - region, ACCOUNT_ID, pipeline["Id"] - ) + f"arn:aws:elastictranscoder:{region}:{ACCOUNT_ID}:pipeline/{pipeline['Id']}" ) pipeline.should.have.key("Status").being.equal("Active") pipeline.should.have.key("InputBucket").being.equal("inputtest") @@ -369,9 +357,7 @@ def test_update_nonexisting_pipeline(): err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") err["Message"].should.equal( - "The specified pipeline was not found: account={}, pipelineId={}.".format( - ACCOUNT_ID, pipeline_id - ) + f"The specified pipeline was not found: account={ACCOUNT_ID}, pipelineId={pipeline_id}." ) @@ -395,4 +381,4 @@ def test_delete_pipeline(): def create_role_name(name): - return "arn:aws:iam::{}:role/{}".format(ACCOUNT_ID, name) + return f"arn:aws:iam::{ACCOUNT_ID}:role/{name}" diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 0e666d020..051b117b4 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -248,7 +248,7 @@ def test_describe_paginated_balancers(): for i in range(51): client.create_load_balancer( - LoadBalancerName="my-lb%d" % i, + LoadBalancerName=f"my-lb{i}", Listeners=[ {"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080} ], diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 1c169f526..ff16b35b9 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -399,7 +399,7 @@ def test_describe_paginated_balancers(): for i in range(51): conn.create_load_balancer( - Name="my-lb%d" % i, + Name=f"my-lb{i}", Subnets=[subnet1.id, subnet2.id], SecurityGroups=[security_group.id], Scheme="internal", @@ -1685,9 +1685,7 @@ def test_cognito_action_listener_rule(): action = { "Type": 
"authenticate-cognito", "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), + "UserPoolArn": f"arn:aws:cognito-idp:us-east-1:{ACCOUNT_ID}:userpool/us-east-1_ABCD1234", "UserPoolClientId": "abcd1234abcd", "UserPoolDomain": "testpool", "AuthenticationRequestExtraParams": {"param": "test"}, diff --git a/tests/test_elbv2/test_elbv2_cloudformation.py b/tests/test_elbv2/test_elbv2_cloudformation.py index a661d0561..e04a544d5 100644 --- a/tests/test_elbv2/test_elbv2_cloudformation.py +++ b/tests/test_elbv2/test_elbv2_cloudformation.py @@ -144,9 +144,7 @@ def test_cognito_action_listener_rule_cloudformation(): { "Type": "authenticate-cognito", "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), + "UserPoolArn": f"arn:aws:cognito-idp:us-east-1:{ACCOUNT_ID}:userpool/us-east-1_ABCD1234", "UserPoolClientId": "abcd1234abcd", "UserPoolDomain": "testpool", }, @@ -175,9 +173,7 @@ def test_cognito_action_listener_rule_cloudformation(): { "Type": "authenticate-cognito", "AuthenticateCognitoConfig": { - "UserPoolArn": "arn:aws:cognito-idp:us-east-1:{}:userpool/us-east-1_ABCD1234".format( - ACCOUNT_ID - ), + "UserPoolArn": f"arn:aws:cognito-idp:us-east-1:{ACCOUNT_ID}:userpool/us-east-1_ABCD1234", "UserPoolClientId": "abcd1234abcd", "UserPoolDomain": "testpool", }, diff --git a/tests/test_elbv2/test_elbv2_target_groups.py b/tests/test_elbv2/test_elbv2_target_groups.py index a67350466..47f91b698 100644 --- a/tests/test_elbv2/test_elbv2_target_groups.py +++ b/tests/test_elbv2/test_elbv2_target_groups.py @@ -131,11 +131,7 @@ def test_create_target_group_and_listeners(): Protocol="HTTPS", Port=443, Certificates=[ - { - "CertificateArn": "arn:aws:iam:{}:server-certificate/test-cert".format( - ACCOUNT_ID - ) - } + {"CertificateArn": f"arn:aws:iam:{ACCOUNT_ID}:server-certificate/test-cert"} ], DefaultActions=[actions], ) @@ 
-143,13 +139,7 @@ def test_create_target_group_and_listeners(): listener.get("Port").should.equal(443) listener.get("Protocol").should.equal("HTTPS") listener.get("Certificates").should.equal( - [ - { - "CertificateArn": "arn:aws:iam:{}:server-certificate/test-cert".format( - ACCOUNT_ID - ) - } - ] + [{"CertificateArn": f"arn:aws:iam:{ACCOUNT_ID}:server-certificate/test-cert"}] ) listener.get("DefaultActions").should.equal( [{"TargetGroupArn": target_group_arn, "Type": "forward"}] diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 67cc614ae..38363056d 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -183,9 +183,7 @@ def test_describe_cluster(): cl["TerminationProtected"].should.equal(False) cl["VisibleToAllUsers"].should.equal(True) cl["ClusterArn"].should.equal( - "arn:aws:elasticmapreduce:{0}:{1}:cluster/{2}".format( - region_name, ACCOUNT_ID, cluster_id - ) + f"arn:aws:elasticmapreduce:{region_name}:{ACCOUNT_ID}:cluster/{cluster_id}" ) @@ -396,7 +394,7 @@ def test_run_job_flow(): args = deepcopy(run_job_flow_args) resp = client.run_job_flow(**args) resp["ClusterArn"].startswith( - "arn:aws:elasticmapreduce:{0}:{1}:cluster/".format(region_name, ACCOUNT_ID) + f"arn:aws:elasticmapreduce:{region_name}:{ACCOUNT_ID}:cluster/" ) job_flow_id = resp["JobFlowId"] resp = client.describe_job_flows(JobFlowIds=[job_flow_id])["JobFlows"][0] @@ -584,9 +582,7 @@ def test_put_remove_auto_scaling_policy(): del resp["AutoScalingPolicy"]["Status"] resp["AutoScalingPolicy"].should.equal(auto_scaling_policy_with_cluster_id) resp["ClusterArn"].should.equal( - "arn:aws:elasticmapreduce:{0}:{1}:cluster/{2}".format( - region_name, ACCOUNT_ID, cluster_id - ) + f"arn:aws:elasticmapreduce:{region_name}:{ACCOUNT_ID}:cluster/{cluster_id}" ) core_instance_group = [ diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index a5dda6e25..8613f4248 100644 --- a/tests/test_events/test_events.py +++ 
b/tests/test_events/test_events.py @@ -185,7 +185,7 @@ def test_describe_rule(): response["Name"].should.equal(rule_name) response["Arn"].should.equal( - "arn:aws:events:us-west-2:{0}:rule/{1}".format(ACCOUNT_ID, rule_name) + f"arn:aws:events:us-west-2:{ACCOUNT_ID}:rule/{rule_name}" ) @@ -201,7 +201,7 @@ def test_describe_rule_with_event_bus_name(): EventPattern=json.dumps({"account": [ACCOUNT_ID]}), State="DISABLED", Description="test rule", - RoleArn="arn:aws:iam::{}:role/test-role".format(ACCOUNT_ID), + RoleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/test-role", EventBusName=event_bus_name, ) @@ -210,18 +210,14 @@ def test_describe_rule_with_event_bus_name(): # then response["Arn"].should.equal( - "arn:aws:events:eu-central-1:{0}:rule/{1}/{2}".format( - ACCOUNT_ID, event_bus_name, rule_name - ) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:rule/{event_bus_name}/{rule_name}" ) response["CreatedBy"].should.equal(ACCOUNT_ID) response["Description"].should.equal("test rule") response["EventBusName"].should.equal(event_bus_name) json.loads(response["EventPattern"]).should.equal({"account": [ACCOUNT_ID]}) response["Name"].should.equal(rule_name) - response["RoleArn"].should.equal( - "arn:aws:iam::{}:role/test-role".format(ACCOUNT_ID) - ) + response["RoleArn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:role/test-role") response["State"].should.equal("DISABLED") response.should_not.have.key("ManagedBy") @@ -505,9 +501,7 @@ def test_put_targets_error_missing_parameter_sqs_fifo(): Targets=[ { "Id": "sqs-fifo", - "Arn": "arn:aws:sqs:eu-central-1:{}:test-queue.fifo".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:sqs:eu-central-1:{ACCOUNT_ID}:test-queue.fifo", } ], ) @@ -770,7 +764,7 @@ def test_create_event_bus(): response = client.create_event_bus(Name="test-bus") response["EventBusArn"].should.equal( - "arn:aws:events:us-east-1:{}:event-bus/test-bus".format(ACCOUNT_ID) + f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/test-bus" ) @@ -808,7 +802,7 @@ def test_describe_event_bus(): 
response["Name"].should.equal("default") response["Arn"].should.equal( - "arn:aws:events:us-east-1:{}:event-bus/default".format(ACCOUNT_ID) + f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/default" ) response.should_not.have.key("Policy") @@ -824,7 +818,7 @@ def test_describe_event_bus(): response["Name"].should.equal("test-bus") response["Arn"].should.equal( - "arn:aws:events:us-east-1:{}:event-bus/test-bus".format(ACCOUNT_ID) + f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/test-bus" ) json.loads(response["Policy"]).should.equal( { @@ -835,9 +829,7 @@ def test_describe_event_bus(): "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::111111111111:root"}, "Action": "events:PutEvents", - "Resource": "arn:aws:events:us-east-1:{}:event-bus/test-bus".format( - ACCOUNT_ID - ), + "Resource": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/test-bus", } ], } @@ -868,33 +860,23 @@ def test_list_event_buses(): [ { "Name": "default", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/default", }, { "Name": "other-bus-1", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/other-bus-1".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/other-bus-1", }, { "Name": "other-bus-2", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/other-bus-2".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/other-bus-2", }, { "Name": "test-bus-1", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/test-bus-1".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/test-bus-1", }, { "Name": "test-bus-2", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/test-bus-2".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/test-bus-2", }, ] ) @@ -906,15 +888,11 @@ def test_list_event_buses(): [ { "Name": "other-bus-1", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/other-bus-1".format( 
- ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/other-bus-1", }, { "Name": "other-bus-2", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/other-bus-2".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/other-bus-2", }, ] ) @@ -936,9 +914,7 @@ def test_delete_event_bus(): [ { "Name": "default", - "Arn": "arn:aws:events:us-east-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:us-east-1:{ACCOUNT_ID}:event-bus/default", } ] ) @@ -1016,9 +992,7 @@ def test_tag_resource_error_unknown_arn(): # when with pytest.raises(ClientError) as e: client.tag_resource( - ResourceARN="arn:aws:events:eu-central-1:{0}:rule/unknown".format( - ACCOUNT_ID - ), + ResourceARN=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:rule/unknown", Tags=[], ) @@ -1040,9 +1014,7 @@ def test_untag_resource_error_unknown_arn(): # when with pytest.raises(ClientError) as e: client.untag_resource( - ResourceARN="arn:aws:events:eu-central-1:{0}:rule/unknown".format( - ACCOUNT_ID - ), + ResourceARN=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:rule/unknown", TagKeys=[], ) @@ -1064,9 +1036,7 @@ def test_list_tags_for_resource_error_unknown_arn(): # when with pytest.raises(ClientError) as e: client.list_tags_for_resource( - ResourceARN="arn:aws:events:eu-central-1:{0}:rule/unknown".format( - ACCOUNT_ID - ) + ResourceARN=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:rule/unknown" ) # then @@ -1088,24 +1058,22 @@ def test_create_archive(): # when response = client.create_archive( ArchiveName=archive_name, - EventSourceArn="arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default", ) # then response["ArchiveArn"].should.equal( - "arn:aws:events:eu-central-1:{0}:archive/{1}".format(ACCOUNT_ID, archive_name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/{archive_name}" ) response["CreationTime"].should.be.a(datetime) 
response["State"].should.equal("ENABLED") # check for archive rule existence - rule_name = "Events-Archive-{}".format(archive_name) + rule_name = f"Events-Archive-{archive_name}" response = client.describe_rule(Name=rule_name) response["Arn"].should.equal( - "arn:aws:events:eu-central-1:{0}:rule/{1}".format(ACCOUNT_ID, rule_name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:rule/{rule_name}" ) response["CreatedBy"].should.equal(ACCOUNT_ID) response["EventBusName"].should.equal("default") @@ -1142,7 +1110,7 @@ def test_create_archive_custom_event_bus(): # then response["ArchiveArn"].should.equal( - "arn:aws:events:eu-central-1:{}:archive/test-archive".format(ACCOUNT_ID) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/test-archive" ) response["CreationTime"].should.be.a(datetime) response["State"].should.equal("ENABLED") @@ -1159,7 +1127,7 @@ def test_create_archive_error_long_name(): client.create_archive( ArchiveName=name, EventSourceArn=( - "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" ), ) @@ -1170,8 +1138,8 @@ def test_create_archive_error_long_name(): ex.response["Error"]["Code"].should.contain("ValidationException") ex.response["Error"]["Message"].should.equal( " 1 validation error detected: " - "Value '{}' at 'archiveName' failed to satisfy constraint: " - "Member must have length less than or equal to 48".format(name) + f"Value '{name}' at 'archiveName' failed to satisfy constraint: " + "Member must have length less than or equal to 48" ) @@ -1185,7 +1153,7 @@ def test_create_archive_error_invalid_event_pattern(): client.create_archive( ArchiveName="test-archive", EventSourceArn=( - "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" ), EventPattern="invalid", ) @@ -1210,7 +1178,7 @@ def test_create_archive_error_invalid_event_pattern_not_an_array(): client.create_archive( 
ArchiveName="test-archive", EventSourceArn=( - "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" ), EventPattern=json.dumps( { @@ -1243,9 +1211,7 @@ def test_create_archive_error_unknown_event_bus(): client.create_archive( ArchiveName="test-archive", EventSourceArn=( - "arn:aws:events:eu-central-1:{}:event-bus/{}".format( - ACCOUNT_ID, event_bus_name - ) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/{event_bus_name}" ), ) @@ -1255,7 +1221,7 @@ def test_create_archive_error_unknown_event_bus(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") ex.response["Error"]["Message"].should.equal( - "Event bus {} does not exist.".format(event_bus_name) + f"Event bus {event_bus_name} does not exist." ) @@ -1264,7 +1230,7 @@ def test_create_archive_error_duplicate(): # given client = boto3.client("events", "eu-central-1") name = "test-archive" - source_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" client.create_archive(ArchiveName=name, EventSourceArn=source_arn) # when @@ -1284,7 +1250,7 @@ def test_describe_archive(): # given client = boto3.client("events", "eu-central-1") name = "test-archive" - source_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" event_pattern = json.dumps({"key": ["value"]}) client.create_archive( ArchiveName=name, @@ -1298,7 +1264,7 @@ def test_describe_archive(): # then response["ArchiveArn"].should.equal( - "arn:aws:events:eu-central-1:{0}:archive/{1}".format(ACCOUNT_ID, name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/{name}" ) response["ArchiveName"].should.equal(name) response["CreationTime"].should.be.a(datetime) @@ -1326,9 +1292,7 @@ def 
test_describe_archive_error_unknown_archive(): ex.operation_name.should.equal("DescribeArchive") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "Archive {} does not exist.".format(name) - ) + ex.response["Error"]["Message"].should.equal(f"Archive {name} does not exist.") @mock_events @@ -1336,7 +1300,7 @@ def test_list_archives(): # given client = boto3.client("events", "eu-central-1") name = "test-archive" - source_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" event_pattern = json.dumps({"key": ["value"]}) client.create_archive( ArchiveName=name, @@ -1368,7 +1332,7 @@ def test_list_archives(): def test_list_archives_with_name_prefix(): # given client = boto3.client("events", "eu-central-1") - source_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" client.create_archive(ArchiveName="test", EventSourceArn=source_arn) client.create_archive(ArchiveName="test-archive", EventSourceArn=source_arn) @@ -1384,7 +1348,7 @@ def test_list_archives_with_name_prefix(): def test_list_archives_with_source_arn(): # given client = boto3.client("events", "eu-central-1") - source_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" source_arn_2 = client.create_event_bus(Name="test-bus")["EventBusArn"] client.create_archive(ArchiveName="test", EventSourceArn=source_arn) client.create_archive(ArchiveName="test-archive", EventSourceArn=source_arn_2) @@ -1401,7 +1365,7 @@ def test_list_archives_with_source_arn(): def test_list_archives_with_state(): # given client = boto3.client("events", "eu-central-1") - source_arn = 
"arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" client.create_archive(ArchiveName="test", EventSourceArn=source_arn) client.create_archive(ArchiveName="test-archive", EventSourceArn=source_arn) @@ -1459,7 +1423,7 @@ def test_update_archive(): # given client = boto3.client("events", "eu-central-1") name = "test-archive" - source_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format(ACCOUNT_ID) + source_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" event_pattern = json.dumps({"key": ["value"]}) archive_arn = client.create_archive(ArchiveName=name, EventSourceArn=source_arn)[ "ArchiveArn" @@ -1499,9 +1463,7 @@ def test_update_archive_error_invalid_event_pattern(): name = "test-archive" client.create_archive( ArchiveName=name, - EventSourceArn="arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default", ) # when @@ -1533,9 +1495,7 @@ def test_update_archive_error_unknown_archive(): ex.operation_name.should.equal("UpdateArchive") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "Archive {} does not exist.".format(name) - ) + ex.response["Error"]["Message"].should.equal(f"Archive {name} does not exist.") @mock_events @@ -1545,9 +1505,7 @@ def test_delete_archive(): name = "test-archive" client.create_archive( ArchiveName=name, - EventSourceArn="arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default", ) # when @@ -1573,9 +1531,7 @@ def test_delete_archive_error_unknown_archive(): ex.operation_name.should.equal("DeleteArchive") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) 
ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "Archive {} does not exist.".format(name) - ) + ex.response["Error"]["Message"].should.equal(f"Archive {name} does not exist.") @mock_events @@ -1585,9 +1541,7 @@ def test_archive_actual_events(): name = "test-archive" name_2 = "test-archive-no-match" name_3 = "test-archive-matches" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" event = { "Source": "source", "DetailType": "type", @@ -1629,9 +1583,7 @@ def test_archive_actual_events(): def test_archive_event_with_bus_arn(): # given client = boto3.client("events", "eu-central-1") - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_name = "mock_archive" event_with_bus_arn = { "Source": "source", @@ -1658,9 +1610,7 @@ def test_start_replay(): # given client = boto3.client("events", "eu-central-1") name = "test-replay" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-archive", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -1676,7 +1626,7 @@ def test_start_replay(): # then response["ReplayArn"].should.equal( - "arn:aws:events:eu-central-1:{0}:replay/{1}".format(ACCOUNT_ID, name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:replay/{name}" ) response["ReplayStartTime"].should.be.a(datetime) response["State"].should.equal("STARTING") @@ -1692,15 +1642,11 @@ def test_start_replay_error_unknown_event_bus(): with pytest.raises(ClientError) as e: client.start_replay( ReplayName="test", - EventSourceArn="arn:aws:events:eu-central-1:{}:archive/test".format( - ACCOUNT_ID - ), + 
EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/test", EventStartTime=datetime(2021, 2, 1), EventEndTime=datetime(2021, 2, 2), Destination={ - "Arn": "arn:aws:events:eu-central-1:{0}:event-bus/{1}".format( - ACCOUNT_ID, event_bus_name - ), + "Arn": f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/{event_bus_name}", }, ) @@ -1710,7 +1656,7 @@ def test_start_replay_error_unknown_event_bus(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") ex.response["Error"]["Message"].should.equal( - "Event bus {} does not exist.".format(event_bus_name) + f"Event bus {event_bus_name} does not exist." ) @@ -1723,9 +1669,7 @@ def test_start_replay_error_invalid_event_bus_arn(): with pytest.raises(ClientError) as e: client.start_replay( ReplayName="test", - EventSourceArn="arn:aws:events:eu-central-1:{}:archive/test".format( - ACCOUNT_ID - ), + EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/test", EventStartTime=datetime(2021, 2, 1), EventEndTime=datetime(2021, 2, 2), Destination={ @@ -1753,15 +1697,11 @@ def test_start_replay_error_unknown_archive(): with pytest.raises(ClientError) as e: client.start_replay( ReplayName="test", - EventSourceArn="arn:aws:events:eu-central-1:{0}:archive/{1}".format( - ACCOUNT_ID, archive_name - ), + EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/{archive_name}", EventStartTime=datetime(2021, 2, 1), EventEndTime=datetime(2021, 2, 2), Destination={ - "Arn": "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + "Arn": f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default", }, ) @@ -1772,7 +1712,7 @@ def test_start_replay_error_unknown_archive(): ex.response["Error"]["Code"].should.contain("ValidationException") ex.response["Error"]["Message"].should.equal( "Parameter EventSourceArn is not valid. 
" - "Reason: Archive {} does not exist.".format(archive_name) + f"Reason: Archive {archive_name} does not exist." ) @@ -1782,9 +1722,7 @@ def test_start_replay_error_cross_event_bus(): client = boto3.client("events", "eu-central-1") archive_arn = client.create_archive( ArchiveName="test-archive", - EventSourceArn="arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ), + EventSourceArn=f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default", )["ArchiveArn"] event_bus_arn = client.create_event_bus(Name="test-bus")["EventBusArn"] @@ -1813,9 +1751,7 @@ def test_start_replay_error_cross_event_bus(): def test_start_replay_error_invalid_end_time(): # given client = boto3.client("events", "eu-central-1") - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-archive", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -1846,9 +1782,7 @@ def test_start_replay_error_duplicate(): # given client = boto3.client("events", "eu-central-1") name = "test-replay" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-archive", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -1875,9 +1809,7 @@ def test_start_replay_error_duplicate(): ex.operation_name.should.equal("StartReplay") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceAlreadyExistsException") - ex.response["Error"]["Message"].should.equal( - "Replay {} already exists.".format(name) - ) + ex.response["Error"]["Message"].should.equal(f"Replay {name} already exists.") @mock_events @@ -1885,9 +1817,7 @@ def test_describe_replay(): # given client = boto3.client("events", "eu-central-1") name = 
"test-replay" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-archive", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -1910,7 +1840,7 @@ def test_describe_replay(): response["EventStartTime"].should.equal(datetime(2021, 2, 1, tzinfo=pytz.utc)) response["EventEndTime"].should.equal(datetime(2021, 2, 2, tzinfo=pytz.utc)) response["ReplayArn"].should.equal( - "arn:aws:events:eu-central-1:{0}:replay/{1}".format(ACCOUNT_ID, name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:replay/{name}" ) response["ReplayName"].should.equal(name) response["ReplayStartTime"].should.be.a(datetime) @@ -1933,9 +1863,7 @@ def test_describe_replay_error_unknown_replay(): ex.operation_name.should.equal("DescribeReplay") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "Replay {} does not exist.".format(name) - ) + ex.response["Error"]["Message"].should.equal(f"Replay {name} does not exist.") @mock_events @@ -1943,9 +1871,7 @@ def test_list_replays(): # given client = boto3.client("events", "eu-central-1") name = "test-replay" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-replay", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -1977,9 +1903,7 @@ def test_list_replays(): def test_list_replays_with_name_prefix(): # given client = boto3.client("events", "eu-central-1") - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( 
ArchiveName="test-replay", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -2010,9 +1934,7 @@ def test_list_replays_with_name_prefix(): def test_list_replays_with_source_arn(): # given client = boto3.client("events", "eu-central-1") - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-replay", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -2042,9 +1964,7 @@ def test_list_replays_with_source_arn(): def test_list_replays_with_state(): # given client = boto3.client("events", "eu-central-1") - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-replay", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -2117,9 +2037,7 @@ def test_cancel_replay(): # given client = boto3.client("events", "eu-central-1") name = "test-replay" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-archive", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -2137,7 +2055,7 @@ def test_cancel_replay(): # then response["ReplayArn"].should.equal( - "arn:aws:events:eu-central-1:{0}:replay/{1}".format(ACCOUNT_ID, name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:replay/{name}" ) response["State"].should.equal("CANCELLING") @@ -2160,9 +2078,7 @@ def test_cancel_replay_error_unknown_replay(): ex.operation_name.should.equal("CancelReplay") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") - ex.response["Error"]["Message"].should.equal( - "Replay {} does not exist.".format(name) - ) + 
ex.response["Error"]["Message"].should.equal(f"Replay {name} does not exist.") @mock_events @@ -2170,9 +2086,7 @@ def test_cancel_replay_error_illegal_state(): # given client = boto3.client("events", "eu-central-1") name = "test-replay" - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" archive_arn = client.create_archive( ArchiveName="test-archive", EventSourceArn=event_bus_arn )["ArchiveArn"] @@ -2196,7 +2110,7 @@ def test_cancel_replay_error_illegal_state(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("IllegalStatusException") ex.response["Error"]["Message"].should.equal( - "Replay {} is not in a valid state for this operation.".format(name) + f"Replay {name} is not in a valid state for this operation." ) @@ -2209,18 +2123,14 @@ def test_start_replay_send_to_log_group(): log_group_name = "/test-group" rule_name = "test-rule" logs_client.create_log_group(logGroupName=log_group_name) - event_bus_arn = "arn:aws:events:eu-central-1:{}:event-bus/default".format( - ACCOUNT_ID - ) + event_bus_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:event-bus/default" client.put_rule(Name=rule_name, EventPattern=json.dumps({"account": [ACCOUNT_ID]})) client.put_targets( Rule=rule_name, Targets=[ { "Id": "test", - "Arn": "arn:aws:logs:eu-central-1:{0}:log-group:{1}".format( - ACCOUNT_ID, log_group_name - ), + "Arn": f"arn:aws:logs:eu-central-1:{ACCOUNT_ID}:log-group:{log_group_name}", } ], ) @@ -2296,13 +2206,13 @@ def test_create_and_list_connections(): ) response.get("ConnectionArn").should.contain( - "arn:aws:events:eu-central-1:{0}:connection/test/".format(ACCOUNT_ID) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:connection/test/" ) response = client.list_connections() response.get("Connections")[0].get("ConnectionArn").should.contain( - 
"arn:aws:events:eu-central-1:{0}:connection/test/".format(ACCOUNT_ID) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:connection/test/" ) diff --git a/tests/test_events/test_events_cloudformation.py b/tests/test_events/test_events_cloudformation.py index d33c6b995..b0d06b907 100644 --- a/tests/test_events/test_events_cloudformation.py +++ b/tests/test_events/test_events_cloudformation.py @@ -84,7 +84,7 @@ def test_create_archive(): cfn_client.create_stack(StackName=stack_name, TemplateBody=template) # then - archive_arn = "arn:aws:events:eu-central-1:{0}:archive/{1}".format(ACCOUNT_ID, name) + archive_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/{name}" stack = cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0] stack["Outputs"][0]["OutputValue"].should.equal(archive_arn) @@ -119,7 +119,7 @@ def test_update_archive(): response = events_client.describe_archive(ArchiveName=name) response["ArchiveArn"].should.equal( - "arn:aws:events:eu-central-1:{0}:archive/{1}".format(ACCOUNT_ID, name) + f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:archive/{name}" ) response["Description"].should.equal("test archive") @@ -156,7 +156,7 @@ def test_create_rule(): cfn_client.create_stack(StackName=stack_name, TemplateBody=template) # then - rule_arn = "arn:aws:events:eu-central-1:{0}:rule/{1}".format(ACCOUNT_ID, name) + rule_arn = f"arn:aws:events:eu-central-1:{ACCOUNT_ID}:rule/{name}" stack = cfn_client.describe_stacks(StackName=stack_name)["Stacks"][0] stack["Outputs"][0]["OutputValue"].should.equal(rule_arn) diff --git a/tests/test_events/test_events_integration.py b/tests/test_events/test_events_integration.py index 231760133..1ba9550c1 100644 --- a/tests/test_events/test_events_integration.py +++ b/tests/test_events/test_events_integration.py @@ -28,9 +28,7 @@ def test_send_to_cw_log_group(): Targets=[ { "Id": "logs", - "Arn": "arn:aws:logs:eu-central-1:{0}:log-group:{1}".format( - ACCOUNT_ID, log_group_name - ), + "Arn": 
f"arn:aws:logs:eu-central-1:{ACCOUNT_ID}:log-group:{log_group_name}", } ], ) diff --git a/tests/test_firehose/test_firehose_destination_types.py b/tests/test_firehose/test_firehose_destination_types.py index 2f8b7599e..8390db197 100644 --- a/tests/test_firehose/test_firehose_destination_types.py +++ b/tests/test_firehose/test_firehose_destination_types.py @@ -16,7 +16,7 @@ def create_extended_s3_delivery_stream(client, stream_name): DeliveryStreamName=stream_name, DeliveryStreamType="DirectPut", ExtendedS3DestinationConfiguration={ - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(ACCOUNT_ID), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "CompressionFormat": "UNCOMPRESSED", @@ -28,9 +28,7 @@ def create_extended_s3_delivery_stream(client, stream_name): }, "SchemaConfiguration": { "DatabaseName": stream_name, - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "TableName": "outputTable", }, }, @@ -43,7 +41,7 @@ def create_redshift_delivery_stream(client, stream_name): return client.create_delivery_stream( DeliveryStreamName=stream_name, RedshiftDestinationConfiguration={ - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(ACCOUNT_ID), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "ClusterJDBCURL": "jdbc:redshift://host.amazonaws.com:5439/database", "CopyCommand": { "DataTableName": "outputTable", @@ -52,9 +50,7 @@ def create_redshift_delivery_stream(client, stream_name): "Username": "username", "Password": "password", "S3Configuration": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, 
@@ -70,7 +66,7 @@ def create_elasticsearch_delivery_stream(client, stream_name): DeliveryStreamName=stream_name, DeliveryStreamType="DirectPut", ElasticsearchDestinationConfiguration={ - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(ACCOUNT_ID), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "DomainARN": "arn:aws:es:::domain/firehose-test", "IndexName": "myIndex", "TypeName": "UNCOMPRESSED", @@ -78,9 +74,7 @@ def create_elasticsearch_delivery_stream(client, stream_name): "BufferingHints": {"IntervalInSeconds": 123, "SizeInMBs": 123}, "RetryOptions": {"DurationInSeconds": 123}, "S3Configuration": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, @@ -101,9 +95,7 @@ def create_http_delivery_stream(client, stream_name): "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, "CloudWatchLoggingOptions": {"Enabled": False}, "S3Configuration": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, @@ -140,9 +132,7 @@ def test_create_redshift_delivery_stream(): { "DestinationId": "destinationId-000000000001", "RedshiftDestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "ClusterJDBCURL": "jdbc:redshift://host.amazonaws.com:5439/database", "CopyCommand": { "DataTableName": "outputTable", @@ -150,9 +140,7 @@ def test_create_redshift_delivery_stream(): }, "Username": "username", "S3DestinationDescription": { - "RoleARN": 
"arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": { @@ -196,9 +184,7 @@ def test_create_extended_s3_delivery_stream(): { "DestinationId": "destinationId-000000000001", "ExtendedS3DestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "CompressionFormat": "UNCOMPRESSED", @@ -214,17 +200,13 @@ def test_create_extended_s3_delivery_stream(): }, "SchemaConfiguration": { "DatabaseName": stream_name, - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "TableName": "outputTable", }, }, }, "S3DestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "CompressionFormat": "UNCOMPRESSED", @@ -263,9 +245,7 @@ def test_create_elasticsearch_delivery_stream(): { "DestinationId": "destinationId-000000000001", "ElasticsearchDestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "DomainARN": "arn:aws:es:::domain/firehose-test", "IndexName": "myIndex", "TypeName": "UNCOMPRESSED", @@ -273,9 +253,7 @@ def test_create_elasticsearch_delivery_stream(): "BufferingHints": {"IntervalInSeconds": 123, "SizeInMBs": 123}, "RetryOptions": {"DurationInSeconds": 123}, "S3DestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": 
f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": { @@ -301,7 +279,7 @@ def test_create_s3_delivery_stream(): response = client.create_delivery_stream( DeliveryStreamName=stream_name, S3DestinationConfiguration={ - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format(ACCOUNT_ID), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, @@ -328,9 +306,7 @@ def test_create_s3_delivery_stream(): { "DestinationId": "destinationId-000000000001", "S3DestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, @@ -375,9 +351,7 @@ def test_create_http_stream(): "BufferingHints": {"SizeInMBs": 123, "IntervalInSeconds": 124}, "CloudWatchLoggingOptions": {"Enabled": False}, "S3DestinationDescription": { - "RoleARN": "arn:aws:iam::{}:role/firehose_delivery_role".format( - ACCOUNT_ID - ), + "RoleARN": f"arn:aws:iam::{ACCOUNT_ID}:role/firehose_delivery_role", "BucketARN": "arn:aws:s3:::firehose-test", "Prefix": "myFolder/", "BufferingHints": { diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py index a868f7fd0..ca31c97ad 100644 --- a/tests/test_glue/helpers.py +++ b/tests/test_glue/helpers.py @@ -15,9 +15,7 @@ from .fixtures.schema_registry import ( def create_database_input(database_name): database_input = copy.deepcopy(DATABASE_INPUT) database_input["Name"] = database_name - database_input["LocationUri"] = "s3://my-bucket/{database_name}".format( - database_name=database_name - ) + database_input["LocationUri"] = f"s3://my-bucket/{database_name}" return database_input @@ 
-42,9 +40,7 @@ def create_table_input(database_name, table_name, columns=None, partition_keys=N table_input["StorageDescriptor"]["Columns"] = columns or [] table_input["StorageDescriptor"][ "Location" - ] = "s3://my-bucket/{database_name}/{table_name}".format( - database_name=database_name, table_name=table_name - ) + ] = f"s3://my-bucket/{database_name}/{table_name}" return table_input @@ -93,9 +89,7 @@ def create_column(name, type_, comment=None, parameters=None): def create_partition_input(database_name, table_name, values=None, columns=None): - root_path = "s3://my-bucket/{database_name}/{table_name}".format( - database_name=database_name, table_name=table_name - ) + root_path = f"s3://my-bucket/{database_name}/{table_name}" part_input = copy.deepcopy(PARTITION_INPUT) part_input["Values"] = values or [] diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 6c253bfac..87eaa8701 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -552,7 +552,7 @@ def test_batch_create_partition(): partition_inputs = [] for i in range(0, 20): - values = ["2018-10-{:2}".format(i)] + values = [f"2018-10-{i:2}"] part_input = helpers.create_partition_input( database_name, table_name, values=values ) @@ -994,7 +994,7 @@ def test_batch_delete_partition(): partition_inputs = [] for i in range(0, 20): - values = ["2018-10-{:2}".format(i)] + values = [f"2018-10-{i:2}"] part_input = helpers.create_partition_input( database_name, table_name, values=values ) @@ -1027,7 +1027,7 @@ def test_batch_delete_partition_with_bad_partitions(): partition_inputs = [] for i in range(0, 20): - values = ["2018-10-{:2}".format(i)] + values = [f"2018-10-{i:2}"] part_input = helpers.create_partition_input( database_name, table_name, values=values ) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index ad83798ac..4280a80b2 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -514,7 +514,7 
@@ def test_create_policy(): PolicyName="TestCreatePolicy", PolicyDocument=MOCK_POLICY ) response["Policy"]["Arn"].should.equal( - "arn:aws:iam::{}:policy/TestCreatePolicy".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicy" ) @@ -547,14 +547,12 @@ def test_create_policy_versions(): conn = boto3.client("iam", region_name="us-east-1") with pytest.raises(ClientError): conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestCreatePolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicyVersion", PolicyDocument='{"some":"policy"}', ) conn.create_policy(PolicyName="TestCreatePolicyVersion", PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestCreatePolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicyVersion", PolicyDocument=MOCK_POLICY, SetAsDefault=True, ) @@ -562,11 +560,11 @@ def test_create_policy_versions(): version.get("PolicyVersion").get("VersionId").should.equal("v2") version.get("PolicyVersion").get("IsDefaultVersion").should.be.ok conn.delete_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestCreatePolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicyVersion", VersionId="v1", ) version = conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestCreatePolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicyVersion", PolicyDocument=MOCK_POLICY, ) version.get("PolicyVersion").get("VersionId").should.equal("v3") @@ -581,16 +579,12 @@ def test_create_many_policy_versions(): ) for _ in range(0, 4): conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestCreateManyPolicyVersions".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreateManyPolicyVersions", PolicyDocument=MOCK_POLICY, ) with pytest.raises(ClientError): conn.create_policy_version( - 
PolicyArn="arn:aws:iam::{}:policy/TestCreateManyPolicyVersions".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreateManyPolicyVersions", PolicyDocument=MOCK_POLICY, ) @@ -602,23 +596,17 @@ def test_set_default_policy_version(): PolicyName="TestSetDefaultPolicyVersion", PolicyDocument=MOCK_POLICY ) conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion", PolicyDocument=MOCK_POLICY_2, SetAsDefault=True, ) conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion", PolicyDocument=MOCK_POLICY_3, SetAsDefault=True, ) versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion" ) versions.get("Versions")[0].get("Document").should.equal(json.loads(MOCK_POLICY)) versions.get("Versions")[0].get("IsDefaultVersion").shouldnt.be.ok @@ -628,15 +616,11 @@ def test_set_default_policy_version(): versions.get("Versions")[2].get("IsDefaultVersion").should.be.ok conn.set_default_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion", VersionId="v1", ) versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion" ) versions.get("Versions")[0].get("Document").should.equal(json.loads(MOCK_POLICY)) versions.get("Versions")[0].get("IsDefaultVersion").should.be.ok @@ -647,20 +631,16 @@ def test_set_default_policy_version(): # Set default version for non-existing policy 
conn.set_default_policy_version.when.called_with( - PolicyArn="arn:aws:iam::{}:policy/TestNonExistingPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestNonExistingPolicy", VersionId="v1", ).should.throw( ClientError, - "Policy arn:aws:iam::{}:policy/TestNonExistingPolicy not found".format( - ACCOUNT_ID - ), + f"Policy arn:aws:iam::{ACCOUNT_ID}:policy/TestNonExistingPolicy not found", ) # Set default version for incorrect version conn.set_default_policy_version.when.called_with( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion", VersionId="wrong_version_id", ).should.throw( ClientError, @@ -669,15 +649,11 @@ def test_set_default_policy_version(): # Set default version for non-existing version conn.set_default_policy_version.when.called_with( - PolicyArn="arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion", VersionId="v4", ).should.throw( ClientError, - "Policy arn:aws:iam::{}:policy/TestSetDefaultPolicyVersion version v4 does not exist or is not attachable.".format( - ACCOUNT_ID - ), + f"Policy arn:aws:iam::{ACCOUNT_ID}:policy/TestSetDefaultPolicyVersion version v4 does not exist or is not attachable.", ) @@ -686,10 +662,10 @@ def test_get_policy(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestGetPolicy", PolicyDocument=MOCK_POLICY) policy = conn.get_policy( - PolicyArn="arn:aws:iam::{}:policy/TestGetPolicy".format(ACCOUNT_ID) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestGetPolicy" ) policy["Policy"]["Arn"].should.equal( - "arn:aws:iam::{}:policy/TestGetPolicy".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:policy/TestGetPolicy" ) @@ -712,16 +688,16 @@ def test_get_policy_version(): conn = boto3.client("iam", region_name="us-east-1") 
conn.create_policy(PolicyName="TestGetPolicyVersion", PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestGetPolicyVersion", PolicyDocument=MOCK_POLICY, ) with pytest.raises(ClientError): conn.get_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestGetPolicyVersion", VersionId="v2-does-not-exist", ) retrieved = conn.get_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestGetPolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestGetPolicyVersion", VersionId=version.get("PolicyVersion").get("VersionId"), ) retrieved.get("PolicyVersion").get("Document").should.equal(json.loads(MOCK_POLICY)) @@ -766,25 +742,25 @@ def test_list_policy_versions(): conn = boto3.client("iam", region_name="us-east-1") with pytest.raises(ClientError): versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestListPolicyVersions" ) conn.create_policy(PolicyName="TestListPolicyVersions", PolicyDocument=MOCK_POLICY) versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestListPolicyVersions" ) versions.get("Versions")[0].get("VersionId").should.equal("v1") versions.get("Versions")[0].get("IsDefaultVersion").should.be.ok conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestListPolicyVersions", PolicyDocument=MOCK_POLICY_2, ) conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestListPolicyVersions", 
PolicyDocument=MOCK_POLICY_3, ) versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::{}:policy/TestListPolicyVersions".format(ACCOUNT_ID) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestListPolicyVersions" ) versions.get("Versions")[1].get("Document").should.equal(json.loads(MOCK_POLICY_2)) versions.get("Versions")[1].get("IsDefaultVersion").shouldnt.be.ok @@ -797,22 +773,20 @@ def test_delete_policy_version(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY) conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY, ) with pytest.raises(ClientError): conn.delete_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestDeletePolicyVersion", VersionId="v2-nope-this-does-not-exist", ) conn.delete_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestDeletePolicyVersion", VersionId="v2", ) versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestDeletePolicyVersion" ) len(versions.get("Versions")).should.equal(1) @@ -822,14 +796,12 @@ def test_delete_default_policy_version(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY) conn.create_policy_version( - PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestDeletePolicyVersion", PolicyDocument=MOCK_POLICY_2, ) with pytest.raises(ClientError): conn.delete_policy_version( - 
PolicyArn="arn:aws:iam::{}:policy/TestDeletePolicyVersion".format( - ACCOUNT_ID - ), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestDeletePolicyVersion", VersionId="v1", ) @@ -849,9 +821,7 @@ def test_create_policy_with_tags(): # Get policy: policy = conn.get_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format( - ACCOUNT_ID, "TestCreatePolicyWithTags1" - ) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicyWithTags1" )["Policy"] assert len(policy["Tags"]) == 2 assert policy["Tags"][0]["Key"] == "somekey" @@ -872,9 +842,7 @@ def test_create_policy_with_empty_tag_value(): Tags=[{"Key": "somekey", "Value": ""}], ) tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format( - ACCOUNT_ID, "TestCreatePolicyWithTags2" - ) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestCreatePolicyWithTags2" ) assert len(tags["Tags"]) == 1 assert tags["Tags"][0]["Key"] == "somekey" @@ -994,7 +962,7 @@ def test_create_policy_with_no_tags(): # Get without tags: policy = conn.get_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy" )["Policy"] assert not policy.get("Tags") @@ -1004,7 +972,7 @@ def test_get_policy_with_tags(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1013,7 +981,7 @@ def test_get_policy_with_tags(): # Get policy: policy = conn.get_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy" )["Policy"] assert len(policy["Tags"]) == 2 assert policy["Tags"][0]["Key"] == "somekey" @@ -1027,7 +995,7 @@ def 
test_list_policy_tags(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1036,7 +1004,7 @@ def test_list_policy_tags(): # List_policy_tags: tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy" ) assert len(tags["Tags"]) == 2 assert tags["Tags"][0]["Key"] == "somekey" @@ -1052,7 +1020,7 @@ def test_list_policy_tags_pagination(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1061,7 +1029,7 @@ def test_list_policy_tags_pagination(): # Test pagination: tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", MaxItems=1, ) assert len(tags["Tags"]) == 1 @@ -1071,7 +1039,7 @@ def test_list_policy_tags_pagination(): assert tags["Marker"] == "1" tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Marker=tags["Marker"], ) assert len(tags["Tags"]) == 1 @@ -1086,7 +1054,7 @@ def test_updating_existing_tag(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - 
PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1095,11 +1063,11 @@ def test_updating_existing_tag(): # Test updating an existing tag: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "somekey", "Value": "somenewvalue"}], ) tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy" ) assert len(tags["Tags"]) == 2 assert tags["Tags"][0]["Key"] == "somekey" @@ -1111,7 +1079,7 @@ def test_updating_existing_tag_with_empty_value(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1120,11 +1088,11 @@ def test_updating_existing_tag_with_empty_value(): # Empty is good: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "somekey", "Value": ""}], ) tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy" ) assert len(tags["Tags"]) == 2 assert tags["Tags"][0]["Key"] == "somekey" @@ -1136,7 +1104,7 @@ def test_updating_existing_tagged_policy_with_too_many_tags(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) 
conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1149,7 +1117,7 @@ def test_updating_existing_tagged_policy_with_too_many_tags(): map(lambda x: {"Key": str(x), "Value": str(x)}, range(0, 51)) ) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=too_many_tags, ) assert ( @@ -1163,7 +1131,7 @@ def test_updating_existing_tagged_policy_with_duplicate_tag(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1173,7 +1141,7 @@ def test_updating_existing_tagged_policy_with_duplicate_tag(): # With a duplicate tag: with pytest.raises(ClientError) as ce: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "0", "Value": ""}, {"Key": "0", "Value": ""}], ) assert ( @@ -1187,7 +1155,7 @@ def test_updating_existing_tagged_policy_with_duplicate_tag_different_casing(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1197,7 +1165,7 @@ def 
test_updating_existing_tagged_policy_with_duplicate_tag_different_casing(): # Duplicate tag with different casing: with pytest.raises(ClientError) as ce: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "a", "Value": ""}, {"Key": "A", "Value": ""}], ) assert ( @@ -1211,7 +1179,7 @@ def test_updating_existing_tagged_policy_with_large_key(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1221,7 +1189,7 @@ def test_updating_existing_tagged_policy_with_large_key(): # With a really big key: with pytest.raises(ClientError) as ce: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "0" * 129, "Value": ""}], ) assert ( @@ -1235,7 +1203,7 @@ def test_updating_existing_tagged_policy_with_large_value(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1245,7 +1213,7 @@ def test_updating_existing_tagged_policy_with_large_value(): # With a really big value: with pytest.raises(ClientError) as ce: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "0", 
"Value": "0" * 257}], ) assert ( @@ -1259,7 +1227,7 @@ def test_updating_existing_tagged_policy_with_invalid_character(): conn = boto3.client("iam", region_name="us-east-1") conn.create_policy(PolicyName="TestTagPolicy", PolicyDocument=MOCK_POLICY) conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1269,7 +1237,7 @@ def test_updating_existing_tagged_policy_with_invalid_character(): # With an invalid character: with pytest.raises(ClientError) as ce: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestTagPolicy", Tags=[{"Key": "NOWAY!", "Value": ""}], ) assert ( @@ -1285,7 +1253,7 @@ def test_tag_non_existant_policy(): # With a policy that doesn't exist: with pytest.raises(ClientError): conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "NotAPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/NotAPolicy", Tags=[{"Key": "some", "Value": "value"}], ) @@ -1297,7 +1265,7 @@ def test_untag_policy(): # With proper tag values: conn.tag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy", Tags=[ {"Key": "somekey", "Value": "somevalue"}, {"Key": "someotherkey", "Value": "someothervalue"}, @@ -1306,11 +1274,11 @@ def test_untag_policy(): # Remove them: conn.untag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy", TagKeys=["somekey"], ) tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy" ) assert len(tags["Tags"]) == 1 assert 
tags["Tags"][0]["Key"] == "someotherkey" @@ -1318,11 +1286,11 @@ def test_untag_policy(): # And again: conn.untag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy", TagKeys=["someotherkey"], ) tags = conn.list_policy_tags( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy") + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy" ) assert not tags["Tags"] @@ -1330,7 +1298,7 @@ def test_untag_policy(): # With more than 50 tags: with pytest.raises(ClientError) as ce: conn.untag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy", TagKeys=[str(x) for x in range(0, 51)], ) assert ( @@ -1342,7 +1310,7 @@ def test_untag_policy(): # With a really big key: with pytest.raises(ClientError) as ce: conn.untag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy", TagKeys=["0" * 129], ) assert ( @@ -1354,7 +1322,7 @@ def test_untag_policy(): # With an invalid character: with pytest.raises(ClientError) as ce: conn.untag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "TestUnTagPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/TestUnTagPolicy", TagKeys=["NOWAY!"], ) assert ( @@ -1366,7 +1334,7 @@ def test_untag_policy(): # With a policy that doesn't exist: with pytest.raises(ClientError): conn.untag_policy( - PolicyArn="arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, "NotAPolicy"), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/NotAPolicy", TagKeys=["somevalue"], ) @@ -1378,7 +1346,7 @@ def test_create_user_boto(): u["Path"].should.equal("/") u["UserName"].should.equal("my-user") u.should.have.key("UserId") - u["Arn"].should.equal("arn:aws:iam::{}:user/my-user".format(ACCOUNT_ID)) + 
u["Arn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:user/my-user") u["CreateDate"].should.be.a(datetime) with pytest.raises(ClientError) as ex: @@ -1403,7 +1371,7 @@ def test_get_user(): u["Path"].should.equal("/") u["UserName"].should.equal("my-user") u.should.have.key("UserId") - u["Arn"].should.equal("arn:aws:iam::{}:user/my-user".format(ACCOUNT_ID)) + u["Arn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:user/my-user") u["CreateDate"].should.be.a(datetime) @@ -1438,7 +1406,7 @@ def test_list_users(): user = response["Users"][0] user["UserName"].should.equal("my-user") user["Path"].should.equal("/") - user["Arn"].should.equal("arn:aws:iam::{}:user/my-user".format(ACCOUNT_ID)) + user["Arn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:user/my-user") response["IsTruncated"].should.equal(False) conn.create_user(UserName="my-user-1", Path="myUser") @@ -1620,9 +1588,7 @@ def test_create_virtual_mfa_device(): response = client.create_virtual_mfa_device(VirtualMFADeviceName="test-device") device = response["VirtualMFADevice"] - device["SerialNumber"].should.equal( - "arn:aws:iam::{}:mfa/test-device".format(ACCOUNT_ID) - ) + device["SerialNumber"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:mfa/test-device") device["Base32StringSeed"].decode("ascii").should.match("[A-Z234567]") device["QRCodePNG"].should_not.equal("") @@ -1631,9 +1597,7 @@ def test_create_virtual_mfa_device(): ) device = response["VirtualMFADevice"] - device["SerialNumber"].should.equal( - "arn:aws:iam::{}:mfa/test-device-2".format(ACCOUNT_ID) - ) + device["SerialNumber"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:mfa/test-device-2") device["Base32StringSeed"].decode("ascii").should.match("[A-Z234567]") device["QRCodePNG"].should_not.equal("") @@ -1643,7 +1607,7 @@ def test_create_virtual_mfa_device(): device = response["VirtualMFADevice"] device["SerialNumber"].should.equal( - "arn:aws:iam::{}:mfa/test/test-device".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:mfa/test/test-device" ) 
device["Base32StringSeed"].decode("ascii").should.match("[A-Z234567]") device["QRCodePNG"].should_not.equal("") @@ -1677,7 +1641,7 @@ def test_create_virtual_mfa_device_errors(): "It must begin and end with / and contain only alphanumeric characters and/or / characters.", ) - too_long_path = "/{}/".format("b" * 511) + too_long_path = f"/{('b' * 511)}/" client.create_virtual_mfa_device.when.called_with( Path=too_long_path, VirtualMFADeviceName="test-device" ).should.throw( @@ -1706,12 +1670,12 @@ def test_delete_virtual_mfa_device(): def test_delete_virtual_mfa_device_errors(): client = boto3.client("iam", region_name="us-east-1") - serial_number = "arn:aws:iam::{}:mfa/not-existing".format(ACCOUNT_ID) + serial_number = f"arn:aws:iam::{ACCOUNT_ID}:mfa/not-existing" client.delete_virtual_mfa_device.when.called_with( SerialNumber=serial_number ).should.throw( ClientError, - "VirtualMFADevice with serial number {0} doesn't exist.".format(serial_number), + f"VirtualMFADevice with serial number {serial_number} doesn't exist.", ) @@ -1796,9 +1760,7 @@ def test_enable_virtual_mfa_device(): device["User"]["Path"].should.equal("/") device["User"]["UserName"].should.equal("test-user") device["User"]["UserId"].should.match("[a-z0-9]+") - device["User"]["Arn"].should.equal( - "arn:aws:iam::{}:user/test-user".format(ACCOUNT_ID) - ) + device["User"]["Arn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:user/test-user") device["User"]["CreateDate"].should.be.a(datetime) device["User"]["Tags"].should.equal(tags) device["EnableDate"].should.be.a(datetime) @@ -2324,7 +2286,7 @@ def test_get_account_authorization_details(): ) conn = boto3.client("iam", region_name="us-east-1") - boundary = "arn:aws:iam::{}:policy/boundary".format(ACCOUNT_ID) + boundary = f"arn:aws:iam::{ACCOUNT_ID}:policy/boundary" conn.create_role( RoleName="my-role", AssumeRolePolicyDocument="some policy", @@ -2351,11 +2313,11 @@ def test_get_account_authorization_details(): conn.attach_user_policy( UserName="testUser", - 
PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", ) conn.attach_group_policy( GroupName="testGroup", - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", ) conn.add_user_to_group(UserName="testUser", GroupName="testGroup") @@ -2375,7 +2337,7 @@ def test_get_account_authorization_details(): ) conn.attach_role_policy( RoleName="my-role", - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", ) result = conn.get_account_authorization_details(Filter=["Role"]) @@ -2392,7 +2354,7 @@ def test_get_account_authorization_details(): "PermissionsBoundary" ] == { "PermissionsBoundaryType": "PermissionsBoundaryPolicy", - "PermissionsBoundaryArn": "arn:aws:iam::{}:policy/boundary".format(ACCOUNT_ID), + "PermissionsBoundaryArn": f"arn:aws:iam::{ACCOUNT_ID}:policy/boundary", } assert len(result["RoleDetailList"][0]["Tags"]) == 2 assert len(result["RoleDetailList"][0]["RolePolicyList"]) == 1 @@ -2401,9 +2363,10 @@ def test_get_account_authorization_details(): result["RoleDetailList"][0]["AttachedManagedPolicies"][0]["PolicyName"] == "testPolicy" ) - assert result["RoleDetailList"][0]["AttachedManagedPolicies"][0][ - "PolicyArn" - ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert ( + result["RoleDetailList"][0]["AttachedManagedPolicies"][0]["PolicyArn"] + == f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy" + ) assert result["RoleDetailList"][0]["RolePolicyList"][0][ "PolicyDocument" ] == json.loads(test_policy) @@ -2420,9 +2383,10 @@ def test_get_account_authorization_details(): result["UserDetailList"][0]["AttachedManagedPolicies"][0]["PolicyName"] == "testPolicy" ) - assert result["UserDetailList"][0]["AttachedManagedPolicies"][0][ - "PolicyArn" - ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert ( + 
result["UserDetailList"][0]["AttachedManagedPolicies"][0]["PolicyArn"] + == f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy" + ) assert result["UserDetailList"][0]["UserPolicyList"][0][ "PolicyDocument" ] == json.loads(test_policy) @@ -2438,9 +2402,10 @@ def test_get_account_authorization_details(): result["GroupDetailList"][0]["AttachedManagedPolicies"][0]["PolicyName"] == "testPolicy" ) - assert result["GroupDetailList"][0]["AttachedManagedPolicies"][0][ - "PolicyArn" - ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert ( + result["GroupDetailList"][0]["AttachedManagedPolicies"][0]["PolicyArn"] + == f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy" + ) assert result["GroupDetailList"][0]["GroupPolicyList"][0][ "PolicyDocument" ] == json.loads(test_policy) @@ -2508,14 +2473,16 @@ def test_signing_certs(): UserName="notauser", CertificateId=cert_id, Status="Inactive" ) + fake_id_name = "x" * 32 with pytest.raises(ClientError) as ce: client.update_signing_certificate( - UserName="testing", CertificateId="x" * 32, Status="Inactive" + UserName="testing", CertificateId=fake_id_name, Status="Inactive" ) - assert ce.value.response["Error"][ - "Message" - ] == "The Certificate with id {id} cannot be found.".format(id="x" * 32) + assert ( + ce.value.response["Error"]["Message"] + == f"The Certificate with id {fake_id_name} cannot be found." 
+ ) # List the certs: resp = client.list_signing_certificates(UserName="testing")["Certificates"] @@ -2540,7 +2507,7 @@ def test_create_saml_provider(): Name="TestSAMLProvider", SAMLMetadataDocument="a" * 1024 ) response["SAMLProviderArn"].should.equal( - "arn:aws:iam::{}:saml-provider/TestSAMLProvider".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:saml-provider/TestSAMLProvider" ) @@ -2562,7 +2529,7 @@ def test_list_saml_providers(): conn.create_saml_provider(Name="TestSAMLProvider", SAMLMetadataDocument="a" * 1024) response = conn.list_saml_providers() response["SAMLProviderList"][0]["Arn"].should.equal( - "arn:aws:iam::{}:saml-provider/TestSAMLProvider".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:saml-provider/TestSAMLProvider" ) @@ -2583,9 +2550,10 @@ def test_delete_saml_provider(): with pytest.raises(ClientError) as ce: conn.delete_signing_certificate(UserName="testing", CertificateId=cert_id) - assert ce.value.response["Error"][ - "Message" - ] == "The Certificate with id {id} cannot be found.".format(id=cert_id) + assert ( + ce.value.response["Error"]["Message"] + == f"The Certificate with id {cert_id} cannot be found." 
+ ) # Verify that it's not in the list: resp = conn.list_signing_certificates(UserName="testing") @@ -2989,11 +2957,11 @@ def test_list_entities_for_policy(): conn.attach_user_policy( UserName="testUser", - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", ) conn.attach_group_policy( GroupName="testGroup", - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", ) conn.add_user_to_group(UserName="testUser", GroupName="testGroup") @@ -3013,11 +2981,11 @@ def test_list_entities_for_policy(): ) conn.attach_role_policy( RoleName="my-role", - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", ) response = conn.list_entities_for_policy( - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", EntityFilter="Role", ) assert response["PolicyRoles"][0]["RoleName"] == "my-role" @@ -3026,7 +2994,7 @@ def test_list_entities_for_policy(): response["PolicyUsers"].should.equal([]) response = conn.list_entities_for_policy( - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", EntityFilter="User", ) assert response["PolicyUsers"][0]["UserName"] == "testUser" @@ -3035,7 +3003,7 @@ def test_list_entities_for_policy(): response["PolicyRoles"].should.equal([]) response = conn.list_entities_for_policy( - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", EntityFilter="Group", ) assert response["PolicyGroups"][0]["GroupName"] == "testGroup" @@ -3044,7 +3012,7 @@ def test_list_entities_for_policy(): response["PolicyUsers"].should.equal([]) response = conn.list_entities_for_policy( - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID), + 
PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy", EntityFilter="LocalManagedPolicy", ) assert response["PolicyGroups"][0]["GroupName"] == "testGroup" @@ -3057,7 +3025,7 @@ def test_list_entities_for_policy(): # Return everything when no entity is specified response = conn.list_entities_for_policy( - PolicyArn="arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + PolicyArn=f"arn:aws:iam::{ACCOUNT_ID}:policy/testPolicy" ) response["PolicyGroups"][0]["GroupName"].should.equal("testGroup") response["PolicyUsers"][0]["UserName"].should.equal("testUser") @@ -3074,9 +3042,7 @@ def test_create_role_no_path(): resp = conn.create_role( RoleName="my-role", AssumeRolePolicyDocument="some policy", Description="test" ) - resp.get("Role").get("Arn").should.equal( - "arn:aws:iam::{}:role/my-role".format(ACCOUNT_ID) - ) + resp.get("Role").get("Arn").should.equal(f"arn:aws:iam::{ACCOUNT_ID}:role/my-role") resp.get("Role").should_not.have.key("PermissionsBoundary") resp.get("Role").get("Description").should.equal("test") @@ -3084,7 +3050,7 @@ def test_create_role_no_path(): @mock_iam() def test_create_role_with_permissions_boundary(): conn = boto3.client("iam", region_name="us-east-1") - boundary = "arn:aws:iam::{}:policy/boundary".format(ACCOUNT_ID) + boundary = f"arn:aws:iam::{ACCOUNT_ID}:policy/boundary" resp = conn.create_role( RoleName="my-role", AssumeRolePolicyDocument="some policy", @@ -3139,7 +3105,7 @@ def test_create_role_with_same_name_should_fail(): ) err.value.response["Error"]["Code"].should.equal("EntityAlreadyExists") err.value.response["Error"]["Message"].should.equal( - "Role with name {0} already exists.".format(test_role_name) + f"Role with name {test_role_name} already exists." 
) @@ -3153,9 +3119,7 @@ def test_create_policy_with_same_name_should_fail(): iam.create_policy(PolicyName=test_policy_name, PolicyDocument=MOCK_POLICY) err.value.response["Error"]["Code"].should.equal("EntityAlreadyExists") err.value.response["Error"]["Message"].should.equal( - "A policy called {0} already exists. Duplicate names are not allowed.".format( - test_policy_name - ) + f"A policy called {test_policy_name} already exists. Duplicate names are not allowed." ) @@ -3237,7 +3201,7 @@ def test_get_account_password_policy_errors(): client.get_account_password_policy.when.called_with().should.throw( ClientError, - "The Password Policy with domain name {} cannot be found.".format(ACCOUNT_ID), + f"The Password Policy with domain name {ACCOUNT_ID} cannot be found.", ) @@ -3254,7 +3218,7 @@ def test_delete_account_password_policy(): client.get_account_password_policy.when.called_with().should.throw( ClientError, - "The Password Policy with domain name {} cannot be found.".format(ACCOUNT_ID), + f"The Password Policy with domain name {ACCOUNT_ID} cannot be found.", ) @@ -3467,11 +3431,11 @@ def test_role_list_config_discovered_resources(): this_role = role_config_query.backends[DEFAULT_ACCOUNT_ID][ "global" ].create_role( - role_name="role{}".format(ix), + role_name=f"role{ix}", assume_role_policy_document=None, path="/", permissions_boundary=None, - description="role{}".format(ix), + description=f"role{ix}", tags=[{"Key": "foo", "Value": "bar"}], max_session_duration=3600, ) @@ -3848,9 +3812,9 @@ def test_role_config_client(): num_roles = 10 for ix in range(1, num_roles + 1): this_policy = iam_client.create_role( - RoleName="role{}".format(ix), + RoleName=f"role{ix}", Path="/", - Description="role{}".format(ix), + Description=f"role{ix}", AssumeRolePolicyDocument=json.dumps("{ }"), ) roles.append( @@ -4075,10 +4039,10 @@ def test_policy_list_config_discovered_resources(): this_policy = policy_config_query.backends[DEFAULT_ACCOUNT_ID][ "global" ].create_policy( - 
description="policy{}".format(ix), + description=f"policy{ix}", path="", policy_document=json.dumps(basic_policy), - policy_name="policy{}".format(ix), + policy_name=f"policy{ix}", tags=[], ) policies.append({"id": this_policy.id, "name": this_policy.name}) @@ -4301,10 +4265,10 @@ def test_policy_config_client(): num_policies = 10 for ix in range(1, num_policies + 1): this_policy = iam_client.create_policy( - PolicyName="policy{}".format(ix), + PolicyName=f"policy{ix}", Path="/", PolicyDocument=json.dumps(basic_policy), - Description="policy{}".format(ix), + Description=f"policy{ix}", ) policies.append( { @@ -4479,7 +4443,7 @@ def test_list_roles_with_more_than_100_roles_no_max_items_defaults_to_100(): iam = boto3.client("iam", region_name="us-east-1") for i in range(150): iam.create_role( - RoleName="test_role_{}".format(i), AssumeRolePolicyDocument="some policy" + RoleName=f"test_role_{i}", AssumeRolePolicyDocument="some policy" ) response = iam.list_roles() roles = response["Roles"] @@ -4493,7 +4457,7 @@ def test_list_roles_max_item_and_marker_values_adhered(): iam = boto3.client("iam", region_name="us-east-1") for i in range(10): iam.create_role( - RoleName="test_role_{}".format(i), AssumeRolePolicyDocument="some policy" + RoleName=f"test_role_{i}", AssumeRolePolicyDocument="some policy" ) response = iam.list_roles(MaxItems=2) roles = response["Roles"] @@ -4633,7 +4597,7 @@ def test_tag_user_error_unknown_user_name(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(404) ex.response["Error"]["Code"].should.contain("NoSuchEntity") ex.response["Error"]["Message"].should.equal( - "The user with name {} cannot be found.".format(name) + f"The user with name {name} cannot be found." 
) @@ -4671,7 +4635,7 @@ def test_untag_user_error_unknown_user_name(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(404) ex.response["Error"]["Code"].should.contain("NoSuchEntity") ex.response["Error"]["Message"].should.equal( - "The user with name {} cannot be found.".format(name) + f"The user with name {name} cannot be found." ) diff --git a/tests/test_iam/test_iam_cloudformation.py b/tests/test_iam/test_iam_cloudformation.py index 36dcd8561..dbf4e21a4 100644 --- a/tests/test_iam/test_iam_cloudformation.py +++ b/tests/test_iam/test_iam_cloudformation.py @@ -368,7 +368,7 @@ Resources: policy_arn = provisioned_resource["PhysicalResourceId"] policy_arn.should.match( - "arn:aws:iam::{}:policy/MyStack-ThePolicy-[A-Z0-9]+".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:policy/MyStack-ThePolicy-[A-Z0-9]+" ) expected_name = policy_arn.split("/")[1] @@ -420,7 +420,7 @@ Resources: logical_resource_id.should.equal("ThePolicy") policy_arn = provisioned_resource["PhysicalResourceId"] - policy_arn.should.equal("arn:aws:iam::{}:policy/{}".format(ACCOUNT_ID, name)) + policy_arn.should.equal(f"arn:aws:iam::{ACCOUNT_ID}:policy/{name}") policy = iam_client.get_policy(PolicyArn=policy_arn)["Policy"] policy.should.have.key("Arn").equal(policy_arn) @@ -469,7 +469,7 @@ Resources: policy_arn = provisioned_resource["PhysicalResourceId"] policy_arn.should.match( - "rn:aws:iam::{}:policy/MyStack-ThePolicy-[A-Z0-9]+".format(ACCOUNT_ID) + f"rn:aws:iam::{ACCOUNT_ID}:policy/MyStack-ThePolicy-[A-Z0-9]+" ) response = iam_client.list_entities_for_policy(PolicyArn=policy_arn) @@ -520,7 +520,7 @@ Resources: policy_arn = provisioned_resource["PhysicalResourceId"] policy_arn.should.match( - "rn:aws:iam::{}:policy/MyStack-ThePolicy-[A-Z0-9]+".format(ACCOUNT_ID) + f"rn:aws:iam::{ACCOUNT_ID}:policy/MyStack-ThePolicy-[A-Z0-9]+" ) response = iam_client.list_entities_for_policy(PolicyArn=policy_arn) @@ -571,7 +571,7 @@ Resources: policy_arn = provisioned_resource["PhysicalResourceId"] 
policy_arn.should.match( - "rn:aws:iam::{}:policy/MyStack-ThePolicy-[A-Z0-9]+".format(ACCOUNT_ID) + f"rn:aws:iam::{ACCOUNT_ID}:policy/MyStack-ThePolicy-[A-Z0-9]+" ) response = iam_client.list_entities_for_policy(PolicyArn=policy_arn) @@ -594,7 +594,7 @@ def test_iam_cloudformation_create_user_policy(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -646,7 +646,7 @@ def test_iam_cloudformation_update_user_policy(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -733,7 +733,7 @@ def test_iam_cloudformation_delete_user_policy_having_generated_name(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -788,7 +788,7 @@ def test_iam_cloudformation_create_role_policy(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -840,7 +840,7 @@ def test_iam_cloudformation_update_role_policy(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - 
bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -927,7 +927,7 @@ def test_iam_cloudformation_delete_role_policy_having_generated_name(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -982,7 +982,7 @@ def test_iam_cloudformation_create_group_policy(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -1034,7 +1034,7 @@ def test_iam_cloudformation_update_group_policy(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" @@ -1121,7 +1121,7 @@ def test_iam_cloudformation_delete_group_policy_having_generated_name(): s3_client = boto3.client("s3", region_name="us-east-1") bucket_name = "my-bucket" s3_client.create_bucket(Bucket=bucket_name) - bucket_arn = "arn:aws:s3:::{0}".format(bucket_name) + bucket_arn = f"arn:aws:s3:::{bucket_name}" cf_client = boto3.client("cloudformation", region_name="us-east-1") stack_name = "MyStack" diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 67c73f22b..5efa67e69 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -40,7 +40,7 @@ 
def test_get_group_boto3(): created["Path"].should.equal("/") created["GroupName"].should.equal("my-group") created.should.have.key("GroupId") - created["Arn"].should.equal("arn:aws:iam::{}:group/my-group".format(ACCOUNT_ID)) + created["Arn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:group/my-group") created["CreateDate"].should.be.a(datetime) retrieved = conn.get_group(GroupName="my-group")["Group"] @@ -63,15 +63,16 @@ def test_get_group_current(): assert result["Group"]["GroupName"] == "my-group" assert isinstance(result["Group"]["CreateDate"], datetime) assert result["Group"]["GroupId"] - assert result["Group"]["Arn"] == "arn:aws:iam::{}:group/my-group".format(ACCOUNT_ID) + assert result["Group"]["Arn"] == f"arn:aws:iam::{ACCOUNT_ID}:group/my-group" assert not result["Users"] # Make a group with a different path: other_group = conn.create_group(GroupName="my-other-group", Path="some/location") assert other_group["Group"]["Path"] == "some/location" - assert other_group["Group"][ - "Arn" - ] == "arn:aws:iam::{}:group/some/location/my-other-group".format(ACCOUNT_ID) + assert ( + other_group["Group"]["Arn"] + == f"arn:aws:iam::{ACCOUNT_ID}:group/some/location/my-other-group" + ) @mock_iam diff --git a/tests/test_iam/test_iam_oidc.py b/tests/test_iam/test_iam_oidc.py index 69ab0b05a..9f9557afd 100644 --- a/tests/test_iam/test_iam_oidc.py +++ b/tests/test_iam/test_iam_oidc.py @@ -18,7 +18,7 @@ def test_create_open_id_connect_provider(): ) response["OpenIDConnectProviderArn"].should.equal( - "arn:aws:iam::{}:oidc-provider/example.com".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:oidc-provider/example.com" ) response = client.create_open_id_connect_provider( @@ -26,7 +26,7 @@ def test_create_open_id_connect_provider(): ) response["OpenIDConnectProviderArn"].should.equal( - "arn:aws:iam::{}:oidc-provider/example.org".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:oidc-provider/example.org" ) response = client.create_open_id_connect_provider( @@ -34,7 +34,7 @@ def 
test_create_open_id_connect_provider(): ) response["OpenIDConnectProviderArn"].should.equal( - "arn:aws:iam::{}:oidc-provider/example.org/oidc".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:oidc-provider/example.org/oidc" ) response = client.create_open_id_connect_provider( @@ -42,7 +42,7 @@ def test_create_open_id_connect_provider(): ) response["OpenIDConnectProviderArn"].should.equal( - "arn:aws:iam::{}:oidc-provider/example.org/oidc-query".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:oidc-provider/example.org/oidc-query" ) @@ -106,7 +106,7 @@ def test_create_open_id_connect_provider_too_many_entries(): def test_create_open_id_connect_provider_quota_error(): client = boto3.client("iam", region_name="us-east-1") - too_many_client_ids = ["{}".format(i) for i in range(101)] + too_many_client_ids = [f"{i}" for i in range(101)] with pytest.raises(ClientError) as e: client.create_open_id_connect_provider( Url="http://example.org", @@ -155,7 +155,7 @@ def test_delete_open_id_connect_provider(): client.get_open_id_connect_provider.when.called_with( OpenIDConnectProviderArn=open_id_arn ).should.throw( - ClientError, "OpenIDConnect Provider not found for arn {}".format(open_id_arn) + ClientError, f"OpenIDConnect Provider not found for arn {open_id_arn}" ) # deleting a non existing provider should be successful @@ -206,13 +206,11 @@ def test_get_open_id_connect_provider_errors(): ) open_id_arn = response["OpenIDConnectProviderArn"] + unknown_arn = open_id_arn + "-not-existing" client.get_open_id_connect_provider.when.called_with( - OpenIDConnectProviderArn=open_id_arn + "-not-existing" + OpenIDConnectProviderArn=unknown_arn ).should.throw( - ClientError, - "OpenIDConnect Provider not found for arn {}".format( - open_id_arn + "-not-existing" - ), + ClientError, f"OpenIDConnect Provider not found for arn {unknown_arn}" ) diff --git a/tests/test_iam/test_iam_server_certificates.py b/tests/test_iam/test_iam_server_certificates.py index a93344002..57d2fa853 100644 --- 
a/tests/test_iam/test_iam_server_certificates.py +++ b/tests/test_iam/test_iam_server_certificates.py @@ -22,9 +22,7 @@ def test_get_all_server_certs(): certs.should.have.length_of(1) cert1 = certs[0] cert1["ServerCertificateName"].should.equal("certname") - cert1["Arn"].should.equal( - "arn:aws:iam::{}:server-certificate/certname".format(ACCOUNT_ID) - ) + cert1["Arn"].should.equal(f"arn:aws:iam::{ACCOUNT_ID}:server-certificate/certname") @mock_iam @@ -59,7 +57,7 @@ def test_get_server_cert(): metadata["Path"].should.equal("/") metadata["ServerCertificateName"].should.equal("certname") metadata["Arn"].should.equal( - "arn:aws:iam::{}:server-certificate/certname".format(ACCOUNT_ID) + f"arn:aws:iam::{ACCOUNT_ID}:server-certificate/certname" ) metadata.should.have.key("ServerCertificateId") metadata["UploadDate"].should.be.a(datetime) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 9ba031e83..2c88fe214 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -16,25 +16,25 @@ def test_endpoints(): endpoint = client.describe_endpoint(endpointType="iot:Data") endpoint.should.have.key("endpointAddress").which.should_not.contain("ats") endpoint.should.have.key("endpointAddress").which.should.contain( - "iot.{}.amazonaws.com".format(region_name) + f"iot.{region_name}.amazonaws.com" ) # iot:Data-ATS endpoint = client.describe_endpoint(endpointType="iot:Data-ATS") endpoint.should.have.key("endpointAddress").which.should.contain( - "ats.iot.{}.amazonaws.com".format(region_name) + f"ats.iot.{region_name}.amazonaws.com" ) # iot:Data-ATS endpoint = client.describe_endpoint(endpointType="iot:CredentialProvider") endpoint.should.have.key("endpointAddress").which.should.contain( - "credentials.iot.{}.amazonaws.com".format(region_name) + f"credentials.iot.{region_name}.amazonaws.com" ) # iot:Data-ATS endpoint = client.describe_endpoint(endpointType="iot:Jobs") endpoint.should.have.key("endpointAddress").which.should.contain( - 
"jobs.iot.{}.amazonaws.com".format(region_name) + f"jobs.iot.{region_name}.amazonaws.com" ) # raise InvalidRequestException diff --git a/tests/test_iot/test_iot_certificates.py b/tests/test_iot/test_iot_certificates.py index 45eb1a2a0..31bd17cdd 100644 --- a/tests/test_iot/test_iot_certificates.py +++ b/tests/test_iot/test_iot_certificates.py @@ -206,7 +206,7 @@ def test_delete_certificate_validation(): with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) e.value.response["Error"]["Message"].should.contain( - "Things must be detached before deletion (arn: %s)" % cert_arn + f"Things must be detached before deletion (arn: {cert_arn})" ) res = client.list_certificates() res.should.have.key("certificates").which.should.have.length_of(1) @@ -215,7 +215,7 @@ def test_delete_certificate_validation(): with pytest.raises(ClientError) as e: client.delete_certificate(certificateId=cert_id) e.value.response["Error"]["Message"].should.contain( - "Certificate policies must be detached before deletion (arn: %s)" % cert_arn + f"Certificate policies must be detached before deletion (arn: {cert_arn})" ) res = client.list_certificates() res.should.have.key("certificates").which.should.have.length_of(1) diff --git a/tests/test_iot/test_iot_policies.py b/tests/test_iot/test_iot_policies.py index 8b9616a2e..4246e39c7 100644 --- a/tests/test_iot/test_iot_policies.py +++ b/tests/test_iot/test_iot_policies.py @@ -208,7 +208,7 @@ def test_policy_versions(iot_client): ) err = exc.value.response["Error"] err["Message"].should.equal( - "The policy %s already has the maximum number of versions (5)" % policy_name + f"The policy {policy_name} already has the maximum number of versions (5)" ) iot_client.delete_policy_version(policyName=policy_name, policyVersionId="1") diff --git a/tests/test_iot/test_server.py b/tests/test_iot/test_server.py index 460eac1a7..8c81be327 100644 --- a/tests/test_iot/test_server.py +++ b/tests/test_iot/test_server.py @@ -44,7 +44,7 @@ 
def test_list_attached_policies(url_encode_arn): if url_encode_arn: certificate_arn = quote(certificate_arn, safe="") - result = test_client.post("/attached-policies/{}".format(certificate_arn)) + result = test_client.post(f"/attached-policies/{certificate_arn}") result.status_code.should.equal(200) result_dict = json.loads(result.data.decode("utf-8")) result_dict["policies"][0]["policyName"].should.equal("my-policy") diff --git a/tests/test_iotdata/test_server.py b/tests/test_iotdata/test_server.py index 6398cf5b6..e226be083 100644 --- a/tests/test_iotdata/test_server.py +++ b/tests/test_iotdata/test_server.py @@ -18,7 +18,7 @@ def test_iotdata_list(): # just making sure that server is up thing_name = "nothing" - res = test_client.get("/things/{}/shadow".format(thing_name)) + res = test_client.get(f"/things/{thing_name}/shadow") res.status_code.should.equal(404) @@ -37,5 +37,5 @@ def test_publish(url_encode_topic): topic = "test/topic" topic_for_path = quote(topic, safe="") if url_encode_topic else topic - result = test_client.post("/topics/{}".format(topic_for_path)) + result = test_client.post(f"/topics/{topic_for_path}") result.status_code.should.equal(200) diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index 38b291636..005e3382a 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -97,7 +97,7 @@ def test_list_many_streams(): conn = boto3.client("kinesis", region_name="us-west-2") for i in range(11): - conn.create_stream(StreamName="stream%d" % i, ShardCount=1) + conn.create_stream(StreamName=f"stream{i}", ShardCount=1) resp = conn.list_streams() stream_names = resp["StreamNames"] @@ -124,7 +124,7 @@ def test_describe_stream_summary(): stream["StreamName"].should.equal(stream_name) stream["OpenShardCount"].should.equal(shard_count) stream["StreamARN"].should.equal( - "arn:aws:kinesis:us-west-2:{}:stream/{}".format(ACCOUNT_ID, stream_name) + 
f"arn:aws:kinesis:us-west-2:{ACCOUNT_ID}:stream/{stream_name}" ) stream["StreamStatus"].should.equal("ACTIVE") diff --git a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py index 30a94b436..657c9235e 100644 --- a/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py +++ b/tests/test_kinesisvideoarchivedmedia/test_kinesisvideoarchivedmedia.py @@ -23,9 +23,7 @@ def test_get_hls_streaming_session_url(): endpoint_url=data_endpoint, ) res = client.get_hls_streaming_session_url(StreamName=stream_name) - reg_exp = r"^{}/hls/v1/getHLSMasterPlaylist.m3u8\?SessionToken\=.+$".format( - data_endpoint - ) + reg_exp = rf"^{data_endpoint}/hls/v1/getHLSMasterPlaylist.m3u8\?SessionToken\=.+$" res.should.have.key("HLSStreamingSessionURL").which.should.match(reg_exp) @@ -47,9 +45,7 @@ def test_get_dash_streaming_session_url(): endpoint_url=data_endpoint, ) res = client.get_dash_streaming_session_url(StreamName=stream_name) - reg_exp = r"^{}/dash/v1/getDASHManifest.mpd\?SessionToken\=.+$".format( - data_endpoint - ) + reg_exp = rf"^{data_endpoint}/dash/v1/getDASHManifest.mpd\?SessionToken\=.+$" res.should.have.key("DASHStreamingSessionURL").which.should.match(reg_exp) diff --git a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py index 3d251b546..47a690554 100644 --- a/tests/test_kms/test_kms_boto3.py +++ b/tests/test_kms/test_kms_boto3.py @@ -65,9 +65,7 @@ def test_create_key(): ) key["KeyMetadata"]["Arn"].should.equal( - "arn:aws:kms:us-east-1:{}:key/{}".format( - ACCOUNT_ID, key["KeyMetadata"]["KeyId"] - ) + f"arn:aws:kms:us-east-1:{ACCOUNT_ID}:key/{key['KeyMetadata']['KeyId']}" ) key["KeyMetadata"]["AWSAccountId"].should.equal(ACCOUNT_ID) key["KeyMetadata"]["CreationDate"].should.be.a(datetime) @@ -262,7 +260,7 @@ def test__create_alias__can_create_multiple_aliases_for_same_key_id(): aliases = client.list_aliases(KeyId=key_id)["Aliases"] for name in 
alias_names: - alias_arn = "arn:aws:kms:us-east-1:{}:{}".format(ACCOUNT_ID, name) + alias_arn = f"arn:aws:kms:us-east-1:{ACCOUNT_ID}:{name}" aliases.should.contain( {"AliasName": name, "AliasArn": alias_arn, "TargetKeyId": key_id} ) @@ -278,8 +276,8 @@ def test_list_aliases(): aliases.should.have.length_of(14) default_alias_names = ["aws/ebs", "aws/s3", "aws/redshift", "aws/rds"] for name in default_alias_names: - full_name = "alias/{}".format(name) - arn = "arn:aws:kms:{}:{}:{}".format(region, ACCOUNT_ID, full_name) + full_name = f"alias/{name}" + arn = f"arn:aws:kms:{region}:{ACCOUNT_ID}:{full_name}" aliases.should.contain({"AliasName": full_name, "AliasArn": arn}) @@ -991,9 +989,7 @@ def test__create_alias__raises_if_alias_has_restricted_characters(name): err = ex.value.response["Error"] err["Code"].should.equal("ValidationException") err["Message"].should.equal( - "1 validation error detected: Value '{}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format( - name - ) + f"1 validation error detected: Value '{name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" ) diff --git a/tests/test_logs/test_integration.py b/tests/test_logs/test_integration.py index 9026bcbe4..ec0bde543 100644 --- a/tests/test_logs/test_integration.py +++ b/tests/test_logs/test_integration.py @@ -158,9 +158,9 @@ def test_put_subscription_filter_with_lambda(): msg_showed_up, received_message = _wait_for_log_msg( client_logs, "/aws/lambda/test", "awslogs" ) - assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format( - received_message - ) + assert ( + msg_showed_up + ), f"CloudWatch log event was not found. 
All logs: {received_message}" data = json.loads(received_message)["awslogs"]["data"] response = json.loads( @@ -229,9 +229,9 @@ def test_subscription_filter_applies_to_new_streams(): msg_showed_up, received_message = _wait_for_log_msg( client_logs, "/aws/lambda/test", "awslogs" ) - assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format( - received_message - ) + assert ( + msg_showed_up + ), f"CloudWatch log event was not found. All logs: {received_message}" data = json.loads(received_message)["awslogs"]["data"] response = json.loads( diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index d5cd4d5c4..6b8bc6afb 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1087,7 +1087,7 @@ def test_describe_log_streams_paging(): resp["logStreams"].should.have.length_of(2) resp["logStreams"][0]["arn"].should.contain(log_group_name) resp["nextToken"].should.equal( - "{}@{}".format(log_group_name, resp["logStreams"][1]["logStreamName"]) + f"{log_group_name}@{resp['logStreams'][1]['logStreamName']}" ) resp = client.describe_log_streams( @@ -1096,7 +1096,7 @@ def test_describe_log_streams_paging(): resp["logStreams"].should.have.length_of(1) resp["logStreams"][0]["arn"].should.contain(log_group_name) resp["nextToken"].should.equal( - "{}@{}".format(log_group_name, resp["logStreams"][0]["logStreamName"]) + f"{log_group_name}@{resp['logStreams'][0]['logStreamName']}" ) resp = client.describe_log_streams( @@ -1170,7 +1170,7 @@ def test_get_too_many_log_events(nr_of_events): err["Code"].should.equal("InvalidParameterException") err["Message"].should.contain("1 validation error detected") err["Message"].should.contain( - "Value '{}' at 'limit' failed to satisfy constraint".format(nr_of_events) + f"Value '{nr_of_events}' at 'limit' failed to satisfy constraint" ) err["Message"].should.contain("Member must have value less than or equal to 10000") @@ -1194,7 +1194,7 @@ def 
test_filter_too_many_log_events(nr_of_events): err["Code"].should.equal("InvalidParameterException") err["Message"].should.contain("1 validation error detected") err["Message"].should.contain( - "Value '{}' at 'limit' failed to satisfy constraint".format(nr_of_events) + f"Value '{nr_of_events}' at 'limit' failed to satisfy constraint" ) err["Message"].should.contain("Member must have value less than or equal to 10000") @@ -1209,7 +1209,7 @@ def test_describe_too_many_log_groups(nr_of_groups): err["Code"].should.equal("InvalidParameterException") err["Message"].should.contain("1 validation error detected") err["Message"].should.contain( - "Value '{}' at 'limit' failed to satisfy constraint".format(nr_of_groups) + f"Value '{nr_of_groups}' at 'limit' failed to satisfy constraint" ) err["Message"].should.contain("Member must have value less than or equal to 50") @@ -1226,7 +1226,7 @@ def test_describe_too_many_log_streams(nr_of_streams): err["Code"].should.equal("InvalidParameterException") err["Message"].should.contain("1 validation error detected") err["Message"].should.contain( - "Value '{}' at 'limit' failed to satisfy constraint".format(nr_of_streams) + f"Value '{nr_of_streams}' at 'limit' failed to satisfy constraint" ) err["Message"].should.contain("Member must have value less than or equal to 50") @@ -1242,9 +1242,7 @@ def test_create_log_group_invalid_name_length(length): err["Code"].should.equal("InvalidParameterException") err["Message"].should.contain("1 validation error detected") err["Message"].should.contain( - "Value '{}' at 'logGroupName' failed to satisfy constraint".format( - log_group_name - ) + f"Value '{log_group_name}' at 'logGroupName' failed to satisfy constraint" ) err["Message"].should.contain("Member must have length less than or equal to 512") @@ -1263,7 +1261,7 @@ def test_describe_log_streams_invalid_order_by(invalid_orderby): err["Code"].should.equal("InvalidParameterException") err["Message"].should.contain("1 validation error 
detected") err["Message"].should.contain( - "Value '{}' at 'orderBy' failed to satisfy constraint".format(invalid_orderby) + f"Value '{invalid_orderby}' at 'orderBy' failed to satisfy constraint" ) err["Message"].should.contain( "Member must satisfy enum value set: [LogStreamName, LastEventTime]" diff --git a/tests/test_logs/test_logs_filter.py b/tests/test_logs/test_logs_filter.py index cb7adfb1f..0a67ef555 100644 --- a/tests/test_logs/test_logs_filter.py +++ b/tests/test_logs/test_logs_filter.py @@ -82,9 +82,7 @@ class TestLogFilterParameters(TestLogFilter): timestamp = int(unix_time_millis(datetime.utcnow())) messages = [] for i in range(25): - messages.append( - {"message": "Message number {}".format(i), "timestamp": timestamp} - ) + messages.append({"message": f"Message number {i}", "timestamp": timestamp}) timestamp += 100 self.conn.put_log_events( diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py index 2ae0eb0a7..94b7d7f6f 100644 --- a/tests/test_managedblockchain/test_managedblockchain_members.py +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -171,7 +171,7 @@ def test_create_another_member_withopts(): ) err = ex.value.response["Error"] err["Code"].should.equal("InvalidRequestException") - err["Message"].should.contain("Invitation {0} not valid".format(invitation_id)) + err["Message"].should.contain(f"Invitation {invitation_id} not valid") # Delete member 2 conn.delete_member(NetworkId=network_id, MemberId=member_id2) @@ -186,7 +186,7 @@ def test_create_another_member_withopts(): conn.get_member(NetworkId=network_id, MemberId=member_id2) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") - err["Message"].should.contain("Member {0} not found".format(member_id2)) + err["Message"].should.contain(f"Member {member_id2} not found") # Delete member 1 conn.delete_member(NetworkId=network_id, MemberId=member_id) @@ 
-422,9 +422,7 @@ def test_create_another_member_alreadyhave(): err = ex.value.response["Error"] err["Code"].should.equal("InvalidRequestException") err["Message"].should.contain( - "Member name {0} already exists in network {1}".format( - "testmember1", network_id - ) + f"Member name testmember1 already exists in network {network_id}" ) diff --git a/tests/test_managedblockchain/test_managedblockchain_nodes.py b/tests/test_managedblockchain/test_managedblockchain_nodes.py index a4f768612..bf8056c73 100644 --- a/tests/test_managedblockchain/test_managedblockchain_nodes.py +++ b/tests/test_managedblockchain/test_managedblockchain_nodes.py @@ -78,7 +78,7 @@ def test_create_node(): conn.get_node(NetworkId=network_id, MemberId=member_id, NodeId=node_id) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") - err["Message"].should.contain("Node {0} not found".format(node_id)) + err["Message"].should.contain(f"Node {node_id} not found") @mock_managedblockchain @@ -147,7 +147,7 @@ def test_create_node_standard_edition(): conn.list_nodes(NetworkId=network_id, MemberId=member_id) err = ex.value.response["Error"] err["Code"].should.equal("ResourceNotFoundException") - err["Message"].should.contain("Member {0} not found".format(member_id)) + err["Message"].should.contain(f"Member {member_id} not found") @mock_managedblockchain @@ -196,7 +196,7 @@ def test_create_too_many_nodes(): err = ex.value.response["Error"] err["Code"].should.equal("ResourceLimitExceededException") err["Message"].should.contain( - "Maximum number of nodes exceeded in member {0}".format(member_id) + f"Maximum number of nodes exceeded in member {member_id}" ) diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py index 3453a95df..625b1d401 100644 --- a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py +++ 
b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -313,7 +313,7 @@ def test_vote_on_proposal_expiredproposal(): err = ex.value.response["Error"] err["Code"].should.equal("InvalidRequestException") err["Message"].should.contain( - "Proposal {0} is expired and you cannot vote on it.".format(proposal_id) + f"Proposal {proposal_id} is expired and you cannot vote on it." ) # Get proposal details - should be EXPIRED @@ -633,7 +633,7 @@ def test_vote_on_proposal_alreadyvoted(): err = ex.value.response["Error"] err["Code"].should.equal("ResourceAlreadyExistsException") err["Message"].should.contain( - "Member {0} has already voted on proposal {1}.".format(member_id, proposal_id) + f"Member {member_id} has already voted on proposal {proposal_id}." ) diff --git a/tests/test_medialive/test_medialive.py b/tests/test_medialive/test_medialive.py index 1cf9ef49c..c1ec3503f 100644 --- a/tests/test_medialive/test_medialive.py +++ b/tests/test_medialive/test_medialive.py @@ -11,7 +11,7 @@ region = "eu-west-1" def _create_input_config(name, **kwargs): role_arn = kwargs.get( "role_arn", - "arn:aws:iam::{}:role/TestMediaLiveInputCreateRole".format(ACCOUNT_ID), + f"arn:aws:iam::{ACCOUNT_ID}:role/TestMediaLiveInputCreateRole", ) input_type = kwargs.get("type", "RTP_PUSH") request_id = kwargs.get("request_id", uuid4().hex) @@ -52,7 +52,7 @@ def _create_input_config(name, **kwargs): def _create_channel_config(name, **kwargs): role_arn = kwargs.get( "role_arn", - "arn:aws:iam::{}:role/TestMediaLiveChannelCreateRole".format(ACCOUNT_ID), + f"arn:aws:iam::{ACCOUNT_ID}:role/TestMediaLiveChannelCreateRole", ) input_id = kwargs.get("input_id", "an-attachment-id") input_settings = kwargs.get( @@ -114,7 +114,7 @@ def test_create_channel_succeeds(): response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) response["Channel"]["Arn"].should.equal( - "arn:aws:medialive:channel:{}".format(response["Channel"]["Id"]) + 
f"arn:aws:medialive:channel:{response['Channel']['Id']}" ) response["Channel"]["Destinations"].should.equal(channel_config["Destinations"]) response["Channel"]["EncoderSettings"].should.equal( @@ -175,7 +175,7 @@ def test_describe_channel_succeeds(): ) describe_response["Arn"].should.equal( - "arn:aws:medialive:channel:{}".format(describe_response["Id"]) + f"arn:aws:medialive:channel:{describe_response['Id']}" ) describe_response["Destinations"].should.equal(channel_config["Destinations"]) describe_response["EncoderSettings"].should.equal(channel_config["EncoderSettings"]) @@ -256,7 +256,7 @@ def test_create_input_succeeds(): r_input = create_response["Input"] input_id = r_input["Id"] assert len(input_id) > 1 - r_input["Arn"].should.equal("arn:aws:medialive:input:{}".format(r_input["Id"])) + r_input["Arn"].should.equal(f"arn:aws:medialive:input:{r_input['Id']}") r_input["Name"].should.equal(input_name) r_input["AttachedChannels"].should.equal([]) r_input["Destinations"].should.equal(input_config["Destinations"]) diff --git a/tests/test_mediapackage/test_mediapackage.py b/tests/test_mediapackage/test_mediapackage.py index 039498a0e..220641648 100644 --- a/tests/test_mediapackage/test_mediapackage.py +++ b/tests/test_mediapackage/test_mediapackage.py @@ -60,9 +60,7 @@ def test_create_channel_succeeds(): response = client.create_channel(**channel_config) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - response["Arn"].should.equal( - "arn:aws:mediapackage:channel:{}".format(response["Id"]) - ) + response["Arn"].should.equal(f"arn:aws:mediapackage:channel:{response['Id']}") response["Description"].should.equal("Awesome channel!") response["Id"].should.equal("channel-id") response["Tags"]["Customer"].should.equal("moto") @@ -76,7 +74,7 @@ def test_describe_channel_succeeds(): create_response = client.create_channel(**channel_config) describe_response = client.describe_channel(Id=create_response["Id"]) describe_response["Arn"].should.equal( - 
"arn:aws:mediapackage:channel:{}".format(describe_response["Id"]) + f"arn:aws:mediapackage:channel:{describe_response['Id']}" ) describe_response["Description"].should.equal(channel_config["Description"]) describe_response["Tags"]["Customer"].should.equal("moto") @@ -90,7 +88,7 @@ def test_describe_unknown_channel_throws_error(): client.describe_channel(Id=channel_id) err = err.value.response["Error"] err["Code"].should.equal("NotFoundException") - err["Message"].should.equal("channel with id={} not found".format(str(channel_id))) + err["Message"].should.equal(f"channel with id={channel_id} not found") @mock_mediapackage @@ -101,7 +99,7 @@ def test_delete_unknown_channel_throws_error(): client.delete_channel(Id=channel_id) err = err.value.response["Error"] err["Code"].should.equal("NotFoundException") - err["Message"].should.equal("channel with id={} not found".format(str(channel_id))) + err["Message"].should.equal(f"channel with id={channel_id} not found") @mock_mediapackage @@ -131,7 +129,7 @@ def test_list_channels_succeds(): len(channels_list).should.equal(1) first_channel = channels_list[0] first_channel["Arn"].should.equal( - "arn:aws:mediapackage:channel:{}".format(first_channel["Id"]) + f"arn:aws:mediapackage:channel:{first_channel['Id']}" ) first_channel["Description"].should.equal(channel_config["Description"]) first_channel["Tags"]["Customer"].should.equal("moto") @@ -145,7 +143,7 @@ def test_create_origin_endpoint_succeeds(): response = client.create_origin_endpoint(**origin_endpoint_config) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) response["Arn"].should.equal( - "arn:aws:mediapackage:origin_endpoint:{}".format(response["Id"]) + f"arn:aws:mediapackage:origin_endpoint:{response['Id']}" ) response["ChannelId"].should.equal(origin_endpoint_config["ChannelId"]) response["Description"].should.equal(origin_endpoint_config["Description"]) @@ -162,16 +160,14 @@ def test_describe_origin_endpoint_succeeds(): describe_response = 
client.describe_origin_endpoint(Id=create_response["Id"]) describe_response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) describe_response["Arn"].should.equal( - "arn:aws:mediapackage:origin_endpoint:{}".format(describe_response["Id"]) + f"arn:aws:mediapackage:origin_endpoint:{describe_response['Id']}" ) describe_response["ChannelId"].should.equal(origin_endpoint_config["ChannelId"]) describe_response["Description"].should.equal(origin_endpoint_config["Description"]) describe_response["HlsPackage"].should.equal(origin_endpoint_config["HlsPackage"]) describe_response["Origination"].should.equal("ALLOW") describe_response["Url"].should.equal( - "https://origin-endpoint.mediapackage.{}.amazonaws.com/{}".format( - region, describe_response["Id"] - ) + f"https://origin-endpoint.mediapackage.{region}.amazonaws.com/{describe_response['Id']}" ) @@ -183,9 +179,7 @@ def test_describe_unknown_origin_endpoint_throws_error(): client.describe_origin_endpoint(Id=channel_id) err = err.value.response["Error"] err["Code"].should.equal("NotFoundException") - err["Message"].should.equal( - "origin endpoint with id={} not found".format(str(channel_id)) - ) + err["Message"].should.equal(f"origin endpoint with id={channel_id} not found") @mock_mediapackage @@ -213,9 +207,7 @@ def test_delete_unknown_origin_endpoint_throws_error(): client.delete_origin_endpoint(Id=channel_id) err = err.value.response["Error"] err["Code"].should.equal("NotFoundException") - err["Message"].should.equal( - "origin endpoint with id={} not found".format(str(channel_id)) - ) + err["Message"].should.equal(f"origin endpoint with id={channel_id} not found") @mock_mediapackage @@ -244,9 +236,7 @@ def test_update_unknown_origin_endpoint_throws_error(): ) err = err.value.response["Error"] err["Code"].should.equal("NotFoundException") - err["Message"].should.equal( - "origin endpoint with id={} not found".format(str(channel_id)) - ) + err["Message"].should.equal(f"origin endpoint with id={channel_id} not 
found") @mock_mediapackage @@ -261,7 +251,7 @@ def test_list_origin_endpoint_succeeds(): len(origin_endpoints_list).should.equal(1) first_origin_endpoint = origin_endpoints_list[0] first_origin_endpoint["Arn"].should.equal( - "arn:aws:mediapackage:origin_endpoint:{}".format(first_origin_endpoint["Id"]) + f"arn:aws:mediapackage:origin_endpoint:{first_origin_endpoint['Id']}" ) first_origin_endpoint["ChannelId"].should.equal(origin_endpoint_config["ChannelId"]) first_origin_endpoint["Description"].should.equal( diff --git a/tests/test_mediastore/test_mediastore.py b/tests/test_mediastore/test_mediastore.py index 4dada0f7a..8c5e0871f 100644 --- a/tests/test_mediastore/test_mediastore.py +++ b/tests/test_mediastore/test_mediastore.py @@ -16,9 +16,7 @@ def test_create_container_succeeds(): ) container = response["Container"] response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - container["ARN"].should.equal( - "arn:aws:mediastore:container:{}".format(container["Name"]) - ) + container["ARN"].should.equal(f"arn:aws:mediastore:container:{container['Name']}") container["Name"].should.equal("Awesome container!") container["Status"].should.equal("CREATING") @@ -33,9 +31,7 @@ def test_describe_container_succeeds(): response = client.describe_container(ContainerName=container_name) response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) container = response["Container"] - container["ARN"].should.equal( - "arn:aws:mediastore:container:{}".format(container["Name"]) - ) + container["ARN"].should.equal(f"arn:aws:mediastore:container:{container_name}") container["Name"].should.equal("Awesome container!") container["Status"].should.equal("ACTIVE") diff --git a/tests/test_opsworks/test_stack.py b/tests/test_opsworks/test_stack.py index 399257f96..80645660a 100644 --- a/tests/test_opsworks/test_stack.py +++ b/tests/test_opsworks/test_stack.py @@ -22,7 +22,7 @@ def test_describe_stacks(): client = boto3.client("opsworks", region_name="us-east-1") for i in range(1, 
4): client.create_stack( - Name="test_stack_{0}".format(i), + Name=f"test_stack_{i}", Region="us-east-1", ServiceRoleArn="service_arn", DefaultInstanceProfileArn="profile_arn", diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index 4a04da617..5e7e8f85f 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -1614,9 +1614,7 @@ def test_register_delegated_administrator(): admin = response["DelegatedAdministrators"][0] admin["Id"].should.equal(account_id) admin["Arn"].should.equal( - "arn:aws:organizations::{0}:account/{1}/{2}".format( - ACCOUNT_ID, org_id, account_id - ) + f"arn:aws:organizations::{ACCOUNT_ID}:account/{org_id}/{account_id}" ) admin["Email"].should.equal(mockemail) admin["Name"].should.equal(mockname) @@ -1740,9 +1738,7 @@ def test_list_delegated_administrators(): admin = response["DelegatedAdministrators"][0] admin["Id"].should.equal(account_id_1) admin["Arn"].should.equal( - "arn:aws:organizations::{0}:account/{1}/{2}".format( - ACCOUNT_ID, org_id, account_id_1 - ) + f"arn:aws:organizations::{ACCOUNT_ID}:account/{org_id}/{account_id_1}" ) admin["Email"].should.equal(mockemail) admin["Name"].should.equal(mockname) diff --git a/tests/test_ram/test_ram.py b/tests/test_ram/test_ram.py index cf3fc915d..07de238bd 100644 --- a/tests/test_ram/test_ram.py +++ b/tests/test_ram/test_ram.py @@ -38,7 +38,7 @@ def test_create_resource_share(): name="test", allowExternalPrincipals=False, resourceArns=[ - "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/tgw-123456789" ], ) @@ -80,7 +80,7 @@ def test_create_resource_share_errors(): # when with pytest.raises(ClientError) as e: client.create_resource_share( - name="test", resourceArns=["arn:aws:iam::{}:role/test".format(ACCOUNT_ID)] + name="test", 
resourceArns=[f"arn:aws:iam::{ACCOUNT_ID}:role/test"] ) ex = e.value ex.operation_name.should.equal("CreateResourceShare") @@ -97,9 +97,7 @@ def test_create_resource_share_errors(): name="test", principals=["invalid"], resourceArns=[ - "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( - ACCOUNT_ID - ) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/tgw-123456789" ], ) ex = e.value @@ -129,7 +127,7 @@ def test_create_resource_share_with_organization(): name="test", principals=[org_arn], resourceArns=[ - "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/tgw-123456789" ], ) @@ -142,7 +140,7 @@ def test_create_resource_share_with_organization(): name="test", principals=[ou_arn], resourceArns=[ - "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format(ACCOUNT_ID) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/tgw-123456789" ], ) @@ -165,13 +163,9 @@ def test_create_resource_share_with_organization_errors(): with pytest.raises(ClientError) as e: client.create_resource_share( name="test", - principals=[ - "arn:aws:organizations::{}:organization/o-unknown".format(ACCOUNT_ID) - ], + principals=[f"arn:aws:organizations::{ACCOUNT_ID}:organization/o-unknown"], resourceArns=[ - "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( - ACCOUNT_ID - ) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/tgw-123456789" ], ) ex = e.value @@ -187,13 +181,9 @@ def test_create_resource_share_with_organization_errors(): with pytest.raises(ClientError) as e: client.create_resource_share( name="test", - principals=[ - "arn:aws:organizations::{}:ou/o-unknown/ou-unknown".format(ACCOUNT_ID) - ], + principals=[f"arn:aws:organizations::{ACCOUNT_ID}:ou/o-unknown/ou-unknown"], resourceArns=[ - "arn:aws:ec2:us-east-1:{}:transit-gateway/tgw-123456789".format( - ACCOUNT_ID - ) + f"arn:aws:ec2:us-east-1:{ACCOUNT_ID}:transit-gateway/tgw-123456789" ], ) ex = e.value @@ 
-284,9 +274,7 @@ def test_update_resource_share_errors(): # when with pytest.raises(ClientError) as e: client.update_resource_share( - resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format( - ACCOUNT_ID - ), + resourceShareArn=f"arn:aws:ram:us-east-1:{ACCOUNT_ID}:resource-share/not-existing", name="test-update", ) ex = e.value @@ -294,9 +282,7 @@ def test_update_resource_share_errors(): ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("UnknownResourceException") ex.response["Error"]["Message"].should.equal( - "ResourceShare arn:aws:ram:us-east-1:{}:resource-share/not-existing could not be found.".format( - ACCOUNT_ID - ) + f"ResourceShare arn:aws:ram:us-east-1:{ACCOUNT_ID}:resource-share/not-existing could not be found." ) @@ -330,18 +316,14 @@ def test_delete_resource_share_errors(): # when with pytest.raises(ClientError) as e: client.delete_resource_share( - resourceShareArn="arn:aws:ram:us-east-1:{}:resource-share/not-existing".format( - ACCOUNT_ID - ) + resourceShareArn=f"arn:aws:ram:us-east-1:{ACCOUNT_ID}:resource-share/not-existing" ) ex = e.value ex.operation_name.should.equal("DeleteResourceShare") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("UnknownResourceException") ex.response["Error"]["Message"].should.equal( - "ResourceShare arn:aws:ram:us-east-1:{}:resource-share/not-existing could not be found.".format( - ACCOUNT_ID - ) + f"ResourceShare arn:aws:ram:us-east-1:{ACCOUNT_ID}:resource-share/not-existing could not be found." 
) diff --git a/tests/test_rds/test_filters.py b/tests/test_rds/test_filters.py index 81e8e46ad..488c87c9e 100644 --- a/tests/test_rds/test_filters.py +++ b/tests/test_rds/test_filters.py @@ -15,8 +15,8 @@ class TestDBInstanceFilters(object): cls.mock.start() client = boto3.client("rds", region_name="us-west-2") for i in range(10): - instance_identifier = "db-instance-{}".format(i) - cluster_identifier = "db-cluster-{}".format(i) + instance_identifier = f"db-instance-{i}" + cluster_identifier = f"db-cluster-{i}" engine = "postgres" if (i % 3) else "mysql" client.create_db_instance( DBInstanceIdentifier=instance_identifier, @@ -200,7 +200,7 @@ class TestDBSnapshotFilters(object): # We'll set up two instances (one postgres, one mysql) # with two snapshots each. for i in range(2): - identifier = "db-instance-{}".format(i) + identifier = f"db-instance-{i}" engine = "postgres" if i else "mysql" client.create_db_instance( DBInstanceIdentifier=identifier, @@ -210,7 +210,7 @@ class TestDBSnapshotFilters(object): for j in range(2): client.create_db_snapshot( DBInstanceIdentifier=identifier, - DBSnapshotIdentifier="{}-snapshot-{}".format(identifier, j), + DBSnapshotIdentifier=f"{identifier}-snapshot-{j}", ) cls.client = client diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 4749405bf..67567772b 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -30,7 +30,7 @@ def test_create_database(): db_instance["MasterUsername"].should.equal("root") db_instance["DBSecurityGroups"][0]["DBSecurityGroupName"].should.equal("my_sg") db_instance["DBInstanceArn"].should.equal( - "arn:aws:rds:us-west-2:{}:db:db-master-1".format(ACCOUNT_ID) + f"arn:aws:rds:us-west-2:{ACCOUNT_ID}:db:db-master-1" ) db_instance["DBInstanceStatus"].should.equal("available") db_instance["DBName"].should.equal("staging-postgres") @@ -333,7 +333,7 @@ def test_get_databases(): instances["DBInstances"][0]["DBInstanceIdentifier"].should.equal("db-master-1") 
instances["DBInstances"][0]["DeletionProtection"].should.equal(False) instances["DBInstances"][0]["DBInstanceArn"].should.equal( - "arn:aws:rds:us-west-2:{}:db:db-master-1".format(ACCOUNT_ID) + f"arn:aws:rds:us-west-2:{ACCOUNT_ID}:db:db-master-1" ) instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") @@ -350,7 +350,7 @@ def test_get_databases_paginated(): conn.create_db_instance( AllocatedStorage=5, Port=5432, - DBInstanceIdentifier="rds%d" % i, + DBInstanceIdentifier=f"rds{i}", DBInstanceClass="db.t1.micro", Engine="postgres", ) @@ -1345,7 +1345,7 @@ def test_list_tags_security_group(): DBSecurityGroupDescription="DB Security Group", Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}], )["DBSecurityGroup"]["DBSecurityGroupName"] - resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group) + resource = f"arn:aws:rds:us-west-2:1234567890:secgrp:{security_group}" result = conn.list_tags_for_resource(ResourceName=resource) result["TagList"].should.equal( [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}] @@ -1362,7 +1362,7 @@ def test_add_tags_security_group(): DBSecurityGroupName="db_sg", DBSecurityGroupDescription="DB Security Group" )["DBSecurityGroup"]["DBSecurityGroupName"] - resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group) + resource = f"arn:aws:rds:us-west-2:1234567890:secgrp:{security_group}" conn.add_tags_to_resource( ResourceName=resource, Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}], @@ -1386,7 +1386,7 @@ def test_remove_tags_security_group(): Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}], )["DBSecurityGroup"]["DBSecurityGroupName"] - resource = "arn:aws:rds:us-west-2:1234567890:secgrp:{0}".format(security_group) + resource = f"arn:aws:rds:us-west-2:1234567890:secgrp:{security_group}" conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["foo"]) result = 
conn.list_tags_for_resource(ResourceName=resource) @@ -1566,7 +1566,7 @@ def test_list_tags_database_subnet_group(): Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}], )["DBSubnetGroup"]["DBSubnetGroupName"] result = conn.list_tags_for_resource( - ResourceName="arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet) + ResourceName=f"arn:aws:rds:us-west-2:1234567890:subgrp:{subnet}" ) result["TagList"].should.equal( [{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}] @@ -1643,7 +1643,7 @@ def test_add_tags_database_subnet_group(): SubnetIds=[subnet["SubnetId"]], Tags=[], )["DBSubnetGroup"]["DBSubnetGroupName"] - resource = "arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet) + resource = f"arn:aws:rds:us-west-2:1234567890:subgrp:{subnet}" conn.add_tags_to_resource( ResourceName=resource, @@ -1675,7 +1675,7 @@ def test_remove_tags_database_subnet_group(): SubnetIds=[subnet["SubnetId"]], Tags=[{"Value": "bar", "Key": "foo"}, {"Value": "bar1", "Key": "foo1"}], )["DBSubnetGroup"]["DBSubnetGroupName"] - resource = "arn:aws:rds:us-west-2:1234567890:subgrp:{0}".format(subnet) + resource = f"arn:aws:rds:us-west-2:1234567890:subgrp:{subnet}" conn.remove_tags_from_resource(ResourceName=resource, TagKeys=["foo"]) @@ -1804,7 +1804,7 @@ def test_create_db_parameter_group(): "test parameter group" ) db_parameter_group["DBParameterGroup"]["DBParameterGroupArn"].should.equal( - "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name) + f"arn:aws:rds:{region}:{ACCOUNT_ID}:pg:{pg_name}" ) @@ -1930,7 +1930,7 @@ def test_describe_db_parameter_group(): "test" ) db_parameter_groups["DBParameterGroups"][0]["DBParameterGroupArn"].should.equal( - "arn:aws:rds:{0}:{1}:pg:{2}".format(region, ACCOUNT_ID, pg_name) + f"arn:aws:rds:{region}:{ACCOUNT_ID}:pg:{pg_name}" ) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 47f7adc18..9ca7eacc8 100644 --- a/tests/test_redshift/test_redshift.py +++ 
b/tests/test_redshift/test_redshift.py @@ -125,7 +125,7 @@ def test_create_many_snapshot_copy_grants(): for i in range(10): client.create_snapshot_copy_grant( - SnapshotCopyGrantName="test-us-east-1-{0}".format(i), KmsKeyId="fake" + SnapshotCopyGrantName=f"test-us-east-1-{i}", KmsKeyId="fake" ) response = client.describe_snapshot_copy_grants() len(response["SnapshotCopyGrants"]).should.equal(10) @@ -216,7 +216,7 @@ def test_create_cluster_all_attributes(): cluster["AutomatedSnapshotRetentionPeriod"].should.equal(10) # Endpoint only returned when ClusterStatus=Available cluster["Endpoint"]["Address"].should.match( - "{}.[a-z0-9]+.{}.redshift.amazonaws.com".format(cluster_identifier, region) + f"{cluster_identifier}.[a-z0-9]+.{region}.redshift.amazonaws.com" ) cluster["Endpoint"]["Port"].should.equal(1234) cluster["ClusterVersion"].should.equal("1.0") @@ -795,7 +795,7 @@ def test_create_cluster_snapshot_of_non_existent_cluster(): cluster_identifier = "non-existent-cluster-id" client.create_cluster_snapshot.when.called_with( SnapshotIdentifier="snapshot-id", ClusterIdentifier=cluster_identifier - ).should.throw(ClientError, "Cluster {} not found.".format(cluster_identifier)) + ).should.throw(ClientError, f"Cluster {cluster_identifier} not found.") @mock_redshift @@ -849,9 +849,7 @@ def test_delete_automated_snapshot(): SnapshotIdentifier=snapshot_identifier ).should.throw( ClientError, - "Cannot delete the snapshot {0} because only manual snapshots may be deleted".format( - snapshot_identifier - ), + f"Cannot delete the snapshot {snapshot_identifier} because only manual snapshots may be deleted", ) @@ -929,13 +927,11 @@ def test_describe_snapshot_with_filter(): client.describe_cluster_snapshots.when.called_with( SnapshotIdentifier=snapshot_identifier, SnapshotType="automated" - ).should.throw(ClientError, "Snapshot {0} not found.".format(snapshot_identifier)) + ).should.throw(ClientError, f"Snapshot {snapshot_identifier} not found.") 
client.describe_cluster_snapshots.when.called_with( SnapshotIdentifier=auto_snapshot_identifier, SnapshotType="manual" - ).should.throw( - ClientError, "Snapshot {0} not found.".format(auto_snapshot_identifier) - ) + ).should.throw(ClientError, f"Snapshot {auto_snapshot_identifier} not found.") @mock_redshift @@ -1074,7 +1070,7 @@ def test_describe_cluster_snapshots_not_found_error(): client.describe_cluster_snapshots.when.called_with( SnapshotIdentifier=snapshot_identifier - ).should.throw(ClientError, "Snapshot {} not found.".format(snapshot_identifier)) + ).should.throw(ClientError, f"Snapshot {snapshot_identifier} not found.") @mock_redshift @@ -1131,7 +1127,7 @@ def test_cluster_snapshot_already_exists(): client.create_cluster_snapshot.when.called_with( SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier - ).should.throw(ClientError, "{} already exists".format(snapshot_identifier)) + ).should.throw(ClientError, f"{snapshot_identifier} already exists") @mock_redshift @@ -1287,13 +1283,11 @@ def test_create_cluster_status_update(): def test_describe_tags_with_resource_type(): client = boto3.client("redshift", region_name="us-east-1") cluster_identifier = "my_cluster" - cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format( - ACCOUNT_ID, cluster_identifier + cluster_arn = ( + f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:cluster:{cluster_identifier}" ) snapshot_identifier = "my_snapshot" - snapshot_arn = "arn:aws:redshift:us-east-1:{}:" "snapshot:{}/{}".format( - ACCOUNT_ID, cluster_identifier, snapshot_identifier - ) + snapshot_arn = f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:snapshot:{cluster_identifier}/{snapshot_identifier}" tag_key = "test-tag-key" tag_value = "test-tag-value" @@ -1333,9 +1327,7 @@ def test_describe_tags_with_resource_type(): @mock_redshift def test_describe_tags_cannot_specify_resource_type_and_resource_name(): client = boto3.client("redshift", region_name="us-east-1") - resource_name = 
"arn:aws:redshift:us-east-1:{}:cluster:cluster-id".format( - ACCOUNT_ID - ) + resource_name = f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:cluster:cluster-id" resource_type = "cluster" client.describe_tags.when.called_with( ResourceName=resource_name, ResourceType=resource_type @@ -1346,13 +1338,11 @@ def test_describe_tags_cannot_specify_resource_type_and_resource_name(): def test_describe_tags_with_resource_name(): client = boto3.client("redshift", region_name="us-east-1") cluster_identifier = "cluster-id" - cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format( - ACCOUNT_ID, cluster_identifier + cluster_arn = ( + f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:cluster:{cluster_identifier}" ) snapshot_identifier = "snapshot-id" - snapshot_arn = "arn:aws:redshift:us-east-1:{}:" "snapshot:{}/{}".format( - ACCOUNT_ID, cluster_identifier, snapshot_identifier - ) + snapshot_arn = f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:snapshot:{cluster_identifier}/{snapshot_identifier}" tag_key = "test-tag-key" tag_value = "test-tag-value" @@ -1393,15 +1383,15 @@ def test_describe_tags_with_resource_name(): def test_create_tags(): client = boto3.client("redshift", region_name="us-east-1") cluster_identifier = "cluster-id" - cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format( - ACCOUNT_ID, cluster_identifier + cluster_arn = ( + f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:cluster:{cluster_identifier}" ) tag_key = "test-tag-key" tag_value = "test-tag-value" num_tags = 5 tags = [] for i in range(0, num_tags): - tag = {"Key": "{}-{}".format(tag_key, i), "Value": "{}-{}".format(tag_value, i)} + tag = {"Key": f"{tag_key}-{i}", "Value": f"{tag_value}-{i}"} tags.append(tag) client.create_cluster( @@ -1424,14 +1414,14 @@ def test_create_tags(): def test_delete_tags(): client = boto3.client("redshift", region_name="us-east-1") cluster_identifier = "cluster-id" - cluster_arn = "arn:aws:redshift:us-east-1:{}:" "cluster:{}".format( - ACCOUNT_ID, cluster_identifier + cluster_arn 
= ( + f"arn:aws:redshift:us-east-1:{ACCOUNT_ID}:cluster:{cluster_identifier}" ) tag_key = "test-tag-key" tag_value = "test-tag-value" tags = [] for i in range(1, 2): - tag = {"Key": "{}-{}".format(tag_key, i), "Value": "{}-{}".format(tag_value, i)} + tag = {"Key": f"{tag_key}-{i}", "Value": f"{tag_value}-{i}"} tags.append(tag) client.create_cluster( @@ -1445,7 +1435,7 @@ def test_delete_tags(): ) client.delete_tags( ResourceName=cluster_arn, - TagKeys=[tag["Key"] for tag in tags if tag["Key"] != "{}-1".format(tag_key)], + TagKeys=[tag["Key"] for tag in tags if tag["Key"] != f"{tag_key}-1"], ) response = client.describe_clusters(ClusterIdentifier=cluster_identifier) cluster = response["Clusters"][0] @@ -1727,7 +1717,7 @@ def test_delete_cluster_without_final_snapshot(): cluster["DBName"].should.equal("test") endpoint = cluster["Endpoint"] endpoint["Address"].should.match( - "{}.[a-z0-9]+.{}.redshift.amazonaws.com".format(cluster_identifier, "us-east-1") + f"{cluster_identifier}.[a-z0-9]+.us-east-1.redshift.amazonaws.com" ) endpoint["Port"].should.equal(5439) cluster["AutomatedSnapshotRetentionPeriod"].should.equal(1) @@ -1864,7 +1854,7 @@ def test_get_cluster_credentials(): response = client.get_cluster_credentials( ClusterIdentifier=cluster_identifier, DbUser=db_user ) - response["DbUser"].should.equal("IAM:%s" % db_user) + response["DbUser"].should.equal(f"IAM:{db_user}") assert time.mktime((response["Expiration"]).timetuple()) == pytest.approx( expected_expiration ) @@ -1873,12 +1863,12 @@ def test_get_cluster_credentials(): response = client.get_cluster_credentials( ClusterIdentifier=cluster_identifier, DbUser=db_user, AutoCreate=True ) - response["DbUser"].should.equal("IAMA:%s" % db_user) + response["DbUser"].should.equal(f"IAMA:{db_user}") response = client.get_cluster_credentials( ClusterIdentifier=cluster_identifier, DbUser="some_other_user", AutoCreate=False ) - response["DbUser"].should.equal("IAM:%s" % "some_other_user") + 
response["DbUser"].should.equal("IAM:some_other_user") expected_expiration = time.mktime( (datetime.datetime.now() + datetime.timedelta(0, 3000)).timetuple() diff --git a/tests/test_redshiftdata/test_server.py b/tests/test_redshiftdata/test_server.py index 7e137ac4c..1ccf9bba9 100644 --- a/tests/test_redshiftdata/test_server.py +++ b/tests/test_redshiftdata/test_server.py @@ -13,7 +13,7 @@ CLIENT_ENDPOINT = "/" def headers(action): return { - "X-Amz-Target": "RedshiftData.%s" % action, + "X-Amz-Target": f"RedshiftData.{action}", "Content-Type": "application/x-amz-json-1.1", } @@ -218,8 +218,7 @@ def test_redshiftdata_execute_statement_and_cancel_statement(client): should_return_expected_exception( cancel_response2, "ValidationException", - "Could not cancel a query that is already in %s state with ID: %s" - % ("ABORTED", execute_payload["Id"]), + f"Could not cancel a query that is already in ABORTED state with ID: {execute_payload['Id']}", ) diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 1340d80b4..f0ccce2da 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -387,15 +387,15 @@ def test_get_resources_rds(): resources_untagged = [] for i in range(3): database = client.create_db_instance( - DBInstanceIdentifier="db-instance-{}".format(i), + DBInstanceIdentifier=f"db-instance-{i}", Engine="postgres", DBInstanceClass="db.m1.small", CopyTagsToSnapshot=True if i else False, - Tags=[{"Key": "test", "Value": "value-{}".format(i)}] if i else [], + Tags=[{"Key": "test", "Value": f"value-{i}"}] if i else [], ).get("DBInstance") snapshot = client.create_db_snapshot( DBInstanceIdentifier=database["DBInstanceIdentifier"], - DBSnapshotIdentifier="snapshot-{}".format(i), + DBSnapshotIdentifier=f"snapshot-{i}", ).get("DBSnapshot") group = resources_tagged if i else 
resources_untagged group.append(database["DBInstanceArn"]) @@ -409,7 +409,7 @@ def test_get_resources_rds(): arn.should.be.within(resources_tagged) arn.should_not.be.within(resources_untagged) if resource_type: - sure.this(":{}:".format(resource_type)).should.be.within(arn) + sure.this(f":{resource_type}:").should.be.within(arn) rtapi = boto3.client("resourcegroupstaggingapi", region_name="us-west-2") resp = rtapi.get_resources(ResourceTypeFilters=["rds"]) diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 749264be6..f8a0277a1 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -1130,7 +1130,7 @@ def test_list_resource_record_sets_name_type_filters(): def create_resource_record_set(rec_type, rec_name): payload = { - "Comment": "create {} record {}".format(rec_type, rec_name), + "Comment": f"create {rec_type} record {rec_name}", "Changes": [ { "Action": "CREATE", @@ -1206,12 +1206,12 @@ def test_change_resource_record_sets_records_limit(): for ci in range(4): resourcerecords = [] for rri in range(250): - resourcerecords.append({"Value": "127.0.0.%d" % (rri)}) + resourcerecords.append({"Value": f"127.0.0.{rri}"}) changes.append( { "Action": "CREATE", "ResourceRecordSet": { - "Name": "foo%d.db." % (ci), + "Name": f"foo{ci}.db.", "Type": "A", "TTL": 10, "ResourceRecords": resourcerecords, @@ -1258,12 +1258,12 @@ def test_change_resource_record_sets_records_limit(): for ci in range(2): resourcerecords = [] for rri in range(250): - resourcerecords.append({"Value": "127.0.0.%d" % (rri)}) + resourcerecords.append({"Value": f"127.0.0.{rri}"}) changes.append( { "Action": "UPSERT", "ResourceRecordSet": { - "Name": "foo%d.db." 
% (ci), + "Name": f"foo{ci}.db.", "Type": "A", "TTL": 10, "ResourceRecords": resourcerecords, diff --git a/tests/test_route53/test_route53_cloudformation.py b/tests/test_route53/test_route53_cloudformation.py index a76b24536..f1e6a7bba 100644 --- a/tests/test_route53/test_route53_cloudformation.py +++ b/tests/test_route53/test_route53_cloudformation.py @@ -108,9 +108,7 @@ def test_route53_roundrobin(): stack = cf.describe_stacks(StackName="test_stack")["Stacks"][0] output = stack["Outputs"][0] output["OutputKey"].should.equal("DomainName") - output["OutputValue"].should.equal( - "arn:aws:route53:::hostedzone/{0}".format(zone_id) - ) + output["OutputValue"].should.equal(f"arn:aws:route53:::hostedzone/{zone_id}") @mock_cloudformation @@ -138,7 +136,7 @@ def test_route53_ec2_instance_with_public_ip(): rrsets.should.have.length_of(2) record_set = rrsets[1] - record_set["Name"].should.equal("{0}.us-west-1.my_zone.".format(instance_id)) + record_set["Name"].should.equal(f"{instance_id}.us-west-1.my_zone.") record_set.shouldnt.have.key("SetIdentifier") record_set["Type"].should.equal("A") record_set["TTL"].should.equal(900) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 185dcdf95..35226208b 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1023,9 +1023,7 @@ def test_policy(): "Effect": "Deny", "Principal": "*", "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::{bucket_name}/*".format( - bucket_name=bucket_name - ), + "Resource": f"arn:aws:s3:::{bucket_name}/*", "Condition": { "StringNotEquals": { "s3:x-amz-server-side-encryption": "aws:kms" @@ -1206,7 +1204,7 @@ def test_list_objects_v2_common_prefix_pagination(): s3.create_bucket(Bucket="mybucket") max_keys = 1 - keys = ["test/{i}/{i}".format(i=i) for i in range(3)] + keys = [f"test/{i}/{i}" for i in range(3)] for key in keys: s3.put_object(Bucket="mybucket", Key=key, Body=b"v") @@ -1235,7 +1233,7 @@ def test_list_objects_v2_common_invalid_continuation_token(): 
s3.create_bucket(Bucket="mybucket") max_keys = 1 - keys = ["test/{i}/{i}".format(i=i) for i in range(3)] + keys = [f"test/{i}/{i}" for i in range(3)] for key in keys: s3.put_object(Bucket="mybucket", Key=key, Body=b"v") @@ -2139,11 +2137,9 @@ def test_put_bucket_notification_errors(): s3.put_bucket_notification_configuration( Bucket="bucket", NotificationConfiguration={ - "{}Configurations".format(tech): [ + f"{tech}Configurations": [ { - "{}Arn".format( - tech - ): "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", + f"{tech}Arn": "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", "Events": ["s3:ObjectCreated:*"], } ] @@ -2280,13 +2276,13 @@ def test_put_bucket_logging(): BucketLoggingStatus={ "LoggingEnabled": { "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), + "TargetPrefix": f"{bucket_name}/", } }, ) result = s3.get_bucket_logging(Bucket=bucket_name) assert result["LoggingEnabled"]["TargetBucket"] == log_bucket - assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert result["LoggingEnabled"]["TargetPrefix"] == f"{bucket_name}/" assert not result["LoggingEnabled"].get("TargetGrants") # And disabling: @@ -2299,7 +2295,7 @@ def test_put_bucket_logging(): BucketLoggingStatus={ "LoggingEnabled": { "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), + "TargetPrefix": f"{bucket_name}/", "TargetGrants": [ { "Grantee": { @@ -2333,7 +2329,7 @@ def test_put_bucket_logging(): BucketLoggingStatus={ "LoggingEnabled": { "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), + "TargetPrefix": f"{bucket_name}/", "TargetGrants": [ { "Grantee": { @@ -2356,7 +2352,7 @@ def test_put_bucket_logging(): BucketLoggingStatus={ "LoggingEnabled": { "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), + "TargetPrefix": f"{bucket_name}/", "TargetGrants": [ { "Grantee": { @@ -3074,7 +3070,7 @@ def test_creating_presigned_post(): ] conditions.append(["content-length-range", 1, 
30]) - real_key = "{file_uid}.txt".format(file_uid=file_uid) + real_key = f"{file_uid}.txt" data = s3.generate_presigned_post( Bucket=bucket, Key=real_key, @@ -3428,12 +3424,12 @@ def test_request_partial_content_should_contain_all_metadata(): obj = boto3.resource("s3").Object(bucket, object_key) obj.put(Body=body) - response = obj.get(Range="bytes={}".format(query_range)) + response = obj.get(Range=f"bytes={query_range}") assert response["ETag"] == obj.e_tag assert response["LastModified"] == obj.last_modified assert response["ContentLength"] == 4 - assert response["ContentRange"] == "bytes {}/{}".format(query_range, len(body)) + assert response["ContentRange"] == f"bytes {query_range}/{len(body)}" @mock_s3 diff --git a/tests/test_s3/test_s3_acl.py b/tests/test_s3/test_s3_acl.py index a32568098..041b3c87d 100644 --- a/tests/test_s3/test_s3_acl.py +++ b/tests/test_s3/test_s3_acl.py @@ -120,7 +120,7 @@ def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): ) for i in range(1, 10): response = requests.get(presigned_url) - assert response.status_code == 200, "Failed on req number {}".format(i) + assert response.status_code == 200, f"Failed on req number {i}" @mock_s3 diff --git a/tests/test_s3/test_s3_cloudformation.py b/tests/test_s3/test_s3_cloudformation.py index 5496184a0..638cb1c3e 100644 --- a/tests/test_s3/test_s3_cloudformation.py +++ b/tests/test_s3/test_s3_cloudformation.py @@ -192,23 +192,15 @@ def test_s3_bucket_cloudformation_outputs(): outputs_list = cf.Stack(stack_name).outputs output = {item["OutputKey"]: item["OutputValue"] for item in outputs_list} s3.head_bucket(Bucket=output["BucketName"]) - output["BucketARN"].should.match("arn:aws:s3.+{bucket}".format(bucket=bucket_name)) - output["BucketDomainName"].should.equal( - "{bucket}.s3.amazonaws.com".format(bucket=bucket_name) - ) + output["BucketARN"].should.match(f"arn:aws:s3.+{bucket_name}") + output["BucketDomainName"].should.equal(f"{bucket_name}.s3.amazonaws.com") 
output["BucketDualStackDomainName"].should.equal( - "{bucket}.s3.dualstack.{region}.amazonaws.com".format( - bucket=bucket_name, region=region_name - ) + f"{bucket_name}.s3.dualstack.{region_name}.amazonaws.com" ) output["BucketRegionalDomainName"].should.equal( - "{bucket}.s3.{region}.amazonaws.com".format( - bucket=bucket_name, region=region_name - ) + f"{bucket_name}.s3.{region_name}.amazonaws.com" ) output["BucketWebsiteURL"].should.equal( - "http://{bucket}.s3-website.{region}.amazonaws.com".format( - bucket=bucket_name, region=region_name - ) + f"http://{bucket_name}.s3-website.{region_name}.amazonaws.com" ) output["BucketName"].should.equal(bucket_name) diff --git a/tests/test_s3/test_s3_config.py b/tests/test_s3/test_s3_config.py index 2dde559cc..50e9ab71c 100644 --- a/tests/test_s3/test_s3_config.py +++ b/tests/test_s3/test_s3_config.py @@ -33,7 +33,7 @@ def test_s3_public_access_block_to_config_dict(): ].public_access_block.to_config_dict() for key, value in public_access_block.items(): - k = "{lowercase}{rest}".format(lowercase=key[0].lower(), rest=key[1:]) + k = f"{key[0].lower()}{key[1:]}" assert result[k] is (value == "True") # Verify that this resides in the full bucket's to_config_dict: @@ -70,15 +70,15 @@ def test_list_config_discovered_resources(): for x in range(0, 10): assert result[x] == { "type": "AWS::S3::Bucket", - "id": "bucket{}".format(x), - "name": "bucket{}".format(x), + "id": f"bucket{x}", + "name": f"bucket{x}", "region": "us-west-2", } for x in range(10, 12): assert result[x] == { "type": "AWS::S3::Bucket", - "id": "eu-bucket{}".format(x), - "name": "eu-bucket{}".format(x), + "id": f"eu-bucket{x}", + "name": f"eu-bucket{x}", "region": "eu-west-1", } diff --git a/tests/test_s3/test_s3_copyobject.py b/tests/test_s3/test_s3_copyobject.py index dff201779..c360624f8 100644 --- a/tests/test_s3/test_s3_copyobject.py +++ b/tests/test_s3/test_s3_copyobject.py @@ -28,7 +28,7 @@ def test_copy_key_boto3(key_name): key.put(Body=b"some value") 
key2 = s3.Object("foobar", "new-key") - key2.copy_from(CopySource="foobar/{}".format(key_name)) + key2.copy_from(CopySource=f"foobar/{key_name}") resp = client.get_object(Bucket="foobar", Key=key_name) resp["Body"].read().should.equal(b"some value") @@ -55,9 +55,7 @@ def test_copy_key_with_version_boto3(): old_version = [v for v in all_versions if not v["IsLatest"]][0] key2 = s3.Object("foobar", "new-key") - key2.copy_from( - CopySource="foobar/the-key?versionId={}".format(old_version["VersionId"]) - ) + key2.copy_from(CopySource=f"foobar/the-key?versionId={old_version['VersionId']}") resp = client.get_object(Bucket="foobar", Key="the-key") resp["Body"].read().should.equal(b"another value") @@ -155,7 +153,7 @@ def test_copy_key_without_changes_should_error(): with pytest.raises(ClientError) as e: client.copy_object( Bucket=bucket_name, - CopySource="{}/{}".format(bucket_name, key_name), + CopySource=f"{bucket_name}/{key_name}", Key=key_name, ) e.value.response["Error"]["Message"].should.equal( @@ -176,7 +174,7 @@ def test_copy_key_without_changes_should_not_error(): client.copy_object( Bucket=bucket_name, - CopySource="{}/{}".format(bucket_name, key_name), + CopySource=f"{bucket_name}/{key_name}", Key=key_name, Metadata={"some-key": "some-value"}, MetadataDirective="REPLACE", diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 09fbd09cc..70ca0faeb 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -119,7 +119,7 @@ def test_s3_server_post_to_bucket_redirect(): "success_action_redirect": redirect_base, }, ) - real_key = "asdf/the-key/{}".format(filename) + real_key = f"asdf/the-key/{filename}" res.status_code.should.equal(303) redirect = res.headers["location"] assert redirect.startswith(redirect_base) @@ -129,7 +129,7 @@ def test_s3_server_post_to_bucket_redirect(): assert args["key"][0] == real_key assert args["bucket"][0] == "tester" - res = test_client.get("/{}".format(real_key), 
"http://tester.localhost:5000/") + res = test_client.get(f"/{real_key}", "http://tester.localhost:5000/") res.status_code.should.equal(200) res.data.should.equal(filecontent.encode("utf8")) diff --git a/tests/test_sagemaker/cloudformation_test_configs.py b/tests/test_sagemaker/cloudformation_test_configs.py index 3571c8c80..014e016c4 100644 --- a/tests/test_sagemaker/cloudformation_test_configs.py +++ b/tests/test_sagemaker/cloudformation_test_configs.py @@ -69,9 +69,7 @@ class NotebookInstanceTestConfig(TestConfig): def get_cloudformation_template(self, include_outputs=True, **kwargs): instance_type = kwargs.get("instance_type", "ml.c4.xlarge") - role_arn = kwargs.get( - "role_arn", "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) - ) + role_arn = kwargs.get("role_arn", f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole") template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -170,7 +168,7 @@ class ModelTestConfig(TestConfig): def get_cloudformation_template(self, include_outputs=True, **kwargs): execution_role_arn = kwargs.get( - "execution_role_arn", "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) + "execution_role_arn", f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole" ) image = kwargs.get( "image", "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1" @@ -224,7 +222,7 @@ class EndpointConfigTestConfig(TestConfig): "InitialVariantWeight": 1, "InstanceType": "ml.c4.xlarge", "ModelName": self.resource_name, - "VariantName": "variant-name-{}".format(i), + "VariantName": f"variant-name-{i}", } for i in range(num_production_variants) ] @@ -252,7 +250,7 @@ class EndpointConfigTestConfig(TestConfig): sagemaker_client.create_model( ModelName=self.resource_name, - ExecutionRoleArn="arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID), + ExecutionRoleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole", PrimaryContainer={ "Image": "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1", }, @@ -302,7 +300,7 @@ class EndpointTestConfig(TestConfig): 
sagemaker_client.create_model( ModelName=self.resource_name, - ExecutionRoleArn="arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID), + ExecutionRoleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole", PrimaryContainer={ "Image": "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1", }, diff --git a/tests/test_sagemaker/test_sagemaker_cloudformation.py b/tests/test_sagemaker/test_sagemaker_cloudformation.py index 742126a13..2119eb749 100644 --- a/tests/test_sagemaker/test_sagemaker_cloudformation.py +++ b/tests/test_sagemaker/test_sagemaker_cloudformation.py @@ -44,7 +44,7 @@ def test_sagemaker_cloudformation_create(test_config): # Utilize test configuration to set-up any mock SageMaker resources test_config.run_setup_procedure(sm) - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" cf.create_stack( StackName=stack_name, TemplateBody=test_config.get_cloudformation_template(include_outputs=False), @@ -77,7 +77,7 @@ def test_sagemaker_cloudformation_get_attr(test_config): test_config.run_setup_procedure(sm) # Create stack and get description for output values - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" cf.create_stack( StackName=stack_name, TemplateBody=test_config.get_cloudformation_template() ) @@ -113,7 +113,7 @@ def test_sagemaker_cloudformation_notebook_instance_delete(test_config, error_me test_config.run_setup_procedure(sm) # Create stack and verify existence - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" cf.create_stack( StackName=stack_name, TemplateBody=test_config.get_cloudformation_template() ) @@ -142,7 +142,7 @@ def test_sagemaker_cloudformation_notebook_instance_update(): test_config = NotebookInstanceTestConfig() # Set up template for stack with two different instance types - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = 
f"{test_config.resource_name}_stack" initial_instance_type = "ml.c4.xlarge" updated_instance_type = "ml.c4.4xlarge" initial_template_json = test_config.get_cloudformation_template( @@ -184,7 +184,7 @@ def test_sagemaker_cloudformation_notebook_instance_lifecycle_config_update(): test_config = NotebookInstanceLifecycleConfigTestConfig() # Set up template for stack with two different OnCreate scripts - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" initial_on_create_script = "echo Hello World" updated_on_create_script = "echo Goodbye World" initial_template_json = test_config.get_cloudformation_template( @@ -232,7 +232,7 @@ def test_sagemaker_cloudformation_model_update(): test_config = ModelTestConfig() # Set up template for stack with two different image versions - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" image = "404615174143.dkr.ecr.us-east-2.amazonaws.com/kmeans:{}" initial_image_version = 1 updated_image_version = 2 @@ -282,7 +282,7 @@ def test_sagemaker_cloudformation_endpoint_config_update(): test_config.run_setup_procedure(sm) # Set up template for stack with two different production variant counts - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" initial_num_production_variants = 1 updated_num_production_variants = 2 initial_template_json = test_config.get_cloudformation_template( @@ -328,7 +328,7 @@ def test_sagemaker_cloudformation_endpoint_update(): test_config = EndpointTestConfig() # Set up template for stack with two different endpoint config names - stack_name = "{}_stack".format(test_config.resource_name) + stack_name = f"{test_config.resource_name}_stack" initial_endpoint_config_name = test_config.resource_name updated_endpoint_config_name = "updated-endpoint-config-name" initial_template_json = test_config.get_cloudformation_template( @@ -341,7 +341,7 @@ 
def test_sagemaker_cloudformation_endpoint_update(): # Create SM resources and stack with initial template and check attributes sm.create_model( ModelName=initial_endpoint_config_name, - ExecutionRoleArn="arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID), + ExecutionRoleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole", PrimaryContainer={ "Image": "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1", }, @@ -372,7 +372,7 @@ def test_sagemaker_cloudformation_endpoint_update(): # Create additional SM resources and update stack sm.create_model( ModelName=updated_endpoint_config_name, - ExecutionRoleArn="arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID), + ExecutionRoleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole", PrimaryContainer={ "Image": "404615174143.dkr.ecr.us-east-2.amazonaws.com/linear-learner:1", }, diff --git a/tests/test_sagemaker/test_sagemaker_endpoint.py b/tests/test_sagemaker/test_sagemaker_endpoint.py index a1fe05c6e..af2ab0e8b 100644 --- a/tests/test_sagemaker/test_sagemaker_endpoint.py +++ b/tests/test_sagemaker/test_sagemaker_endpoint.py @@ -10,7 +10,7 @@ from moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID import pytest TEST_REGION_NAME = "us-east-1" -TEST_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +TEST_ROLE_ARN = f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole" GENERIC_TAGS_PARAM = [ {"Key": "newkey1", "Value": "newval1"}, {"Key": "newkey2", "Value": "newval2"}, @@ -56,18 +56,14 @@ def create_endpoint_config_helper(sagemaker_client, production_variants): ProductionVariants=production_variants, ) resp["EndpointConfigArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format( - TEST_ENDPOINT_CONFIG_NAME - ) + rf"^arn:aws:sagemaker:.*:.*:endpoint-config/{TEST_ENDPOINT_CONFIG_NAME}$" ) resp = sagemaker_client.describe_endpoint_config( EndpointConfigName=TEST_ENDPOINT_CONFIG_NAME ) resp["EndpointConfigArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format( - TEST_ENDPOINT_CONFIG_NAME - ) + 
rf"^arn:aws:sagemaker:.*:.*:endpoint-config/{TEST_ENDPOINT_CONFIG_NAME}$" ) resp["EndpointConfigName"].should.equal(TEST_ENDPOINT_CONFIG_NAME) resp["ProductionVariants"].should.equal(production_variants) @@ -104,18 +100,14 @@ def test_delete_endpoint_config(sagemaker_client): ProductionVariants=TEST_PRODUCTION_VARIANTS, ) resp["EndpointConfigArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format( - TEST_ENDPOINT_CONFIG_NAME - ) + rf"^arn:aws:sagemaker:.*:.*:endpoint-config/{TEST_ENDPOINT_CONFIG_NAME}$" ) resp = sagemaker_client.describe_endpoint_config( EndpointConfigName=TEST_ENDPOINT_CONFIG_NAME ) resp["EndpointConfigArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format( - TEST_ENDPOINT_CONFIG_NAME - ) + rf"^arn:aws:sagemaker:.*:.*:endpoint-config/{TEST_ENDPOINT_CONFIG_NAME}$" ) sagemaker_client.delete_endpoint_config( @@ -151,9 +143,7 @@ def test_create_endpoint_invalid_instance_type(sagemaker_client): ProductionVariants=production_variants, ) assert e.value.response["Error"]["Code"] == "ValidationException" - expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( - instance_type - ) + expected_message = f"Value '{instance_type}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [" assert expected_message in e.value.response["Error"]["Message"] @@ -170,9 +160,7 @@ def test_create_endpoint_invalid_memory_size(sagemaker_client): ProductionVariants=production_variants, ) assert e.value.response["Error"]["Code"] == "ValidationException" - expected_message = "Value '{}' at 'MemorySizeInMB' failed to satisfy constraint: Member must satisfy enum value set: [".format( - memory_size - ) + expected_message = f"Value '{memory_size}' at 'MemorySizeInMB' failed to satisfy constraint: Member must satisfy enum value set: [" assert expected_message in e.value.response["Error"]["Message"] @@ -198,12 +186,12 @@ def 
test_create_endpoint(sagemaker_client): Tags=GENERIC_TAGS_PARAM, ) resp["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(TEST_ENDPOINT_NAME) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{TEST_ENDPOINT_NAME}$" ) resp = sagemaker_client.describe_endpoint(EndpointName=TEST_ENDPOINT_NAME) resp["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(TEST_ENDPOINT_NAME) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{TEST_ENDPOINT_NAME}$" ) resp["EndpointName"].should.equal(TEST_ENDPOINT_NAME) resp["EndpointConfigName"].should.equal(TEST_ENDPOINT_CONFIG_NAME) @@ -308,12 +296,12 @@ def test_update_endpoint_weights_and_capacities_one_variant(sagemaker_client): ], ) response["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(TEST_ENDPOINT_NAME) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{TEST_ENDPOINT_NAME}$" ) resp = sagemaker_client.describe_endpoint(EndpointName=TEST_ENDPOINT_NAME) resp["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(TEST_ENDPOINT_NAME) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{TEST_ENDPOINT_NAME}$" ) resp["EndpointName"].should.equal(TEST_ENDPOINT_NAME) resp["EndpointConfigName"].should.equal(TEST_ENDPOINT_CONFIG_NAME) @@ -377,12 +365,12 @@ def test_update_endpoint_weights_and_capacities_two_variants(sagemaker_client): DesiredWeightsAndCapacities=desired_weights_and_capacities, ) response["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(TEST_ENDPOINT_NAME) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{TEST_ENDPOINT_NAME}$" ) resp = sagemaker_client.describe_endpoint(EndpointName=TEST_ENDPOINT_NAME) resp["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(TEST_ENDPOINT_NAME) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{TEST_ENDPOINT_NAME}$" ) resp["EndpointName"].should.equal(TEST_ENDPOINT_NAME) resp["EndpointConfigName"].should.equal(TEST_ENDPOINT_CONFIG_NAME) @@ -565,7 +553,7 @@ def _create_endpoint_config( 
EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants ) resp["EndpointConfigArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint-config/{}$".format(endpoint_config_name) + rf"^arn:aws:sagemaker:.*:.*:endpoint-config/{endpoint_config_name}$" ) @@ -574,5 +562,5 @@ def _create_endpoint(boto_client, endpoint_name, endpoint_config_name): EndpointName=endpoint_name, EndpointConfigName=endpoint_config_name ) resp["EndpointArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:endpoint/{}$".format(endpoint_name) + rf"^arn:aws:sagemaker:.*:.*:endpoint/{endpoint_name}$" ) diff --git a/tests/test_sagemaker/test_sagemaker_models.py b/tests/test_sagemaker/test_sagemaker_models.py index 2bb5bdd9d..32f9212c7 100644 --- a/tests/test_sagemaker/test_sagemaker_models.py +++ b/tests/test_sagemaker/test_sagemaker_models.py @@ -58,7 +58,7 @@ def test_create_model(sagemaker_client): VpcConfig=vpc_config.response_object, ) model["ModelArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:model/{}$".format(TEST_MODEL_NAME) + rf"^arn:aws:sagemaker:.*:.*:model/{TEST_MODEL_NAME}$" ) @@ -84,7 +84,7 @@ def test_list_models(sagemaker_client): assert len(models["Models"]).should.equal(1) assert models["Models"][0]["ModelName"].should.equal(TEST_MODEL_NAME) assert models["Models"][0]["ModelArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:model/{}$".format(TEST_MODEL_NAME) + rf"^arn:aws:sagemaker:.*:.*:model/{TEST_MODEL_NAME}$" ) diff --git a/tests/test_sagemaker/test_sagemaker_notebooks.py b/tests/test_sagemaker/test_sagemaker_notebooks.py index f59dda640..11c16311d 100644 --- a/tests/test_sagemaker/test_sagemaker_notebooks.py +++ b/tests/test_sagemaker/test_sagemaker_notebooks.py @@ -10,7 +10,7 @@ import pytest TEST_REGION_NAME = "us-east-1" FAKE_SUBNET_ID = "subnet-012345678" FAKE_SECURITY_GROUP_IDS = ["sg-0123456789abcdef0", "sg-0123456789abcdef1"] -FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +FAKE_ROLE_ARN = f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole" 
FAKE_KMS_KEY_ID = "62d4509a-9f96-446c-a9ba-6b1c353c8c58" GENERIC_TAGS_PARAM = [ {"Key": "newkey1", "Value": "newval1"}, @@ -130,9 +130,7 @@ def test_create_notebook_instance_invalid_instance_type(sagemaker_client): with pytest.raises(ClientError) as ex: sagemaker_client.create_notebook_instance(**args) assert ex.value.response["Error"]["Code"] == "ValidationException" - expected_message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [".format( - instance_type - ) + expected_message = f"Value '{instance_type}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: [" assert expected_message in ex.value.response["Error"]["Message"] @@ -155,9 +153,7 @@ def test_notebook_instance_lifecycle(sagemaker_client): with pytest.raises(ClientError) as ex: sagemaker_client.delete_notebook_instance(NotebookInstanceName=FAKE_NAME_PARAM) assert ex.value.response["Error"]["Code"] == "ValidationException" - expected_message = "Status (InService) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format( - notebook_instance_arn - ) + expected_message = f"Status (InService) not in ([Stopped, Failed]). 
Unable to transition to (Deleting) for Notebook Instance ({notebook_instance_arn})" assert expected_message in ex.value.response["Error"]["Message"] sagemaker_client.stop_notebook_instance(NotebookInstanceName=FAKE_NAME_PARAM) diff --git a/tests/test_sagemaker/test_sagemaker_processing.py b/tests/test_sagemaker/test_sagemaker_processing.py index 1d9652ddc..f9f500073 100644 --- a/tests/test_sagemaker/test_sagemaker_processing.py +++ b/tests/test_sagemaker/test_sagemaker_processing.py @@ -54,7 +54,7 @@ class MyProcessingJobModel(object): "InputName": "input", "AppManaged": False, "S3Input": { - "S3Uri": "s3://{}/{}/processing/".format(self.bucket, self.prefix), + "S3Uri": f"s3://{self.bucket}/{self.prefix}/processing/", "LocalPath": "/opt/ml/processing/input", "S3DataType": "S3Prefix", "S3InputMode": "File", @@ -68,9 +68,7 @@ class MyProcessingJobModel(object): { "OutputName": "output", "S3Output": { - "S3Uri": "s3://{}/{}/processing/".format( - self.bucket, self.prefix - ), + "S3Uri": f"s3://{self.bucket}/{self.prefix}/processing/", "LocalPath": "/opt/ml/processing/output", "S3UploadMode": "EndOfJob", }, @@ -132,7 +130,7 @@ def test_create_processing_job(sagemaker_client): ) resp = job.save(sagemaker_client) resp["ProcessingJobArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(FAKE_PROCESSING_JOB_NAME) + rf"^arn:aws:sagemaker:.*:.*:processing-job/{FAKE_PROCESSING_JOB_NAME}$" ) resp = sagemaker_client.describe_processing_job( @@ -140,7 +138,7 @@ def test_create_processing_job(sagemaker_client): ) resp["ProcessingJobName"].should.equal(FAKE_PROCESSING_JOB_NAME) resp["ProcessingJobArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(FAKE_PROCESSING_JOB_NAME) + rf"^arn:aws:sagemaker:.*:.*:processing-job/{FAKE_PROCESSING_JOB_NAME}$" ) assert "python3" in resp["AppSpecification"]["ContainerEntrypoint"] assert "app.py" in resp["AppSpecification"]["ContainerEntrypoint"] @@ -164,7 +162,7 @@ def 
test_list_processing_jobs(sagemaker_client): assert processing_jobs["ProcessingJobSummaries"][0][ "ProcessingJobArn" ].should.match( - r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(FAKE_PROCESSING_JOB_NAME) + rf"^arn:aws:sagemaker:.*:.*:processing-job/{FAKE_PROCESSING_JOB_NAME}$" ) assert processing_jobs.get("NextToken") is None @@ -216,15 +214,15 @@ def test_list_processing_jobs_should_validate_input(sagemaker_client): def test_list_processing_jobs_with_name_filters(sagemaker_client): for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( sagemaker_client ) for i in range(5): - name = "vgg-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i) + name = f"vgg-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( sagemaker_client ) @@ -240,8 +238,8 @@ def test_list_processing_jobs_with_name_filters(sagemaker_client): def test_list_processing_jobs_paginated(sagemaker_client): for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( sagemaker_client ) @@ -269,15 +267,15 @@ def test_list_processing_jobs_paginated(sagemaker_client): def test_list_processing_jobs_paginated_with_target_in_middle(sagemaker_client): for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( 
sagemaker_client ) for i in range(5): - name = "vgg-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i) + name = f"vgg-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( sagemaker_client ) @@ -311,15 +309,15 @@ def test_list_processing_jobs_paginated_with_target_in_middle(sagemaker_client): def test_list_processing_jobs_paginated_with_fragmented_targets(sagemaker_client): for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( sagemaker_client ) for i in range(5): - name = "vgg-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i) + name = f"vgg-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{i}" MyProcessingJobModel(processing_job_name=name, role_arn=arn).save( sagemaker_client ) @@ -351,7 +349,7 @@ def test_list_processing_jobs_paginated_with_fragmented_targets(sagemaker_client def test_add_and_delete_tags_in_training_job(sagemaker_client): processing_job_name = "MyProcessingJob" - role_arn = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) + role_arn = f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole" container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1" bucket = "my-bucket" prefix = "my-prefix" diff --git a/tests/test_sagemaker/test_sagemaker_training.py b/tests/test_sagemaker/test_sagemaker_training.py index cdf656289..5a130e30f 100644 --- a/tests/test_sagemaker/test_sagemaker_training.py +++ b/tests/test_sagemaker/test_sagemaker_training.py @@ -7,7 +7,7 @@ import pytest from moto import mock_sagemaker from moto.core import DEFAULT_ACCOUNT_ID as ACCOUNT_ID -FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) +FAKE_ROLE_ARN = 
f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole" TEST_REGION_NAME = "us-east-1" @@ -48,7 +48,7 @@ class MyTrainingJobModel(object): "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", - "S3Uri": "s3://{}/{}/train/".format(self.bucket, self.prefix), + "S3Uri": f"s3://{self.bucket}/{self.prefix}/train/", "S3DataDistributionType": "ShardedByS3Key", } }, @@ -60,9 +60,7 @@ class MyTrainingJobModel(object): "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", - "S3Uri": "s3://{}/{}/validation/".format( - self.bucket, self.prefix - ), + "S3Uri": f"s3://{self.bucket}/{self.prefix}/validation/", "S3DataDistributionType": "FullyReplicated", } }, @@ -71,7 +69,7 @@ class MyTrainingJobModel(object): }, ] self.output_data_config = output_data_config or { - "S3OutputPath": "s3://{}/{}/".format(self.bucket, self.prefix) + "S3OutputPath": f"s3://{self.bucket}/{self.prefix}/" } self.hyper_parameters = hyper_parameters or { "feature_dim": "30", @@ -105,7 +103,7 @@ def test_create_training_job(): sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME) training_job_name = "MyTrainingJob" - role_arn = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID) + role_arn = f"arn:aws:iam::{ACCOUNT_ID}:role/FakeRole" container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1" bucket = "my-bucket" prefix = "sagemaker/DEMO-breast-cancer-prediction/" @@ -124,7 +122,7 @@ def test_create_training_job(): "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", - "S3Uri": "s3://{}/{}/train/".format(bucket, prefix), + "S3Uri": f"s3://{bucket}/{prefix}/train/", "S3DataDistributionType": "ShardedByS3Key", } }, @@ -136,7 +134,7 @@ def test_create_training_job(): "DataSource": { "S3DataSource": { "S3DataType": "S3Prefix", - "S3Uri": "s3://{}/{}/validation/".format(bucket, prefix), + "S3Uri": f"s3://{bucket}/{prefix}/validation/", "S3DataDistributionType": "FullyReplicated", } }, @@ -144,7 +142,7 @@ def test_create_training_job(): "RecordWrapperType": "None", }, ] - 
output_data_config = {"S3OutputPath": "s3://{}/{}/".format(bucket, prefix)} + output_data_config = {"S3OutputPath": f"s3://{bucket}/{prefix}/"} hyper_parameters = { "feature_dim": "30", "mini_batch_size": "100", @@ -170,13 +168,13 @@ def test_create_training_job(): ) resp = job.save() resp["TrainingJobArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) + rf"^arn:aws:sagemaker:.*:.*:training-job/{training_job_name}$" ) resp = sagemaker.describe_training_job(TrainingJobName=training_job_name) resp["TrainingJobName"].should.equal(training_job_name) resp["TrainingJobArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(training_job_name) + rf"^arn:aws:sagemaker:.*:.*:training-job/{training_job_name}$" ) assert resp["ModelArtifacts"]["S3ModelArtifacts"].startswith( output_data_config["S3OutputPath"] @@ -233,7 +231,7 @@ def test_list_training_jobs(): ) assert training_jobs["TrainingJobSummaries"][0]["TrainingJobArn"].should.match( - r"^arn:aws:sagemaker:.*:.*:training-job/{}$".format(name) + rf"^arn:aws:sagemaker:.*:.*:training-job/{name}$" ) assert training_jobs.get("NextToken") is None @@ -293,12 +291,12 @@ def test_list_training_jobs_should_validate_input(): def test_list_training_jobs_with_name_filters(): client = boto3.client("sagemaker", region_name="us-east-1") for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() for i in range(5): - name = "vgg-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i) + name = f"vgg-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() xgboost_training_jobs = client.list_training_jobs(NameContains="xgboost") assert 
len(xgboost_training_jobs["TrainingJobSummaries"]).should.equal(5) @@ -311,8 +309,8 @@ def test_list_training_jobs_with_name_filters(): def test_list_training_jobs_paginated(): client = boto3.client("sagemaker", region_name="us-east-1") for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() xgboost_training_job_1 = client.list_training_jobs( NameContains="xgboost", MaxResults=1 @@ -339,12 +337,12 @@ def test_list_training_jobs_paginated(): def test_list_training_jobs_paginated_with_target_in_middle(): client = boto3.client("sagemaker", region_name="us-east-1") for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() for i in range(5): - name = "vgg-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i) + name = f"vgg-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() vgg_training_job_1 = client.list_training_jobs(NameContains="vgg", MaxResults=1) @@ -372,12 +370,12 @@ def test_list_training_jobs_paginated_with_target_in_middle(): def test_list_training_jobs_paginated_with_fragmented_targets(): client = boto3.client("sagemaker", region_name="us-east-1") for i in range(5): - name = "xgboost-{}".format(i) - arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i) + name = f"xgboost-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() for i in range(5): - name = "vgg-{}".format(i) - arn = 
"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i) + name = f"vgg-{i}" + arn = f"arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{i}" MyTrainingJobModel(training_job_name=name, role_arn=arn).save() training_jobs_with_2 = client.list_training_jobs(NameContains="2", MaxResults=8) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 3fc8e7f73..2017350d1 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -65,9 +65,7 @@ def test_get_secret_value_by_arn(): secret_value = "test_get_secret_value_by_arn" result = conn.create_secret(Name=name, SecretString=secret_value) arn = result["ARN"] - arn.should.match( - "^arn:aws:secretsmanager:us-west-2:{}:secret:{}".format(ACCOUNT_ID, name) - ) + arn.should.match(f"^arn:aws:secretsmanager:us-west-2:{ACCOUNT_ID}:secret:{name}") result = conn.get_secret_value(SecretId=arn) assert result["SecretString"] == secret_value diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 4994d643a..c2c27cdb9 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -83,7 +83,7 @@ def test_send_email(): conn.verify_domain_identity(Domain="example.com") conn.send_email(**kwargs) - too_many_addresses = list("to%s@example.com" % i for i in range(51)) + too_many_addresses = list(f"to{i}@example.com" for i in range(51)) conn.send_email.when.called_with( **dict(kwargs, Destination={"ToAddresses": too_many_addresses}) ).should.throw(ClientError) @@ -226,7 +226,7 @@ def test_send_bulk_templated_email(): too_many_destinations = list( { "Destination": { - "ToAddresses": ["to%s@example.com" % i], + "ToAddresses": [f"to{i}@example.com"], "CcAddresses": [], "BccAddresses": [], } @@ -241,7 +241,7 @@ def test_send_bulk_templated_email(): ex.value.response["Error"]["Code"].should.equal("MessageRejected") 
ex.value.response["Error"]["Message"].should.equal("Too many destinations.") - too_many_destinations = list("to%s@example.com" % i for i in range(51)) + too_many_destinations = list(f"to{i}@example.com" for i in range(51)) with pytest.raises(ClientError) as ex: args = dict( @@ -301,7 +301,7 @@ def test_send_templated_email(): conn.send_templated_email(**kwargs) - too_many_addresses = list("to%s@example.com" % i for i in range(51)) + too_many_addresses = list(f"to{i}@example.com" for i in range(51)) conn.send_templated_email.when.called_with( **dict(kwargs, Destination={"ToAddresses": too_many_addresses}) ).should.throw(ClientError) diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py index 6f6e94361..3040021b5 100644 --- a/tests/test_ses/test_ses_sns_boto3.py +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -30,7 +30,7 @@ def __setup_feedback_env__( sns_conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:%s:%s:%s" % (region, ACCOUNT_ID, queue), + Endpoint=f"arn:aws:sqs:{region}:{ACCOUNT_ID}:{queue}", ) # Verify SES domain ses_conn.verify_domain_identity(Domain=domain) diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index e9799532d..bdc7e3e4c 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -19,7 +19,7 @@ def test_create_platform_application(): ) application_arn = response["PlatformApplicationArn"] application_arn.should.equal( - "arn:aws:sns:us-east-1:{}:app/APNS/my-application".format(ACCOUNT_ID) + f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:app/APNS/my-application" ) @@ -131,7 +131,7 @@ def test_create_platform_endpoint(): endpoint_arn = endpoint["EndpointArn"] endpoint_arn.should.contain( - "arn:aws:sns:us-east-1:{}:endpoint/APNS/my-application/".format(ACCOUNT_ID) + f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:endpoint/APNS/my-application/" ) diff --git a/tests/test_sns/test_publish_batch.py 
b/tests/test_sns/test_publish_batch.py index 8cb6abaf6..a0d65162a 100644 --- a/tests/test_sns/test_publish_batch.py +++ b/tests/test_sns/test_publish_batch.py @@ -124,7 +124,7 @@ def test_publish_batch_to_sqs(): sqs_conn = boto3.resource("sqs", region_name="us-east-1") queue = sqs_conn.create_queue(QueueName="test-queue") - queue_url = "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID) + queue_url = f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue" client.subscribe(TopicArn=topic_arn, Protocol="sqs", Endpoint=queue_url) resp = client.publish_batch(TopicArn=topic_arn, PublishBatchRequestEntries=entries) @@ -155,7 +155,7 @@ def test_publish_batch_to_sqs_raw(): sqs = boto3.resource("sqs", region_name="us-east-1") queue = sqs.create_queue(QueueName="test-queue") - queue_url = "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID) + queue_url = f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue" client.subscribe( TopicArn=topic_arn, Protocol="sqs", diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 7968b2825..b73217a17 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -37,7 +37,7 @@ def test_publish_to_sqs(): conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue", ) message = "my message" with freeze_time("2015-01-01 12:00:00"): @@ -117,7 +117,7 @@ def test_publish_to_sqs_bad(): conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue", ) message = "my message" try: @@ -325,7 +325,7 @@ def test_publish_to_sqs_dump_json(): conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue", ) message = 
json.dumps( @@ -374,7 +374,7 @@ def test_publish_to_sqs_in_different_region(): conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-west-2:{}:test-queue".format(ACCOUNT_ID), + Endpoint=f"arn:aws:sqs:us-west-2:{ACCOUNT_ID}:test-queue", ) message = "my message" @@ -444,7 +444,7 @@ def test_publish_subject(): conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue", ) message = "my message" subject1 = "test subject" @@ -476,7 +476,7 @@ def test_publish_null_subject(): conn.subscribe( TopicArn=topic_arn, Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), + Endpoint=f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:test-queue", ) message = "my message" with freeze_time("2015-01-01 12:00:00"): diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index 338cc2789..bba85538b 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -16,11 +16,11 @@ def test_sns_server_get(): topic_data = test_client.action_data("CreateTopic", Name="testtopic") topic_data.should.contain("CreateTopicResult") topic_data.should.contain( - "arn:aws:sns:us-east-1:{}:testtopic".format(ACCOUNT_ID) + f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:testtopic" ) topics_data = test_client.action_data("ListTopics") topics_data.should.contain("ListTopicsResult") topic_data.should.contain( - "arn:aws:sns:us-east-1:{}:testtopic".format(ACCOUNT_ID) + f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:testtopic" ) diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 8aa82aa9f..76844c8aa 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -19,9 +19,7 @@ def test_create_and_delete_topic(): topics = topics_json["Topics"] topics.should.have.length_of(1) topics[0]["TopicArn"].should.equal( - "arn:aws:sns:{0}:{1}:{2}".format( - 
conn._client_config.region_name, ACCOUNT_ID, topic_name - ) + f"arn:aws:sns:{conn._client_config.region_name}:{ACCOUNT_ID}:{topic_name}" ) # Delete the topic @@ -108,7 +106,7 @@ def test_create_topic_must_meet_constraints(): conn = boto3.client("sns", region_name="us-east-1") common_random_chars = [":", ";", "!", "@", "|", "^", "%"] for char in common_random_chars: - conn.create_topic.when.called_with(Name="no%s_invalidchar" % char).should.throw( + conn.create_topic.when.called_with(Name=f"no{char}_invalidchar").should.throw( ClientError ) conn.create_topic.when.called_with(Name="no spaces allowed").should.throw( @@ -140,9 +138,7 @@ def test_topic_corresponds_to_region(): conn.create_topic(Name="some-topic") topics_json = conn.list_topics() topic_arn = topics_json["Topics"][0]["TopicArn"] - topic_arn.should.equal( - "arn:aws:sns:{0}:{1}:some-topic".format(region, ACCOUNT_ID) - ) + topic_arn.should.equal(f"arn:aws:sns:{region}:{ACCOUNT_ID}:some-topic") @mock_sns @@ -155,9 +151,7 @@ def test_topic_attributes(): attributes = conn.get_topic_attributes(TopicArn=topic_arn)["Attributes"] attributes["TopicArn"].should.equal( - "arn:aws:sns:{0}:{1}:some-topic".format( - conn._client_config.region_name, ACCOUNT_ID - ) + f"arn:aws:sns:{conn._client_config.region_name}:{ACCOUNT_ID}:some-topic" ) attributes["Owner"].should.equal(ACCOUNT_ID) json.loads(attributes["Policy"]).should.equal( @@ -180,9 +174,7 @@ def test_topic_attributes(): "SNS:Publish", "SNS:Receive", ], - "Resource": "arn:aws:sns:us-east-1:{}:some-topic".format( - ACCOUNT_ID - ), + "Resource": f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:some-topic", "Condition": {"StringEquals": {"AWS:SourceOwner": ACCOUNT_ID}}, } ], @@ -274,9 +266,7 @@ def test_add_remove_permissions(): "SNS:Publish", "SNS:Receive", ], - "Resource": "arn:aws:sns:us-east-1:{}:test-permissions".format( - ACCOUNT_ID - ), + "Resource": f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:test-permissions", "Condition": {"StringEquals": {"AWS:SourceOwner": ACCOUNT_ID}}, }, 
{ @@ -284,9 +274,7 @@ def test_add_remove_permissions(): "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::999999999999:root"}, "Action": "SNS:Publish", - "Resource": "arn:aws:sns:us-east-1:{}:test-permissions".format( - ACCOUNT_ID - ), + "Resource": f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:test-permissions", }, ], } @@ -315,9 +303,7 @@ def test_add_remove_permissions(): "SNS:Publish", "SNS:Receive", ], - "Resource": "arn:aws:sns:us-east-1:{}:test-permissions".format( - ACCOUNT_ID - ), + "Resource": f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:test-permissions", "Condition": {"StringEquals": {"AWS:SourceOwner": ACCOUNT_ID}}, } ], @@ -343,7 +329,7 @@ def test_add_remove_permissions(): ] }, "Action": ["SNS:Publish", "SNS:Subscribe"], - "Resource": "arn:aws:sns:us-east-1:{}:test-permissions".format(ACCOUNT_ID), + "Resource": f"arn:aws:sns:us-east-1:{ACCOUNT_ID}:test-permissions", } ) @@ -484,8 +470,7 @@ def test_tag_resource_errors(): ).should.throw(ClientError, "Resource does not exist") too_many_tags = [ - {"Key": "tag_key_{}".format(i), "Value": "tag_value_{}".format(i)} - for i in range(51) + {"Key": f"tag_key_{i}", "Value": f"tag_value_{i}"} for i in range(51) ] conn.tag_resource.when.called_with( ResourceArn=topic_arn, Tags=too_many_tags diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py index afdd888fd..fd8b8a77e 100644 --- a/tests/test_sqs/test_server.py +++ b/tests/test_sqs/test_server.py @@ -24,14 +24,14 @@ def test_sqs_list_identities(): for queue_name in ("testqueue", "otherqueue.fifo"): - res = test_client.put("/?Action=CreateQueue&QueueName=%s" % queue_name) + res = test_client.put(f"/?Action=CreateQueue&QueueName={queue_name}") res = test_client.put( - "/123/%s?MessageBody=test-message&Action=SendMessage" % queue_name + f"/123/{queue_name}?MessageBody=test-message&Action=SendMessage" ) res = test_client.get( - "/123/%s?Action=ReceiveMessage&MaxNumberOfMessages=1" % queue_name + 
f"/123/{queue_name}?Action=ReceiveMessage&MaxNumberOfMessages=1" ) message = re.search("(.*?)", res.data.decode("utf-8")).groups()[0] diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index ad64fb22b..ad215e736 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -73,8 +73,9 @@ def test_create_queue_with_same_attributes(): "MaximumMessageSize": "262144", "MessageRetentionPeriod": "1209600", "ReceiveMessageWaitTimeSeconds": "20", - "RedrivePolicy": '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' - % (dlq_arn), + "RedrivePolicy": json.dumps( + {"deadLetterTargetArn": dlq_arn, "maxReceiveCount": 100} + ), "VisibilityTimeout": "43200", } @@ -763,7 +764,7 @@ def test_get_queue_attributes(): response["Attributes"]["MaximumMessageSize"].should.equal("262144") response["Attributes"]["MessageRetentionPeriod"].should.equal("345600") response["Attributes"]["QueueArn"].should.equal( - "arn:aws:sqs:us-east-1:{}:{}".format(ACCOUNT_ID, q_name) + f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:{q_name}" ) response["Attributes"]["ReceiveMessageWaitTimeSeconds"].should.equal("0") response["Attributes"]["VisibilityTimeout"].should.equal("30") @@ -783,7 +784,7 @@ def test_get_queue_attributes(): { "ApproximateNumberOfMessages": "0", "MaximumMessageSize": "262144", - "QueueArn": "arn:aws:sqs:us-east-1:{}:{}".format(ACCOUNT_ID, q_name), + "QueueArn": f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:{q_name}", "VisibilityTimeout": "30", "RedrivePolicy": json.dumps( {"deadLetterTargetArn": dlq_arn1, "maxReceiveCount": 2} @@ -2057,9 +2058,7 @@ def test_send_message_batch_errors(): ], ).should.throw(ClientError, "Id id_2 repeated.") - entries = [ - {"Id": "id_{}".format(i), "MessageBody": "body_{}".format(i)} for i in range(11) - ] + entries = [{"Id": f"id_{i}", "MessageBody": f"body_{i}"} for i in range(11)] client.send_message_batch.when.called_with( QueueUrl=queue_url, Entries=entries ).should.throw( @@ -2443,9 +2442,7 @@ def test_tag_queue_errors(): 
ClientError, "The request must contain the parameter Tags." ) - too_many_tags = { - "tag_key_{}".format(i): "tag_value_{}".format(i) for i in range(51) - } + too_many_tags = {f"tag_key_{i}": f"tag_value_{i}" for i in range(51)} client.tag_queue.when.called_with( QueueUrl=queue_url, Tags=too_many_tags ).should.throw(ClientError, f"Too many tags added for queue {q_name}.") @@ -2620,7 +2617,7 @@ def test_redrive_policy_available(): def test_redrive_policy_non_existent_queue(): sqs = boto3.client("sqs", region_name="us-east-1") redrive_policy = { - "deadLetterTargetArn": "arn:aws:sqs:us-east-1:{}:no-queue".format(ACCOUNT_ID), + "deadLetterTargetArn": f"arn:aws:sqs:us-east-1:{ACCOUNT_ID}:no-queue", "maxReceiveCount": 1, } @@ -2890,9 +2887,7 @@ def test_send_message_fails_when_message_size_greater_than_max_message_size(): queue.send_message(MessageBody="a" * (message_size_limit + 1)) ex = e.value ex.response["Error"]["Code"].should.equal("InvalidParameterValue") - ex.response["Error"]["Message"].should.contain( - "{} bytes".format(message_size_limit) - ) + ex.response["Error"]["Message"].should.contain(f"{message_size_limit} bytes") @mock_sqs diff --git a/tests/test_sqs/test_sqs_cloudformation.py b/tests/test_sqs/test_sqs_cloudformation.py index c96d63fef..983c896d0 100644 --- a/tests/test_sqs/test_sqs_cloudformation.py +++ b/tests/test_sqs/test_sqs_cloudformation.py @@ -56,7 +56,7 @@ def test_describe_stack_subresources(): cf.create_stack(StackName=stack_name, TemplateBody=template_body) queue_urls = client.list_queues(QueueNamePrefix=q_name)["QueueUrls"] - assert any(["{}/{}".format(ACCOUNT_ID, q_name) in url for url in queue_urls]) + assert any([f"{ACCOUNT_ID}/{q_name}" in url for url in queue_urls]) stack = res.Stack(stack_name) for s in stack.resource_summaries.all(): @@ -77,7 +77,7 @@ def test_list_stack_resources(): cf.create_stack(StackName=stack_name, TemplateBody=template_body) queue_urls = client.list_queues(QueueNamePrefix=q_name)["QueueUrls"] - assert 
any(["{}/{}".format(ACCOUNT_ID, q_name) in url for url in queue_urls]) + assert any([f"{ACCOUNT_ID}/{q_name}" in url for url in queue_urls]) queue = cf.list_stack_resources(StackName=stack_name)["StackResourceSummaries"][0] diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index a795be0c7..757dd1877 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -119,8 +119,8 @@ def test_get_parameters_by_path(): {p["ARN"] for p in response["Parameters"]}.should.equal( set( [ - "arn:aws:ssm:us-east-1:{}:parameter/foo".format(ACCOUNT_ID), - "arn:aws:ssm:us-east-1:{}:parameter/baz".format(ACCOUNT_ID), + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/foo", + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/baz", ] ) ) @@ -254,7 +254,7 @@ def test_put_parameter(name): response["Parameters"][0]["DataType"].should.equal("text") response["Parameters"][0]["LastModifiedDate"].should.be.a(datetime.datetime) response["Parameters"][0]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/{}".format(ACCOUNT_ID, name) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/{name}" ) initial_modification_date = response["Parameters"][0]["LastModifiedDate"] @@ -266,7 +266,7 @@ def test_put_parameter(name): except botocore.exceptions.ClientError as err: err.operation_name.should.equal("PutParameter") err.response["Error"]["Message"].should.equal( - "Parameter {} already exists.".format(name) + f"Parameter {name} already exists." 
) response = client.get_parameters(Names=[name], WithDecryption=False) @@ -282,7 +282,7 @@ def test_put_parameter(name): initial_modification_date ) response["Parameters"][0]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/{}".format(ACCOUNT_ID, name) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/{name}" ) new_data_type = "aws:ec2:image" @@ -323,7 +323,7 @@ def test_put_parameter(name): initial_modification_date ) response["Parameters"][0]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/{}".format(ACCOUNT_ID, name) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/{name}" ) @@ -390,16 +390,12 @@ def test_put_parameter_invalid_names(): aws_path = "/aws_test/path/to/var" client.put_parameter.when.called_with( Name=aws_path, Value="value", Type="String" - ).should.throw( - ClientError, "No access to reserved parameter name: {}.".format(aws_path) - ) + ).should.throw(ClientError, f"No access to reserved parameter name: {aws_path}.") aws_path = "/AWS/PATH/TO/VAR" client.put_parameter.when.called_with( Name=aws_path, Value="value", Type="String" - ).should.throw( - ClientError, "No access to reserved parameter name: {}.".format(aws_path) - ) + ).should.throw(ClientError, f"No access to reserved parameter name: {aws_path}.") @mock_ssm @@ -447,7 +443,7 @@ def test_get_parameter(): response["Parameter"]["DataType"].should.equal("text") response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) response["Parameter"]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/test".format(ACCOUNT_ID) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/test" ) @@ -474,7 +470,7 @@ def test_get_parameter_with_version_and_labels(): response["Parameter"]["DataType"].should.equal("text") response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) response["Parameter"]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/test-1".format(ACCOUNT_ID) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/test-1" ) response = 
client.get_parameter(Name="test-2:1", WithDecryption=False) @@ -484,7 +480,7 @@ def test_get_parameter_with_version_and_labels(): response["Parameter"]["DataType"].should.equal("text") response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) response["Parameter"]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/test-2".format(ACCOUNT_ID) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/test-2" ) response = client.get_parameter(Name="test-2:test-label", WithDecryption=False) @@ -494,7 +490,7 @@ def test_get_parameter_with_version_and_labels(): response["Parameter"]["DataType"].should.equal("text") response["Parameter"]["LastModifiedDate"].should.be.a(datetime.datetime) response["Parameter"]["ARN"].should.equal( - "arn:aws:ssm:us-east-1:{}:parameter/test-2".format(ACCOUNT_ID) + f"arn:aws:ssm:us-east-1:{ACCOUNT_ID}:parameter/test-2" ) with pytest.raises(ClientError) as ex: @@ -532,12 +528,11 @@ def test_get_parameters_errors(): ex.operation_name.should.equal("GetParameters") ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) ex.response["Error"]["Code"].should.contain("ValidationException") + all_keys = ", ".join(ssm_parameters.keys()) ex.response["Error"]["Message"].should.equal( "1 validation error detected: " - "Value '[{}]' at 'names' failed to satisfy constraint: " - "Member must have length less than or equal to 10.".format( - ", ".join(ssm_parameters.keys()) - ) + f"Value '[{all_keys}]' at 'names' failed to satisfy constraint: " + "Member must have length less than or equal to 10." 
) @@ -582,7 +577,7 @@ def test_describe_parameters_paging(): client = boto3.client("ssm", region_name="us-east-1") for i in range(50): - client.put_parameter(Name="param-%d" % i, Value="value-%d" % i, Type="String") + client.put_parameter(Name=f"param-{i}", Value=f"value-{i}", Type="String") response = client.describe_parameters() response["Parameters"].should.have.length_of(10) @@ -614,7 +609,7 @@ def test_describe_parameters_filter_names(): client = boto3.client("ssm", region_name="us-east-1") for i in range(50): - p = {"Name": "param-%d" % i, "Value": "value-%d" % i, "Type": "String"} + p = {"Name": f"param-{i}", "Value": f"value-{i}", "Type": "String"} if i % 5 == 0: p["Type"] = "SecureString" p["KeyId"] = "a key" @@ -636,7 +631,7 @@ def test_describe_parameters_filter_type(): client = boto3.client("ssm", region_name="us-east-1") for i in range(50): - p = {"Name": "param-%d" % i, "Value": "value-%d" % i, "Type": "String"} + p = {"Name": f"param-{i}", "Value": f"value-{i}", "Type": "String"} if i % 5 == 0: p["Type"] = "SecureString" p["KeyId"] = "a key" @@ -657,10 +652,10 @@ def test_describe_parameters_filter_keyid(): client = boto3.client("ssm", region_name="us-east-1") for i in range(50): - p = {"Name": "param-%d" % i, "Value": "value-%d" % i, "Type": "String"} + p = {"Name": f"param-{i}", "Value": f"value-{i}", "Type": "String"} if i % 5 == 0: p["Type"] = "SecureString" - p["KeyId"] = "key:%d" % i + p["KeyId"] = f"key:{i}" client.put_parameter(**p) response = client.describe_parameters( @@ -1160,8 +1155,8 @@ def test_get_parameter_history(): for i in range(3): client.put_parameter( Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="String", Overwrite=True, ) @@ -1172,9 +1167,9 @@ def test_get_parameter_history(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) 
param["Type"].should.equal("String") - param["Value"].should.equal("value-%d" % index) + param["Value"].should.equal(f"value-{index}") param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") param["Labels"].should.equal([]) len(parameters_response).should.equal(3) @@ -1189,8 +1184,8 @@ def test_get_parameter_history_with_secure_string(): for i in range(3): client.put_parameter( Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="SecureString", Overwrite=True, ) @@ -1204,15 +1199,15 @@ def test_get_parameter_history_with_secure_string(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) param["Type"].should.equal("SecureString") - expected_plaintext_value = "value-%d" % index + expected_plaintext_value = f"value-{index}" if with_decryption: param["Value"].should.equal(expected_plaintext_value) else: param["Value"].should.equal( - "kms:alias/aws/ssm:%s" % expected_plaintext_value + f"kms:alias/aws/ssm:{expected_plaintext_value}" ) param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") len(parameters_response).should.equal(3) @@ -1294,8 +1289,8 @@ def test_label_parameter_moving_versions(): for i in range(3): client.put_parameter( Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="String", Overwrite=True, ) @@ -1317,9 +1312,9 @@ def test_label_parameter_moving_versions(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) param["Type"].should.equal("String") - 
param["Value"].should.equal("value-%d" % index) + param["Value"].should.equal(f"value-{index}") param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") labels = test_labels if param["Version"] == 2 else [] param["Labels"].should.equal(labels) @@ -1335,8 +1330,8 @@ def test_label_parameter_moving_versions_complex(): for i in range(3): client.put_parameter( Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="String", Overwrite=True, ) @@ -1362,9 +1357,9 @@ def test_label_parameter_moving_versions_complex(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) param["Type"].should.equal("String") - param["Value"].should.equal("value-%d" % index) + param["Value"].should.equal(f"value-{index}") param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") labels = ( ["test-label2", "test-label3"] if param["Version"] == 2 @@ -1520,15 +1515,15 @@ def test_label_parameter_version_invalid_label(): ) response["InvalidLabels"].should.equal(["abc/123"]) + long_name = "a" * 101 client.label_parameter_version.when.called_with( - Name=test_parameter_name, ParameterVersion=1, Labels=["a" * 101] + Name=test_parameter_name, ParameterVersion=1, Labels=[long_name] ).should.throw( ClientError, "1 validation error detected: " - "Value '[%s]' at 'labels' failed to satisfy constraint: " + f"Value '[{long_name}]' at 'labels' failed to satisfy constraint: " "Member must satisfy constraint: " - "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]" - % ("a" * 101), + "[Member must have length less than or equal to 100, 
Member must have length greater than or equal to 1]", ) @@ -1542,8 +1537,8 @@ def test_get_parameter_history_with_label(): for i in range(3): client.put_parameter( Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="String", Overwrite=True, ) @@ -1558,9 +1553,9 @@ def test_get_parameter_history_with_label(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) param["Type"].should.equal("String") - param["Value"].should.equal("value-%d" % index) + param["Value"].should.equal(f"value-{index}") param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") labels = test_labels if param["Version"] == 1 else [] param["Labels"].should.equal(labels) @@ -1577,8 +1572,8 @@ def test_get_parameter_history_with_label_non_latest(): for i in range(3): client.put_parameter( Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="String", Overwrite=True, ) @@ -1593,9 +1588,9 @@ def test_get_parameter_history_with_label_non_latest(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) param["Type"].should.equal("String") - param["Value"].should.equal("value-%d" % index) + param["Value"].should.equal(f"value-{index}") param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") labels = test_labels if param["Version"] == 2 else [] param["Labels"].should.equal(labels) @@ -1612,8 +1607,8 @@ def test_get_parameter_history_with_label_latest_assumed(): for i in range(3): client.put_parameter( 
Name=test_parameter_name, - Description="A test parameter version %d" % i, - Value="value-%d" % i, + Description=f"A test parameter version {i}", + Value=f"value-{i}", Type="String", Overwrite=True, ) @@ -1626,9 +1621,9 @@ def test_get_parameter_history_with_label_latest_assumed(): for index, param in enumerate(parameters_response): param["Name"].should.equal(test_parameter_name) param["Type"].should.equal("String") - param["Value"].should.equal("value-%d" % index) + param["Value"].should.equal(f"value-{index}") param["Version"].should.equal(index + 1) - param["Description"].should.equal("A test parameter version %d" % index) + param["Description"].should.equal(f"A test parameter version {index}") labels = test_labels if param["Version"] == 3 else [] param["Labels"].should.equal(labels) @@ -1848,7 +1843,7 @@ def test_parameter_version_limit(): for i in range(PARAMETER_VERSION_LIMIT + 1): client.put_parameter( Name=parameter_name, - Value="value-%d" % (i + 1), + Value=f"value-{(i+1)}", Type="String", Overwrite=True, ) @@ -1862,7 +1857,7 @@ def test_parameter_version_limit(): len(parameter_history).should.equal(PARAMETER_VERSION_LIMIT) parameter_history[0]["Value"].should.equal("value-2") latest_version_index = PARAMETER_VERSION_LIMIT - 1 - latest_version_value = "value-%d" % (PARAMETER_VERSION_LIMIT + 1) + latest_version_value = f"value-{PARAMETER_VERSION_LIMIT + 1}" parameter_history[latest_version_index]["Value"].should.equal(latest_version_value) @@ -1873,7 +1868,7 @@ def test_parameter_overwrite_fails_when_limit_reached_and_oldest_version_has_lab for i in range(PARAMETER_VERSION_LIMIT): client.put_parameter( Name=parameter_name, - Value="value-%d" % (i + 1), + Value=f"value-{(i+1)}", Type="String", Overwrite=True, ) @@ -1903,21 +1898,21 @@ def test_get_parameters_includes_invalid_parameter_when_requesting_invalid_versi for i in range(versions_to_create): client.put_parameter( Name=parameter_name, - Value="value-%d" % (i + 1), + Value=f"value-{(i+1)}", 
Type="String", Overwrite=True, ) response = client.get_parameters( Names=[ - "test-param:%d" % (versions_to_create + 1), - "test-param:%d" % (versions_to_create - 1), + f"test-param:{versions_to_create + 1}", + f"test-param:{versions_to_create - 1}", ] ) len(response["InvalidParameters"]).should.equal(1) response["InvalidParameters"][0].should.equal( - "test-param:%d" % (versions_to_create + 1) + f"test-param:{versions_to_create + 1}" ) len(response["Parameters"]).should.equal(1) @@ -1935,7 +1930,7 @@ def test_get_parameters_includes_invalid_parameter_when_requesting_invalid_label for i in range(versions_to_create): client.put_parameter( Name=parameter_name, - Value="value-%d" % (i + 1), + Value=f"value-{(i+1)}", Type="String", Overwrite=True, ) @@ -1990,10 +1985,8 @@ def test_get_parameter_history_should_throw_exception_when_MaxResults_is_too_lar error["Code"].should.equal("ValidationException") error["Message"].should.equal( "1 validation error detected: " - "Value '{}' at 'maxResults' failed to satisfy constraint: " - "Member must have value less than or equal to 50.".format( - PARAMETER_HISTORY_MAX_RESULTS + 1 - ) + f"Value '{PARAMETER_HISTORY_MAX_RESULTS + 1}' at 'maxResults' failed to satisfy constraint: " + "Member must have value less than or equal to 50." 
) diff --git a/tests/test_stepfunctions/test_stepfunctions.py b/tests/test_stepfunctions/test_stepfunctions.py index ef1a93583..31eb7f186 100644 --- a/tests/test_stepfunctions/test_stepfunctions.py +++ b/tests/test_stepfunctions/test_stepfunctions.py @@ -228,7 +228,7 @@ def test_state_machine_list_returns_created_state_machines(): def test_state_machine_list_pagination(): client = boto3.client("stepfunctions", region_name=region) for i in range(25): - machine_name = "StateMachine-{}".format(i) + machine_name = f"StateMachine-{i}" client.create_state_machine( name=machine_name, definition=str(simple_definition), diff --git a/tests/test_stepfunctions/test_stepfunctions_cloudformation.py b/tests/test_stepfunctions/test_stepfunctions_cloudformation.py index 3e2aef855..232c87c57 100644 --- a/tests/test_stepfunctions/test_stepfunctions_cloudformation.py +++ b/tests/test_stepfunctions/test_stepfunctions_cloudformation.py @@ -49,8 +49,8 @@ def test_state_machine_cloudformation(): state_machine["definition"].should.equal(definition) tags = sf.list_tags_for_resource(resourceArn=output["StateMachineArn"]).get("tags") for i, tag in enumerate(tags, 1): - tag["key"].should.equal("key{}".format(i)) - tag["value"].should.equal("value{}".format(i)) + tag["key"].should.equal(f"key{i}") + tag["value"].should.equal(f"value{i}") cf.Stack("test_stack").delete() with pytest.raises(ClientError) as ex: diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 3d122f02e..e1c446a05 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -52,11 +52,9 @@ def test_get_federation_token_boto3(): creds["SecretAccessKey"].should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") fed_user["Arn"].should.equal( - "arn:aws:sts::{account_id}:federated-user/{token_name}".format( - account_id=ACCOUNT_ID, token_name=token_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:federated-user/{token_name}" ) - fed_user["FederatedUserId"].should.equal("{}:{}".format(ACCOUNT_ID, 
token_name)) + fed_user["FederatedUserId"].should.equal(f"{ACCOUNT_ID}:{token_name}") @freeze_time("2012-01-01 12:00:00") @@ -83,9 +81,7 @@ def test_assume_role(): "Version": "2012-10-17", "Statement": { "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID) - }, + "Principal": {"AWS": f"arn:aws:iam::{ACCOUNT_ID}:root"}, "Action": "sts:AssumeRole", }, } @@ -112,9 +108,7 @@ def test_assume_role(): credentials["SecretAccessKey"].should.have.length_of(40) assume_role_response["AssumedRoleUser"]["Arn"].should.equal( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( - account_id=ACCOUNT_ID, role_name=role_name, session_name=session_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{session_name}" ) assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].startswith("AROA") assert ( @@ -137,13 +131,9 @@ def test_assume_role_with_saml(): provider_name = "TestProvFed" fed_identifier = "7ca82df9-1bad-4dd3-9b2b-adb68b554282" fed_name = "testuser" - role_input = "arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=ACCOUNT_ID, role_name=role_name - ) - principal_role = "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( - account_id=ACCOUNT_ID, provider_name=provider_name - ) - saml_assertion = """ + role_input = f"arn:aws:iam::{ACCOUNT_ID}:role/{role_name}" + principal_role = f"arn:aws:iam:{ACCOUNT_ID}:saml-provider/{provider_name}" + saml_assertion = f""" http://localhost/ @@ -188,7 +178,7 @@ def test_assume_role_with_saml(): {fed_name} - arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name} + arn:aws:iam::{ACCOUNT_ID}:saml-provider/{provider_name},arn:aws:iam::{ACCOUNT_ID}:role/{role_name} 900 @@ -200,13 +190,7 @@ def test_assume_role_with_saml(): -""".format( - account_id=ACCOUNT_ID, - role_name=role_name, - provider_name=provider_name, - fed_identifier=fed_identifier, - fed_name=fed_name, - ).replace( 
+""".replace( "\n", "" ) @@ -226,16 +210,14 @@ def test_assume_role_with_saml(): credentials["SecretAccessKey"].should.have.length_of(40) assume_role_response["AssumedRoleUser"]["Arn"].should.equal( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{fed_name}".format( - account_id=ACCOUNT_ID, role_name=role_name, fed_name=fed_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{fed_name}" ) assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].startswith("AROA") assert assume_role_response["AssumedRoleUser"]["AssumedRoleId"].endswith( - ":{fed_name}".format(fed_name=fed_name) + f":{fed_name}" ) assume_role_response["AssumedRoleUser"]["AssumedRoleId"].should.have.length_of( - 21 + 1 + len("{fed_name}".format(fed_name=fed_name)) + 21 + 1 + len(f"{fed_name}") ) @@ -247,13 +229,9 @@ def test_assume_role_with_saml_should_not_rely_on_attribute_order(): provider_name = "TestProvFed" fed_identifier = "7ca82df9-1bad-4dd3-9b2b-adb68b554282" fed_name = "testuser" - role_input = "arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=ACCOUNT_ID, role_name=role_name - ) - principal_role = "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( - account_id=ACCOUNT_ID, provider_name=provider_name - ) - saml_assertion = """ + role_input = f"arn:aws:iam::{ACCOUNT_ID}:role/{role_name}" + principal_role = f"arn:aws:iam:{ACCOUNT_ID}:saml-provider/{provider_name}" + saml_assertion = f""" http://localhost/ @@ -295,7 +273,7 @@ def test_assume_role_with_saml_should_not_rely_on_attribute_order(): - arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name} + arn:aws:iam::{ACCOUNT_ID}:saml-provider/{provider_name},arn:aws:iam::{ACCOUNT_ID}:role/{role_name} 900 @@ -310,13 +288,7 @@ def test_assume_role_with_saml_should_not_rely_on_attribute_order(): -""".format( - account_id=ACCOUNT_ID, - role_name=role_name, - provider_name=provider_name, - fed_identifier=fed_identifier, - fed_name=fed_name, - ).replace( 
+""".replace( "\n", "" ) @@ -331,9 +303,7 @@ def test_assume_role_with_saml_should_not_rely_on_attribute_order(): credentials["Expiration"].isoformat().should.equal("2012-01-01T12:15:00+00:00") assume_role_response["AssumedRoleUser"]["Arn"].should.equal( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{fed_name}".format( - account_id=ACCOUNT_ID, role_name=role_name, fed_name=fed_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{fed_name}" ) @@ -345,13 +315,9 @@ def test_assume_role_with_saml_should_respect_xml_namespaces(): provider_name = "TestProvFed" fed_identifier = "7ca82df9-1bad-4dd3-9b2b-adb68b554282" fed_name = "testuser" - role_input = "arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=ACCOUNT_ID, role_name=role_name - ) - principal_role = "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( - account_id=ACCOUNT_ID, provider_name=provider_name - ) - saml_assertion = """ + role_input = f"arn:aws:iam::{ACCOUNT_ID}:role/{role_name}" + principal_role = f"arn:aws:iam:{ACCOUNT_ID}:saml-provider/{provider_name}" + saml_assertion = f""" http://localhost/ @@ -396,7 +362,7 @@ def test_assume_role_with_saml_should_respect_xml_namespaces(): {fed_name} - arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name} + arn:aws:iam::{ACCOUNT_ID}:saml-provider/{provider_name},arn:aws:iam::{ACCOUNT_ID}:role/{role_name} 900 @@ -408,13 +374,7 @@ def test_assume_role_with_saml_should_respect_xml_namespaces(): -""".format( - account_id=ACCOUNT_ID, - role_name=role_name, - provider_name=provider_name, - fed_identifier=fed_identifier, - fed_name=fed_name, - ).replace( +""".replace( "\n", "" ) @@ -429,9 +389,7 @@ def test_assume_role_with_saml_should_respect_xml_namespaces(): credentials["Expiration"].isoformat().should.equal("2012-01-01T12:15:00+00:00") assume_role_response["AssumedRoleUser"]["Arn"].should.equal( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{fed_name}".format( - 
account_id=ACCOUNT_ID, role_name=role_name, fed_name=fed_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{fed_name}" ) @@ -443,13 +401,9 @@ def test_assume_role_with_saml_should_retrieve_attribute_value_from_text_when_xm provider_name = "TestProvFed" fed_identifier = "7ca82df9-1bad-4dd3-9b2b-adb68b554282" fed_name = "testuser" - role_input = "arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=ACCOUNT_ID, role_name=role_name - ) - principal_role = "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( - account_id=ACCOUNT_ID, provider_name=provider_name - ) - saml_assertion = """ + role_input = f"arn:aws:iam::{ACCOUNT_ID}:role/{role_name}" + principal_role = f"arn:aws:iam:{ACCOUNT_ID}:saml-provider/{provider_name}" + saml_assertion = f""" http://localhost/ @@ -498,7 +452,7 @@ def test_assume_role_with_saml_should_retrieve_attribute_value_from_text_when_xm arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name} + xsi:type="xs:string">arn:aws:iam::{ACCOUNT_ID}:saml-provider/{provider_name},arn:aws:iam::{ACCOUNT_ID}:role/{role_name} -""".format( - account_id=ACCOUNT_ID, - role_name=role_name, - provider_name=provider_name, - fed_identifier=fed_identifier, - fed_name=fed_name, - ).replace( +""".replace( "\n", "" ) @@ -533,9 +481,7 @@ def test_assume_role_with_saml_should_retrieve_attribute_value_from_text_when_xm credentials["Expiration"].isoformat().should.equal("2012-01-01T12:15:00+00:00") assume_role_response["AssumedRoleUser"]["Arn"].should.equal( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{fed_name}".format( - account_id=ACCOUNT_ID, role_name=role_name, fed_name=fed_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{fed_name}" ) @@ -547,13 +493,9 @@ def test_assume_role_with_saml_should_default_session_duration_to_3600_seconds_w provider_name = "TestProvFed" fed_identifier = "7ca82df9-1bad-4dd3-9b2b-adb68b554282" fed_name = "testuser" - role_input = 
"arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=ACCOUNT_ID, role_name=role_name - ) - principal_role = "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( - account_id=ACCOUNT_ID, provider_name=provider_name - ) - saml_assertion = """ + role_input = f"arn:aws:iam::{ACCOUNT_ID}:role/{role_name}" + principal_role = f"arn:aws:iam:{ACCOUNT_ID}:saml-provider/{provider_name}" + saml_assertion = f""" http://localhost/ @@ -595,7 +537,7 @@ def test_assume_role_with_saml_should_default_session_duration_to_3600_seconds_w - arn:aws:iam::{account_id}:saml-provider/{provider_name},arn:aws:iam::{account_id}:role/{role_name} + arn:aws:iam::{ACCOUNT_ID}:saml-provider/{provider_name},arn:aws:iam::{ACCOUNT_ID}:role/{role_name} {fed_name} @@ -607,13 +549,7 @@ def test_assume_role_with_saml_should_default_session_duration_to_3600_seconds_w -""".format( - account_id=ACCOUNT_ID, - role_name=role_name, - provider_name=provider_name, - fed_identifier=fed_identifier, - fed_name=fed_name, - ).replace( +""".replace( "\n", "" ) @@ -647,9 +583,7 @@ def test_assume_role_with_web_identity_boto3(): } ) role_name = "test-role" - s3_role = "arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=ACCOUNT_ID, role_name=role_name - ) + s3_role = f"arn:aws:iam::{ACCOUNT_ID}:role/{role_name}" session_name = "session-name" role = client.assume_role_with_web_identity( RoleArn=s3_role, @@ -675,9 +609,7 @@ def test_assume_role_with_web_identity_boto3(): creds["SecretAccessKey"].should.have.length_of(40) user["Arn"].should.equal( - "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( - account_id=ACCOUNT_ID, role_name=role_name, session_name=session_name - ) + f"arn:aws:sts::{ACCOUNT_ID}:assumed-role/{role_name}/{session_name}" ) user["AssumedRoleId"].should.contain("session-name") @@ -686,9 +618,7 @@ def test_assume_role_with_web_identity_boto3(): def test_get_caller_identity_with_default_credentials(): identity = boto3.client("sts", 
region_name="us-east-1").get_caller_identity() - identity["Arn"].should.equal( - "arn:aws:sts::{account_id}:user/moto".format(account_id=ACCOUNT_ID) - ) + identity["Arn"].should.equal(f"arn:aws:sts::{ACCOUNT_ID}:user/moto") identity["UserId"].should.equal("AKIAIOSFODNN7EXAMPLE") identity["Account"].should.equal(str(ACCOUNT_ID)) @@ -723,9 +653,7 @@ def test_get_caller_identity_with_assumed_role_credentials(): "Version": "2012-10-17", "Statement": { "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID) - }, + "Principal": {"AWS": f"arn:aws:iam::{ACCOUNT_ID}:root"}, "Action": "sts:AssumeRole", }, } diff --git a/tests/test_sts/test_sts_integration.py b/tests/test_sts/test_sts_integration.py index 08f78e9b8..26bf8c335 100644 --- a/tests/test_sts/test_sts_integration.py +++ b/tests/test_sts/test_sts_integration.py @@ -43,15 +43,9 @@ class TestStsAssumeRole(unittest.TestCase): provider_name = "TestProvFed" fed_identifier = "7ca82df9-1bad-4dd3-9b2b-adb68b554282" fed_name = "testuser" - role_input = "arn:aws:iam::{account_id}:role/{role_name}".format( - account_id=self.account_b, role_name=role_name - ) - principal_role = ( - "arn:aws:iam:{account_id}:saml-provider/{provider_name}".format( - account_id=ACCOUNT_ID, provider_name=provider_name - ) - ) - saml_assertion = """ + role_input = f"arn:aws:iam::{self.account_b}:role/{role_name}" + principal_role = f"arn:aws:iam:{ACCOUNT_ID}:saml-provider/{provider_name}" + saml_assertion = f""" http://localhost/ @@ -95,7 +89,7 @@ class TestStsAssumeRole(unittest.TestCase): {fed_name} - arn:aws:iam::{account_id}:role/{role_name},arn:aws:iam::{account_id}:saml-provider/{provider_name} + arn:aws:iam::{self.account_b}:role/{role_name},arn:aws:iam::{self.account_b}:saml-provider/{provider_name} 900 @@ -107,13 +101,7 @@ class TestStsAssumeRole(unittest.TestCase): - """.format( - account_id=self.account_b, - role_name=role_name, - provider_name=provider_name, - 
fed_identifier=fed_identifier, - fed_name=fed_name, - ).replace( + """.replace( "\n", "" ) diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 896118b0f..bd6a01b03 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -18,7 +18,7 @@ def test_domain_short_dict_representation(): { "name": "foo", "status": "REGISTERED", - "arn": "arn:aws:swf:{0}:{1}:/domain/foo".format(TEST_REGION, ACCOUNT_ID), + "arn": f"arn:aws:swf:{TEST_REGION}:{ACCOUNT_ID}:/domain/foo", } ) diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index 5e5f4c030..57d4625e9 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -125,9 +125,7 @@ def test_respond_activity_task_completed_on_closed_workflow_execution_boto3(): client.respond_activity_task_completed(taskToken=activity_token) ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault") ex.value.response["Error"]["Message"].should.equal( - "Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={}]".format( - client.run_id - ) + f"Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={client.run_id}]" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 780c3803d..d7b356427 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -302,9 +302,7 @@ def test_respond_decision_task_completed_on_close_workflow_execution_boto3(): client.respond_decision_task_completed(taskToken=task_token) ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault") ex.value.response["Error"]["Message"].should.equal( - "Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={}]".format( - client.run_id - 
) + f"Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={client.run_id}]" ) ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index e5b4cd88d..fda8625c5 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -25,7 +25,7 @@ def test_register_domain_boto3(): domain["status"].should.equal("REGISTERED") domain["description"].should.equal("A test domain") domain["arn"].should.equal( - "arn:aws:swf:us-west-1:{0}:/domain/test-domain".format(ACCOUNT_ID) + f"arn:aws:swf:us-west-1:{ACCOUNT_ID}:/domain/test-domain" ) diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index 7aa2642df..bf5f38301 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -359,9 +359,7 @@ def test_terminate_workflow_execution_with_wrong_workflow_or_run_id_boto3(): ) ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault") ex.value.response["Error"]["Message"].should.equal( - "Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={}]".format( - run_id - ) + f"Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={run_id}]" ) # already closed, without run_id diff --git a/tests/test_transcribe/test_transcribe_boto3.py b/tests/test_transcribe/test_transcribe_boto3.py index c993aea49..f2a2d0f0d 100644 --- a/tests/test_transcribe/test_transcribe_boto3.py +++ b/tests/test_transcribe/test_transcribe_boto3.py @@ -61,11 +61,7 @@ def test_run_medical_transcription_job_minimal_params(): transcription_job.should.contain("CompletionTime") transcription_job["Transcript"].should.equal( { - "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( - region_name, - args["OutputBucketName"], - args["MedicalTranscriptionJobName"], 
- ) + "TranscriptFileUri": f"https://s3.{region_name}.amazonaws.com/{args['OutputBucketName']}/medical/{args['MedicalTranscriptionJobName']}.json" } ) @@ -168,11 +164,7 @@ def test_run_medical_transcription_job_all_params(): transcription_job.should.contain("CompletionTime") transcription_job["Transcript"].should.equal( { - "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/medical/{}.json".format( - region_name, - args["OutputBucketName"], - args["MedicalTranscriptionJobName"], - ) + "TranscriptFileUri": f"https://s3.{region_name}.amazonaws.com/{args['OutputBucketName']}/medical/{args['MedicalTranscriptionJobName']}.json" } ) @@ -262,9 +254,7 @@ def test_run_transcription_job_all_params(): transcription_job.should.contain("CompletionTime") transcription_job["Transcript"].should.equal( { - "TranscriptFileUri": "https://s3.{}.amazonaws.com/{}/{}.json".format( - region_name, args["OutputBucketName"], args["TranscriptionJobName"] - ) + "TranscriptFileUri": f"https://s3.{region_name}.amazonaws.com/{args['OutputBucketName']}/{args['TranscriptionJobName']}.json" } ) @@ -325,7 +315,7 @@ def test_run_transcription_job_minimal_params(): transcription_job.should.contain("Transcript") # Check aws hosted bucket transcription_job["Transcript"]["TranscriptFileUri"].should.contain( - "https://s3.{0}.amazonaws.com/aws-transcribe-{0}-prod/".format(region_name) + f"https://s3.{region_name}.amazonaws.com/aws-transcribe-{region_name}-prod/" ) # Delete @@ -543,7 +533,7 @@ def test_list_medical_transcription_jobs(): client = boto3.client("transcribe", region_name=region_name) def run_job(index, target_status): - job_name = "Job_{}".format(index) + job_name = f"Job_{index}" args = { "MedicalTranscriptionJobName": job_name, "LanguageCode": "en-US", @@ -635,7 +625,7 @@ def test_list_transcription_jobs(): client = boto3.client("transcribe", region_name=region_name) def run_job(index, target_status): - job_name = "Job_{}".format(index) + job_name = f"Job_{index}" args = { 
"TranscriptionJobName": job_name, "Media": {"MediaFileUri": "s3://my-bucket/my-media-file.wav"}, @@ -796,9 +786,7 @@ def test_create_vocabulary(): resp["VocabularyState"].should.equal("PENDING") resp["DownloadUri"].should.contain(vocabulary_name) resp["DownloadUri"].should.contain( - "https://s3.{0}.amazonaws.com/aws-transcribe-dictionary-model-{0}-prod".format( - region_name - ) + f"https://s3.{region_name}.amazonaws.com/aws-transcribe-dictionary-model-{region_name}-prod" ) # IN_PROGRESS resp = client.get_vocabulary(VocabularyName=vocabulary_name) @@ -813,7 +801,7 @@ def test_list_vocabularies(): client = boto3.client("transcribe", region_name=region_name) def create_vocab(index, target_status): - vocabulary_name = "Vocab_{}".format(index) + vocabulary_name = f"Vocab_{index}" args = { "VocabularyName": vocabulary_name, "LanguageCode": "en-US", @@ -896,7 +884,7 @@ def test_list_medical_vocabularies(): client = boto3.client("transcribe", region_name=region_name) def create_vocab(index, target_status): - vocabulary_name = "Vocab_{}".format(index) + vocabulary_name = f"Vocab_{index}" resp = client.create_medical_vocabulary( VocabularyName=vocabulary_name, LanguageCode="en-US", diff --git a/tests/test_wafv2/test_server.py b/tests/test_wafv2/test_server.py index 28b313949..867dc6b2d 100644 --- a/tests/test_wafv2/test_server.py +++ b/tests/test_wafv2/test_server.py @@ -32,7 +32,7 @@ def test_create_web_acl(): web_acl = res.json["Summary"] assert web_acl.get("Name") == "John" assert web_acl.get("ARN").startswith( - "arn:aws:wafv2:us-east-1:{}:regional/webacl/John/".format(ACCOUNT_ID) + f"arn:aws:wafv2:us-east-1:{ACCOUNT_ID}:regional/webacl/John/" ) # Duplicate name - should raise error @@ -55,7 +55,7 @@ def test_create_web_acl(): ) web_acl = res.json["Summary"] assert web_acl.get("ARN").startswith( - "arn:aws:wafv2:global:{}:global/webacl/Carl/".format(ACCOUNT_ID) + f"arn:aws:wafv2:global:{ACCOUNT_ID}:global/webacl/Carl/" ) diff --git a/tests/test_wafv2/test_utils.py 
b/tests/test_wafv2/test_utils.py index c970cdc60..54510c118 100644 --- a/tests/test_wafv2/test_utils.py +++ b/tests/test_wafv2/test_utils.py @@ -10,12 +10,10 @@ def test_make_arn_for_wacl(): name = "testName" scope = "REGIONAL" arn = make_arn_for_wacl(name, ACCOUNT_ID, region, uniqueID, scope) - assert arn == "arn:aws:wafv2:{}:{}:regional/webacl/{}/{}".format( - region, ACCOUNT_ID, name, uniqueID + assert ( + arn == f"arn:aws:wafv2:{region}:{ACCOUNT_ID}:regional/webacl/{name}/{uniqueID}" ) scope = "CLOUDFRONT" arn = make_arn_for_wacl(name, ACCOUNT_ID, region, uniqueID, scope) - assert arn == "arn:aws:wafv2:{}:{}:global/webacl/{}/{}".format( - region, ACCOUNT_ID, name, uniqueID - ) + assert arn == f"arn:aws:wafv2:{region}:{ACCOUNT_ID}:global/webacl/{name}/{uniqueID}" diff --git a/tests/test_wafv2/test_wafv2.py b/tests/test_wafv2/test_wafv2.py index aa0a34685..202a0db1f 100644 --- a/tests/test_wafv2/test_wafv2.py +++ b/tests/test_wafv2/test_wafv2.py @@ -16,7 +16,7 @@ def test_create_web_acl(): web_acl = res["Summary"] assert web_acl.get("Name") == "John" assert web_acl.get("ARN").startswith( - "arn:aws:wafv2:us-east-1:{}:regional/webacl/John/".format(ACCOUNT_ID) + f"arn:aws:wafv2:us-east-1:{ACCOUNT_ID}:regional/webacl/John/" ) # Duplicate name - should raise error with pytest.raises(ClientError) as ex: @@ -30,7 +30,7 @@ def test_create_web_acl(): res = conn.create_web_acl(**CREATE_WEB_ACL_BODY("Carl", "CLOUDFRONT")) web_acl = res["Summary"] assert web_acl.get("ARN").startswith( - "arn:aws:wafv2:global:{}:global/webacl/Carl/".format(ACCOUNT_ID) + f"arn:aws:wafv2:global:{ACCOUNT_ID}:global/webacl/Carl/" )