From ca49b415997c687e9f472d88ec4fd5d6b1266d23 Mon Sep 17 00:00:00 2001 From: Tim Van Laer Date: Tue, 2 Jun 2020 11:32:47 +0200 Subject: [PATCH 01/20] Make sure the UTC tz is included in the bucket creation timestamp --- moto/s3/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/s3/models.py b/moto/s3/models.py index 25ead4f5e..36252bc17 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -5,6 +5,7 @@ import json import os import base64 import datetime +import pytz import hashlib import copy import itertools @@ -776,7 +777,7 @@ class FakeBucket(BaseModel): self.notification_configuration = None self.accelerate_configuration = None self.payer = "BucketOwner" - self.creation_date = datetime.datetime.utcnow() + self.creation_date = datetime.datetime.now(tz=pytz.utc) self.public_access_block = None self.encryption = None From 9ca10e36301c759870276fda3910aa817995c8c2 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 3 Jun 2020 15:36:32 +0100 Subject: [PATCH 02/20] #3046 - DynamoDB - Add Key-size Validation for BatchGetItem --- moto/dynamodb2/responses.py | 20 ++++++++++++ tests/test_dynamodb2/test_dynamodb.py | 46 +++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index aec7c7560..199a09b94 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -371,6 +371,26 @@ class DynamoHandler(BaseResponse): results = {"ConsumedCapacity": [], "Responses": {}, "UnprocessedKeys": {}} + # Validation: Can only request up to 100 items at the same time + # Scenario 1: We're requesting more than a 100 keys from a single table + for table_name, table_request in table_batches.items(): + if len(table_request["Keys"]) > 100: + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "1 validation error detected: Value at 'requestItems." 
+ + table_name + + ".member.keys' failed to satisfy constraint: Member must have length less than or equal to 100", + ) + # Scenario 2: We're requesting more than a 100 keys across all tables + nr_of_keys_across_all_tables = sum( + [len(req["Keys"]) for _, req in table_batches.items()] + ) + if nr_of_keys_across_all_tables > 100: + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "Too many items requested for the BatchGetItem call", + ) + for table_name, table_request in table_batches.items(): keys = table_request["Keys"] if self._contains_duplicates(keys): diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 50fd4fd6c..19c585bfa 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3038,6 +3038,52 @@ def test_batch_items_returns_all(): ] +@mock_dynamodb2 +def test_batch_items_throws_exception_when_requesting_100_items_for_single_table(): + dynamodb = _create_user_table() + with assert_raises(ClientError) as ex: + dynamodb.batch_get_item( + RequestItems={ + "users": { + "Keys": [{"username": {"S": f"user{i}"}} for i in range(0, 104)], + "ConsistentRead": True, + } + } + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + msg = ex.exception.response["Error"]["Message"] + msg.should.contain("1 validation error detected: Value") + msg.should.contain( + "at 'requestItems.users.member.keys' failed to satisfy constraint: Member must have length less than or equal to 100" + ) + + +@mock_dynamodb2 +def test_batch_items_throws_exception_when_requesting_100_items_across_all_tables(): + dynamodb = _create_user_table() + with assert_raises(ClientError) as ex: + dynamodb.batch_get_item( + RequestItems={ + "users": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 75) + ], + "ConsistentRead": True, + }, + "users2": { + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 75) + ], + "ConsistentRead": 
True, + }, + } + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["Error"]["Message"].should.equal( + "Too many items requested for the BatchGetItem call" + ) + + @mock_dynamodb2 def test_batch_items_with_basic_projection_expression(): dynamodb = _create_user_table() From b0da78c29de801dde9e8757f0e29e5044112e03b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 3 Jun 2020 16:15:46 +0100 Subject: [PATCH 03/20] Update test_dynamodb.py --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 19c585bfa..8071a4d8d 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3045,7 +3045,7 @@ def test_batch_items_throws_exception_when_requesting_100_items_for_single_table dynamodb.batch_get_item( RequestItems={ "users": { - "Keys": [{"username": {"S": f"user{i}"}} for i in range(0, 104)], + "Keys": [{"username": {"S": "user" + str(i)}} for i in range(0, 104)], "ConsistentRead": True, } } From d21088699e9224b254f164d5654fa02759c9a5e1 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 3 Jun 2020 17:14:48 +0100 Subject: [PATCH 04/20] Linting --- tests/test_dynamodb2/test_dynamodb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 8071a4d8d..370999116 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3045,7 +3045,9 @@ def test_batch_items_throws_exception_when_requesting_100_items_for_single_table dynamodb.batch_get_item( RequestItems={ "users": { - "Keys": [{"username": {"S": "user" + str(i)}} for i in range(0, 104)], + "Keys": [ + {"username": {"S": "user" + str(i)}} for i in range(0, 104) + ], "ConsistentRead": True, } } From a66b0e5b1a5087b206b9e6bb384e2de993c96f2e Mon Sep 17 00:00:00 
2001 From: Bert Blommers Date: Thu, 4 Jun 2020 07:45:00 +0100 Subject: [PATCH 05/20] CloudFormation - Support DynamoDB Streams --- moto/dynamodb2/models/__init__.py | 2 ++ .../test_cloudformation_stack_integration.py | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index a5277800f..ff4ad3594 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -342,6 +342,8 @@ class Table(BaseModel): params["throughput"] = properties["ProvisionedThroughput"] if "LocalSecondaryIndexes" in properties: params["indexes"] = properties["LocalSecondaryIndexes"] + if "StreamSpecification" in properties: + params["streams"] = properties["StreamSpecification"] table = dynamodb_backends[region_name].create_table( name=properties["TableName"], **params diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 27bac5e57..ad2436696 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2307,6 +2307,7 @@ def test_stack_dynamodb_resources_integration(): }, } ], + "StreamSpecification": {"StreamViewType": "KEYS_ONLY"}, }, } }, @@ -2319,6 +2320,12 @@ def test_stack_dynamodb_resources_integration(): StackName="dynamodb_stack", TemplateBody=dynamodb_template_json ) + dynamodb_client = boto3.client("dynamodb", region_name="us-east-1") + table_desc = dynamodb_client.describe_table(TableName="myTableName")["Table"] + table_desc["StreamSpecification"].should.equal( + {"StreamEnabled": True, "StreamViewType": "KEYS_ONLY",} + ) + dynamodb_conn = boto3.resource("dynamodb", region_name="us-east-1") table = dynamodb_conn.Table("myTableName") table.name.should.equal("myTableName") From 20784a2d67f791f0c01a576c3eda88ca2036e235 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 6 Jun 2020 13:15:50 
+0100 Subject: [PATCH 06/20] Improve implementation coverage --- IMPLEMENTATION_COVERAGE.md | 341 ++++++++++++++++++++++++++++--- moto/awslambda/models.py | 4 +- moto/cloudformation/parsing.py | 2 +- moto/cloudformation/responses.py | 2 +- moto/s3/models.py | 30 ++- moto/s3/responses.py | 64 +++--- tests/test_s3/test_s3.py | 2 +- 7 files changed, 378 insertions(+), 67 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index bfcdd3167..43983d912 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -377,6 +377,7 @@ - [ ] delete_route_settings - [ ] delete_stage - [ ] delete_vpc_link +- [ ] export_api - [ ] get_api - [ ] get_api_mapping - [ ] get_api_mappings @@ -754,6 +755,7 @@ - [ ] describe_copy_job - [ ] describe_protected_resource - [ ] describe_recovery_point +- [ ] describe_region_settings - [ ] describe_restore_job - [ ] export_backup_plan_template - [ ] get_backup_plan @@ -786,6 +788,7 @@ - [ ] untag_resource - [ ] update_backup_plan - [ ] update_recovery_point_lifecycle +- [ ] update_region_settings ## batch @@ -875,6 +878,7 @@ - [ ] create_bot - [ ] create_meeting - [ ] create_phone_number_order +- [ ] create_proxy_session - [ ] create_room - [ ] create_room_membership - [ ] create_user @@ -885,11 +889,13 @@ - [ ] delete_events_configuration - [ ] delete_meeting - [ ] delete_phone_number +- [ ] delete_proxy_session - [ ] delete_room - [ ] delete_room_membership - [ ] delete_voice_connector - [ ] delete_voice_connector_group - [ ] delete_voice_connector_origination +- [ ] delete_voice_connector_proxy - [ ] delete_voice_connector_streaming_configuration - [ ] delete_voice_connector_termination - [ ] delete_voice_connector_termination_credentials @@ -907,6 +913,8 @@ - [ ] get_phone_number - [ ] get_phone_number_order - [ ] get_phone_number_settings +- [ ] get_proxy_session +- [ ] get_retention_settings - [ ] get_room - [ ] get_user - [ ] get_user_settings @@ -914,39 +922,55 @@ - [ ] 
get_voice_connector_group - [ ] get_voice_connector_logging_configuration - [ ] get_voice_connector_origination +- [ ] get_voice_connector_proxy - [ ] get_voice_connector_streaming_configuration - [ ] get_voice_connector_termination - [ ] get_voice_connector_termination_health - [ ] invite_users - [ ] list_accounts +- [ ] list_attendee_tags - [ ] list_attendees - [ ] list_bots +- [ ] list_meeting_tags - [ ] list_meetings - [ ] list_phone_number_orders - [ ] list_phone_numbers +- [ ] list_proxy_sessions - [ ] list_room_memberships - [ ] list_rooms +- [ ] list_tags_for_resource - [ ] list_users - [ ] list_voice_connector_groups - [ ] list_voice_connector_termination_credentials - [ ] list_voice_connectors - [ ] logout_user - [ ] put_events_configuration +- [ ] put_retention_settings - [ ] put_voice_connector_logging_configuration - [ ] put_voice_connector_origination +- [ ] put_voice_connector_proxy - [ ] put_voice_connector_streaming_configuration - [ ] put_voice_connector_termination - [ ] put_voice_connector_termination_credentials +- [ ] redact_conversation_message +- [ ] redact_room_message - [ ] regenerate_security_token - [ ] reset_personal_pin - [ ] restore_phone_number - [ ] search_available_phone_numbers +- [ ] tag_attendee +- [ ] tag_meeting +- [ ] tag_resource +- [ ] untag_attendee +- [ ] untag_meeting +- [ ] untag_resource - [ ] update_account - [ ] update_account_settings - [ ] update_bot - [ ] update_global_settings - [ ] update_phone_number - [ ] update_phone_number_settings +- [ ] update_proxy_session - [ ] update_room - [ ] update_room_membership - [ ] update_user @@ -1446,6 +1470,7 @@ - [ ] delete_deployment_config - [ ] delete_deployment_group - [ ] delete_git_hub_account_token +- [ ] delete_resources_by_external_id - [ ] deregister_on_premises_instance - [ ] get_application - [ ] get_application_revision @@ -1482,9 +1507,15 @@ 0% implemented - [ ] associate_repository +- [ ] describe_code_review +- [ ] describe_recommendation_feedback - [ ] 
describe_repository_association - [ ] disassociate_repository +- [ ] list_code_reviews +- [ ] list_recommendation_feedback +- [ ] list_recommendations - [ ] list_repository_associations +- [ ] put_recommendation_feedback ## codeguruprofiler @@ -1495,10 +1526,13 @@ - [ ] create_profiling_group - [ ] delete_profiling_group - [ ] describe_profiling_group +- [ ] get_policy - [ ] get_profile - [ ] list_profile_times - [ ] list_profiling_groups - [ ] post_agent_profile +- [ ] put_permission +- [ ] remove_permission - [ ] update_profiling_group @@ -1577,6 +1611,9 @@ - [ ] delete_connection - [ ] get_connection - [ ] list_connections +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource ## codestar-notifications @@ -1816,18 +1853,26 @@ 0% implemented - [ ] describe_entities_detection_v2_job +- [ ] describe_icd10_cm_inference_job - [ ] describe_phi_detection_job +- [ ] describe_rx_norm_inference_job - [ ] detect_entities - [ ] detect_entities_v2 - [ ] detect_phi - [ ] infer_icd10_cm - [ ] infer_rx_norm - [ ] list_entities_detection_v2_jobs +- [ ] list_icd10_cm_inference_jobs - [ ] list_phi_detection_jobs +- [ ] list_rx_norm_inference_jobs - [ ] start_entities_detection_v2_job +- [ ] start_icd10_cm_inference_job - [ ] start_phi_detection_job +- [ ] start_rx_norm_inference_job - [ ] stop_entities_detection_v2_job +- [ ] stop_icd10_cm_inference_job - [ ] stop_phi_detection_job +- [ ] stop_rx_norm_inference_job ## compute-optimizer @@ -2113,6 +2158,7 @@ - [ ] list_invitations - [ ] list_members - [ ] reject_invitation +- [ ] start_monitoring_member ## devicefarm @@ -2679,6 +2725,7 @@ - [X] delete_vpn_gateway - [ ] deprovision_byoip_cidr - [X] deregister_image +- [ ] deregister_instance_event_notification_attributes - [ ] deregister_transit_gateway_multicast_group_members - [ ] deregister_transit_gateway_multicast_group_sources - [ ] describe_account_attributes @@ -2721,6 +2768,7 @@ - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute - [X] 
describe_instance_credit_specifications +- [ ] describe_instance_event_notification_attributes - [ ] describe_instance_status - [ ] describe_instance_type_offerings - [ ] describe_instance_types @@ -2892,6 +2940,7 @@ - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] register_instance_event_notification_attributes - [ ] register_transit_gateway_multicast_group_members - [ ] register_transit_gateway_multicast_group_sources - [ ] reject_transit_gateway_peering_attachment @@ -3094,6 +3143,9 @@
0% implemented +- [ ] describe_accelerator_offerings +- [ ] describe_accelerator_types +- [ ] describe_accelerators - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource @@ -3195,6 +3247,7 @@ - [ ] describe_instances_health - [ ] describe_platform_version - [X] list_available_solution_stacks +- [ ] list_platform_branches - [ ] list_platform_versions - [X] list_tags_for_resource - [ ] rebuild_environment @@ -3312,7 +3365,7 @@ ## emr
-50% implemented +45% implemented - [ ] add_instance_fleet - [X] add_instance_groups @@ -3326,6 +3379,7 @@ - [ ] describe_security_configuration - [X] describe_step - [ ] get_block_public_access_configuration +- [ ] get_managed_scaling_policy - [X] list_bootstrap_actions - [X] list_clusters - [ ] list_instance_fleets @@ -3338,7 +3392,9 @@ - [X] modify_instance_groups - [ ] put_auto_scaling_policy - [ ] put_block_public_access_configuration +- [ ] put_managed_scaling_policy - [ ] remove_auto_scaling_policy +- [ ] remove_managed_scaling_policy - [X] remove_tags - [X] run_job_flow - [X] set_termination_protection @@ -3351,22 +3407,29 @@ 0% implemented - [ ] add_tags +- [ ] associate_package - [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain +- [ ] create_package - [ ] delete_elasticsearch_domain - [ ] delete_elasticsearch_service_role +- [ ] delete_package - [ ] describe_elasticsearch_domain - [ ] describe_elasticsearch_domain_config - [ ] describe_elasticsearch_domains - [ ] describe_elasticsearch_instance_type_limits +- [ ] describe_packages - [ ] describe_reserved_elasticsearch_instance_offerings - [ ] describe_reserved_elasticsearch_instances +- [ ] dissociate_package - [ ] get_compatible_elasticsearch_versions - [ ] get_upgrade_history - [ ] get_upgrade_status - [ ] list_domain_names +- [ ] list_domains_for_package - [ ] list_elasticsearch_instance_types - [ ] list_elasticsearch_versions +- [ ] list_packages_for_domain - [ ] list_tags - [ ] purchase_reserved_elasticsearch_instance_offering - [ ] remove_tags @@ -3502,8 +3565,10 @@ - [ ] create_model_version - [ ] create_rule - [ ] create_variable +- [ ] delete_detector - [ ] delete_detector_version - [ ] delete_event +- [ ] delete_rule_version - [ ] describe_detector - [ ] describe_model_versions - [ ] get_detector_version @@ -3553,9 +3618,11 @@ 0% implemented - [ ] accept_match +- [ ] claim_game_server - [ ] create_alias - [ ] create_build - [ ] create_fleet +- [ ] 
create_game_server_group - [ ] create_game_session - [ ] create_game_session_queue - [ ] create_matchmaking_configuration @@ -3568,6 +3635,7 @@ - [ ] delete_alias - [ ] delete_build - [ ] delete_fleet +- [ ] delete_game_server_group - [ ] delete_game_session_queue - [ ] delete_matchmaking_configuration - [ ] delete_matchmaking_rule_set @@ -3575,6 +3643,7 @@ - [ ] delete_script - [ ] delete_vpc_peering_authorization - [ ] delete_vpc_peering_connection +- [ ] deregister_game_server - [ ] describe_alias - [ ] describe_build - [ ] describe_ec2_instance_limits @@ -3583,6 +3652,8 @@ - [ ] describe_fleet_events - [ ] describe_fleet_port_settings - [ ] describe_fleet_utilization +- [ ] describe_game_server +- [ ] describe_game_server_group - [ ] describe_game_session_details - [ ] describe_game_session_placement - [ ] describe_game_session_queues @@ -3602,11 +3673,15 @@ - [ ] list_aliases - [ ] list_builds - [ ] list_fleets +- [ ] list_game_server_groups +- [ ] list_game_servers - [ ] list_scripts - [ ] list_tags_for_resource - [ ] put_scaling_policy +- [ ] register_game_server - [ ] request_upload_credentials - [ ] resolve_alias +- [ ] resume_game_server_group - [ ] search_game_sessions - [ ] start_fleet_actions - [ ] start_game_session_placement @@ -3615,6 +3690,7 @@ - [ ] stop_fleet_actions - [ ] stop_game_session_placement - [ ] stop_matchmaking +- [ ] suspend_game_server_group - [ ] tag_resource - [ ] untag_resource - [ ] update_alias @@ -3622,6 +3698,8 @@ - [ ] update_fleet_attributes - [ ] update_fleet_capacity - [ ] update_fleet_port_settings +- [ ] update_game_server +- [ ] update_game_server_group - [ ] update_game_session - [ ] update_game_session_queue - [ ] update_matchmaking_configuration @@ -3813,6 +3891,7 @@ - [ ] stop_crawler - [ ] stop_crawler_schedule - [ ] stop_trigger +- [ ] stop_workflow_run - [ ] tag_resource - [ ] untag_resource - [ ] update_classifier @@ -3978,9 +4057,12 @@ - [ ] delete_members - [ ] delete_publishing_destination - [ ] 
delete_threat_intel_set +- [ ] describe_organization_configuration - [ ] describe_publishing_destination +- [ ] disable_organization_admin_account - [ ] disassociate_from_master_account - [ ] disassociate_members +- [ ] enable_organization_admin_account - [ ] get_detector - [ ] get_filter - [ ] get_findings @@ -3997,6 +4079,7 @@ - [ ] list_invitations - [ ] list_ip_sets - [ ] list_members +- [ ] list_organization_admin_accounts - [ ] list_publishing_destinations - [ ] list_tags_for_resource - [ ] list_threat_intel_sets @@ -4009,6 +4092,7 @@ - [ ] update_filter - [ ] update_findings_feedback - [ ] update_ip_set +- [ ] update_organization_configuration - [ ] update_publishing_destination - [ ] update_threat_intel_set
@@ -4034,7 +4118,7 @@ ## iam
-68% implemented +69% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile @@ -4063,7 +4147,7 @@ - [X] delete_account_password_policy - [X] delete_group - [ ] delete_group_policy -- [ ] delete_instance_profile +- [X] delete_instance_profile - [X] delete_login_profile - [X] delete_open_id_connect_provider - [X] delete_policy @@ -4303,6 +4387,7 @@ - [ ] create_authorizer - [ ] create_billing_group - [ ] create_certificate_from_csr +- [ ] create_dimension - [ ] create_domain_configuration - [ ] create_dynamic_thing_group - [X] create_job @@ -4328,6 +4413,7 @@ - [ ] delete_billing_group - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_dimension - [ ] delete_domain_configuration - [ ] delete_dynamic_thing_group - [X] delete_job @@ -4359,6 +4445,7 @@ - [ ] describe_ca_certificate - [X] describe_certificate - [ ] describe_default_authorizer +- [ ] describe_dimension - [ ] describe_domain_configuration - [ ] describe_endpoint - [ ] describe_event_configurations @@ -4407,6 +4494,7 @@ - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca +- [ ] list_dimensions - [ ] list_domain_configurations - [ ] list_indices - [X] list_job_executions_for_job @@ -4445,6 +4533,7 @@ - [ ] list_violation_events - [ ] register_ca_certificate - [X] register_certificate +- [ ] register_certificate_without_ca - [ ] register_thing - [ ] reject_certificate_transfer - [ ] remove_thing_from_billing_group @@ -4470,6 +4559,7 @@ - [ ] update_billing_group - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_dimension - [ ] update_domain_configuration - [ ] update_dynamic_thing_group - [ ] update_event_configurations @@ -4634,6 +4724,66 @@ - [ ] untag_resource
+## iotsitewise +
+0% implemented + +- [ ] associate_assets +- [ ] batch_associate_project_assets +- [ ] batch_disassociate_project_assets +- [ ] batch_put_asset_property_value +- [ ] create_access_policy +- [ ] create_asset +- [ ] create_asset_model +- [ ] create_dashboard +- [ ] create_gateway +- [ ] create_portal +- [ ] create_project +- [ ] delete_access_policy +- [ ] delete_asset +- [ ] delete_asset_model +- [ ] delete_dashboard +- [ ] delete_gateway +- [ ] delete_portal +- [ ] delete_project +- [ ] describe_access_policy +- [ ] describe_asset +- [ ] describe_asset_model +- [ ] describe_asset_property +- [ ] describe_dashboard +- [ ] describe_gateway +- [ ] describe_gateway_capability_configuration +- [ ] describe_logging_options +- [ ] describe_portal +- [ ] describe_project +- [ ] disassociate_assets +- [ ] get_asset_property_aggregates +- [ ] get_asset_property_value +- [ ] get_asset_property_value_history +- [ ] list_access_policies +- [ ] list_asset_models +- [ ] list_assets +- [ ] list_associated_assets +- [ ] list_dashboards +- [ ] list_gateways +- [ ] list_portals +- [ ] list_project_assets +- [ ] list_projects +- [ ] list_tags_for_resource +- [ ] put_logging_options +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_access_policy +- [ ] update_asset +- [ ] update_asset_model +- [ ] update_asset_property +- [ ] update_dashboard +- [ ] update_gateway +- [ ] update_gateway_capability_configuration +- [ ] update_portal +- [ ] update_project +
+ ## iotthingsgraph
0% implemented @@ -4687,6 +4837,7 @@ - [ ] describe_configuration - [ ] describe_configuration_revision - [ ] get_bootstrap_brokers +- [ ] get_compatible_kafka_versions - [ ] list_cluster_operations - [ ] list_clusters - [ ] list_configuration_revisions @@ -4699,6 +4850,7 @@ - [ ] update_broker_count - [ ] update_broker_storage - [ ] update_cluster_configuration +- [ ] update_cluster_kafka_version - [ ] update_monitoring
@@ -4711,6 +4863,7 @@ - [ ] create_data_source - [ ] create_faq - [ ] create_index +- [ ] delete_data_source - [ ] delete_faq - [ ] delete_index - [ ] describe_data_source @@ -4720,10 +4873,13 @@ - [ ] list_data_sources - [ ] list_faqs - [ ] list_indices +- [ ] list_tags_for_resource - [ ] query - [ ] start_data_source_sync_job - [ ] stop_data_source_sync_job - [ ] submit_feedback +- [ ] tag_resource +- [ ] untag_resource - [ ] update_data_source - [ ] update_index
@@ -4766,6 +4922,7 @@
0% implemented +- [ ] get_clip - [ ] get_dash_streaming_session_url - [ ] get_hls_streaming_session_url - [ ] get_media_for_fragment_list @@ -5195,7 +5352,7 @@ ## logs
-43% implemented +40% implemented - [ ] associate_kms_key - [ ] cancel_export_task @@ -5206,6 +5363,7 @@ - [X] delete_log_group - [X] delete_log_stream - [ ] delete_metric_filter +- [ ] delete_query_definition - [ ] delete_resource_policy - [X] delete_retention_policy - [X] delete_subscription_filter @@ -5215,6 +5373,7 @@ - [X] describe_log_streams - [ ] describe_metric_filters - [ ] describe_queries +- [ ] describe_query_definitions - [ ] describe_resource_policies - [X] describe_subscription_filters - [ ] disassociate_kms_key @@ -5228,6 +5387,7 @@ - [ ] put_destination_policy - [X] put_log_events - [ ] put_metric_filter +- [ ] put_query_definition - [ ] put_resource_policy - [X] put_retention_policy - [X] put_subscription_filter @@ -5285,27 +5445,88 @@ - [ ] update_s3_resources
+## macie2 +
+0% implemented + +- [ ] accept_invitation +- [ ] archive_findings +- [ ] batch_get_custom_data_identifiers +- [ ] create_classification_job +- [ ] create_custom_data_identifier +- [ ] create_findings_filter +- [ ] create_invitations +- [ ] create_member +- [ ] create_sample_findings +- [ ] decline_invitations +- [ ] delete_custom_data_identifier +- [ ] delete_findings_filter +- [ ] delete_invitations +- [ ] delete_member +- [ ] describe_buckets +- [ ] describe_classification_job +- [ ] describe_organization_configuration +- [ ] disable_macie +- [ ] disable_organization_admin_account +- [ ] disassociate_from_master_account +- [ ] disassociate_member +- [ ] enable_macie +- [ ] enable_organization_admin_account +- [ ] get_bucket_statistics +- [ ] get_classification_export_configuration +- [ ] get_custom_data_identifier +- [ ] get_finding_statistics +- [ ] get_findings +- [ ] get_findings_filter +- [ ] get_invitations_count +- [ ] get_macie_session +- [ ] get_master_account +- [ ] get_member +- [ ] get_usage_statistics +- [ ] get_usage_totals +- [ ] list_classification_jobs +- [ ] list_custom_data_identifiers +- [ ] list_findings +- [ ] list_findings_filters +- [ ] list_invitations +- [ ] list_members +- [ ] list_organization_admin_accounts +- [ ] list_tags_for_resource +- [ ] put_classification_export_configuration +- [ ] tag_resource +- [ ] test_custom_data_identifier +- [ ] unarchive_findings +- [ ] untag_resource +- [ ] update_classification_job +- [ ] update_findings_filter +- [ ] update_macie_session +- [ ] update_member_session +- [ ] update_organization_configuration +
+ ## managedblockchain
-77% implemented +100% implemented - [X] create_member - [X] create_network -- [ ] create_node +- [X] create_node - [X] create_proposal - [X] delete_member -- [ ] delete_node +- [X] delete_node - [X] get_member - [X] get_network -- [ ] get_node +- [X] get_node - [X] get_proposal - [X] list_invitations - [X] list_members - [X] list_networks -- [ ] list_nodes +- [X] list_nodes - [X] list_proposal_votes - [X] list_proposals - [X] reject_invitation +- [X] update_member +- [X] update_node - [X] vote_on_proposal
@@ -5342,6 +5563,7 @@ - [ ] add_flow_outputs - [ ] add_flow_sources +- [ ] add_flow_vpc_interfaces - [ ] create_flow - [ ] delete_flow - [ ] describe_flow @@ -5351,6 +5573,7 @@ - [ ] list_tags_for_resource - [ ] remove_flow_output - [ ] remove_flow_source +- [ ] remove_flow_vpc_interface - [ ] revoke_flow_entitlement - [ ] start_flow - [ ] stop_flow @@ -5414,6 +5637,7 @@ - [ ] delete_tags - [ ] describe_channel - [ ] describe_input +- [ ] describe_input_device - [ ] describe_input_security_group - [ ] describe_multiplex - [ ] describe_multiplex_program @@ -5421,6 +5645,7 @@ - [ ] describe_reservation - [ ] describe_schedule - [ ] list_channels +- [ ] list_input_devices - [ ] list_input_security_groups - [ ] list_inputs - [ ] list_multiplex_programs @@ -5436,6 +5661,7 @@ - [ ] update_channel - [ ] update_channel_class - [ ] update_input +- [ ] update_input_device - [ ] update_input_security_group - [ ] update_multiplex - [ ] update_multiplex_program @@ -5482,6 +5708,9 @@ - [ ] list_assets - [ ] list_packaging_configurations - [ ] list_packaging_groups +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource
## mediastore @@ -5493,15 +5722,18 @@ - [ ] delete_container_policy - [ ] delete_cors_policy - [ ] delete_lifecycle_policy +- [ ] delete_metric_policy - [ ] describe_container - [ ] get_container_policy - [ ] get_cors_policy - [ ] get_lifecycle_policy +- [ ] get_metric_policy - [ ] list_containers - [ ] list_tags_for_resource - [ ] put_container_policy - [ ] put_cors_policy - [ ] put_lifecycle_policy +- [ ] put_metric_policy - [ ] start_access_logging - [ ] stop_access_logging - [ ] tag_resource @@ -5868,7 +6100,7 @@ ## organizations
-51% implemented +47% implemented - [ ] accept_handshake - [X] attach_policy @@ -5882,6 +6114,7 @@ - [ ] delete_organization - [ ] delete_organizational_unit - [ ] delete_policy +- [ ] deregister_delegated_administrator - [X] describe_account - [X] describe_create_account_status - [ ] describe_effective_policy @@ -5902,6 +6135,8 @@ - [ ] list_aws_service_access_for_organization - [X] list_children - [ ] list_create_account_status +- [ ] list_delegated_administrators +- [ ] list_delegated_services_for_account - [ ] list_handshakes_for_account - [ ] list_handshakes_for_organization - [X] list_organizational_units_for_parent @@ -5912,6 +6147,7 @@ - [X] list_tags_for_resource - [X] list_targets_for_policy - [X] move_account +- [ ] register_delegated_administrator - [ ] remove_account_from_organization - [X] tag_resource - [X] untag_resource @@ -6207,18 +6443,22 @@
0% implemented +- [ ] cancel_journal_kinesis_stream - [ ] create_ledger - [ ] delete_ledger +- [ ] describe_journal_kinesis_stream - [ ] describe_journal_s3_export - [ ] describe_ledger - [ ] export_journal_to_s3 - [ ] get_block - [ ] get_digest - [ ] get_revision +- [ ] list_journal_kinesis_streams_for_ledger - [ ] list_journal_s3_exports - [ ] list_journal_s3_exports_for_ledger - [ ] list_ledgers - [ ] list_tags_for_resource +- [ ] stream_journal_to_kinesis - [ ] tag_resource - [ ] untag_resource - [ ] update_ledger @@ -6324,6 +6564,7 @@ - [ ] list_permissions - [ ] list_principals - [ ] list_resource_share_permissions +- [ ] list_resource_types - [ ] list_resources - [ ] promote_resource_share_created_from_policy - [ ] reject_resource_share_invitation @@ -6482,7 +6723,7 @@ ## redshift
-29% implemented +28% implemented - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress @@ -6503,6 +6744,7 @@ - [X] create_snapshot_copy_grant - [ ] create_snapshot_schedule - [X] create_tags +- [ ] create_usage_limit - [X] delete_cluster - [X] delete_cluster_parameter_group - [X] delete_cluster_security_group @@ -6515,6 +6757,7 @@ - [X] delete_snapshot_copy_grant - [ ] delete_snapshot_schedule - [X] delete_tags +- [ ] delete_usage_limit - [ ] describe_account_attributes - [ ] describe_cluster_db_revisions - [X] describe_cluster_parameter_groups @@ -6543,6 +6786,7 @@ - [ ] describe_storage - [ ] describe_table_restore_status - [X] describe_tags +- [ ] describe_usage_limits - [ ] disable_logging - [X] disable_snapshot_copy - [ ] enable_logging @@ -6561,6 +6805,7 @@ - [ ] modify_scheduled_action - [X] modify_snapshot_copy_retention_period - [ ] modify_snapshot_schedule +- [ ] modify_usage_limit - [ ] pause_cluster - [ ] purchase_reserved_node_offering - [ ] reboot_cluster @@ -6585,6 +6830,8 @@ - [ ] create_stream_processor - [ ] delete_collection - [ ] delete_faces +- [ ] delete_project +- [ ] delete_project_version - [ ] delete_stream_processor - [ ] describe_collection - [ ] describe_project_versions @@ -6767,6 +7014,8 @@
0% implemented +- [ ] accept_domain_transfer_from_another_aws_account +- [ ] cancel_domain_transfer_to_another_aws_account - [ ] check_domain_availability - [ ] check_domain_transferability - [ ] delete_tags_for_domain @@ -6782,10 +7031,12 @@ - [ ] list_operations - [ ] list_tags_for_domain - [ ] register_domain +- [ ] reject_domain_transfer_from_another_aws_account - [ ] renew_domain - [ ] resend_contact_reachability_email - [ ] retrieve_domain_auth_code - [ ] transfer_domain +- [ ] transfer_domain_to_another_aws_account - [ ] update_domain_contact - [ ] update_domain_contact_privacy - [ ] update_domain_nameservers @@ -6823,7 +7074,7 @@ ## s3
-13% implemented +25% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload @@ -6833,7 +7084,7 @@ - [X] delete_bucket - [ ] delete_bucket_analytics_configuration - [X] delete_bucket_cors -- [ ] delete_bucket_encryption +- [X] delete_bucket_encryption - [ ] delete_bucket_inventory_configuration - [ ] delete_bucket_lifecycle - [ ] delete_bucket_metrics_configuration @@ -6841,31 +7092,31 @@ - [ ] delete_bucket_replication - [X] delete_bucket_tagging - [ ] delete_bucket_website -- [ ] delete_object +- [X] delete_object - [ ] delete_object_tagging - [ ] delete_objects - [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration -- [ ] get_bucket_cors -- [ ] get_bucket_encryption +- [X] get_bucket_cors +- [X] get_bucket_encryption - [ ] get_bucket_inventory_configuration - [ ] get_bucket_lifecycle - [ ] get_bucket_lifecycle_configuration - [ ] get_bucket_location -- [ ] get_bucket_logging +- [X] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification -- [ ] get_bucket_notification_configuration +- [X] get_bucket_notification_configuration - [X] get_bucket_policy - [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment -- [ ] get_bucket_tagging +- [X] get_bucket_tagging - [X] get_bucket_versioning - [ ] get_bucket_website -- [ ] get_object +- [X] get_object - [ ] get_object_acl - [ ] get_object_legal_hold - [ ] get_object_lock_configuration @@ -6888,7 +7139,7 @@ - [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors -- [ ] put_bucket_encryption +- [X] put_bucket_encryption - [ ] put_bucket_inventory_configuration - [ ] put_bucket_lifecycle - [ ] put_bucket_lifecycle_configuration @@ -6899,7 +7150,7 @@ - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment -- [ ] put_bucket_tagging +- [X] put_bucket_tagging - [ ] put_bucket_versioning - [ ] 
put_bucket_website - [ ] put_object @@ -7120,6 +7371,7 @@ - [ ] create_schema - [ ] delete_discoverer - [ ] delete_registry +- [ ] delete_resource_policy - [ ] delete_schema - [ ] delete_schema_version - [ ] describe_code_binding @@ -7128,18 +7380,18 @@ - [ ] describe_schema - [ ] get_code_binding_source - [ ] get_discovered_schema +- [ ] get_resource_policy - [ ] list_discoverers - [ ] list_registries - [ ] list_schema_versions - [ ] list_schemas - [ ] list_tags_for_resource -- [ ] lock_service_linked_role - [ ] put_code_binding +- [ ] put_resource_policy - [ ] search_schemas - [ ] start_discoverer - [ ] stop_discoverer - [ ] tag_resource -- [ ] unlock_service_linked_role - [ ] untag_resource - [ ] update_discoverer - [ ] update_registry @@ -7194,6 +7446,7 @@ - [ ] batch_disable_standards - [ ] batch_enable_standards - [ ] batch_import_findings +- [ ] batch_update_findings - [ ] create_action_target - [ ] create_insight - [ ] create_members @@ -7900,6 +8153,7 @@ - [ ] create_stored_iscsi_volume - [ ] create_tape_with_barcode - [ ] create_tapes +- [ ] delete_automatic_tape_creation_policy - [ ] delete_bandwidth_rate_limit - [ ] delete_chap_credentials - [ ] delete_file_share @@ -7929,6 +8183,7 @@ - [ ] detach_volume - [ ] disable_gateway - [ ] join_domain +- [ ] list_automatic_tape_creation_policies - [ ] list_file_shares - [ ] list_gateways - [ ] list_local_disks @@ -7948,6 +8203,7 @@ - [ ] shutdown_gateway - [ ] start_availability_monitor_test - [ ] start_gateway +- [ ] update_automatic_tape_creation_policy - [ ] update_bandwidth_rate_limit - [ ] update_chap_credentials - [ ] update_gateway_information @@ -8037,6 +8293,25 @@ - [ ] untag_resource
+## synthetics +
+0% implemented + +- [ ] create_canary +- [ ] delete_canary +- [ ] describe_canaries +- [ ] describe_canaries_last_run +- [ ] describe_runtime_versions +- [ ] get_canary +- [ ] get_canary_runs +- [ ] list_tags_for_resource +- [ ] start_canary +- [ ] stop_canary +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_canary +
+ ## textract
0% implemented @@ -8053,18 +8328,27 @@
0% implemented +- [ ] create_medical_vocabulary - [ ] create_vocabulary - [ ] create_vocabulary_filter +- [ ] delete_medical_transcription_job +- [ ] delete_medical_vocabulary - [ ] delete_transcription_job - [ ] delete_vocabulary - [ ] delete_vocabulary_filter +- [ ] get_medical_transcription_job +- [ ] get_medical_vocabulary - [ ] get_transcription_job - [ ] get_vocabulary - [ ] get_vocabulary_filter +- [ ] list_medical_transcription_jobs +- [ ] list_medical_vocabularies - [ ] list_transcription_jobs - [ ] list_vocabularies - [ ] list_vocabulary_filters +- [ ] start_medical_transcription_job - [ ] start_transcription_job +- [ ] update_medical_vocabulary - [ ] update_vocabulary - [ ] update_vocabulary_filter
@@ -8123,6 +8407,7 @@ - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl +- [ ] create_web_acl_migration_stack - [ ] create_xss_match_set - [ ] delete_byte_match_set - [ ] delete_geo_match_set @@ -8206,6 +8491,7 @@ - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl +- [ ] create_web_acl_migration_stack - [ ] create_xss_match_set - [ ] delete_byte_match_set - [ ] delete_geo_match_set @@ -8286,8 +8572,10 @@ - [ ] create_regex_pattern_set - [ ] create_rule_group - [ ] create_web_acl +- [ ] delete_firewall_manager_rule_groups - [ ] delete_ip_set - [ ] delete_logging_configuration +- [ ] delete_permission_policy - [ ] delete_regex_pattern_set - [ ] delete_rule_group - [ ] delete_web_acl @@ -8295,6 +8583,7 @@ - [ ] disassociate_web_acl - [ ] get_ip_set - [ ] get_logging_configuration +- [ ] get_permission_policy - [ ] get_rate_based_statement_managed_keys - [ ] get_regex_pattern_set - [ ] get_rule_group @@ -8310,6 +8599,7 @@ - [ ] list_tags_for_resource - [ ] list_web_acls - [ ] put_logging_configuration +- [ ] put_permission_policy - [ ] tag_resource - [ ] untag_resource - [ ] update_ip_set @@ -8416,6 +8706,7 @@ - [ ] delete_group - [ ] delete_mailbox_permissions - [ ] delete_resource +- [ ] delete_retention_policy - [ ] delete_user - [ ] deregister_from_work_mail - [ ] describe_group @@ -8425,6 +8716,7 @@ - [ ] disassociate_delegate_from_resource - [ ] disassociate_member_from_group - [ ] get_access_control_effect +- [ ] get_default_retention_policy - [ ] get_mailbox_details - [ ] list_access_control_rules - [ ] list_aliases @@ -8438,6 +8730,7 @@ - [ ] list_users - [ ] put_access_control_rule - [ ] put_mailbox_permissions +- [ ] put_retention_policy - [ ] register_to_work_mail - [ ] reset_password - [ ] tag_resource diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 360c47528..967944b91 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -218,7 +218,7 
@@ class LambdaFunction(BaseModel): key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key(self.code["S3Bucket"], self.code["S3Key"]) + key = s3_backend.get_object(self.code["S3Bucket"], self.code["S3Key"]) except MissingBucket: if do_validate_s3(): raise InvalidParameterValueException( @@ -344,7 +344,7 @@ class LambdaFunction(BaseModel): key = None try: # FIXME: does not validate bucket region - key = s3_backend.get_key( + key = s3_backend.get_object( updated_spec["S3Bucket"], updated_spec["S3Key"] ) except MissingBucket: diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 05ebdace8..a489f54fe 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -541,7 +541,7 @@ class ResourceMap(collections_abc.Mapping): if name == "AWS::Include": location = params["Location"] bucket_name, name = bucket_and_name_from_url(location) - key = s3_backend.get_key(bucket_name, name) + key = s3_backend.get_object(bucket_name, name) self._parsed_resources.update(json.loads(key.value)) def load_parameters(self): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 302849481..17b76854a 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -36,7 +36,7 @@ class CloudFormationResponse(BaseResponse): bucket_name = template_url_parts.netloc.split(".")[0] key_name = template_url_parts.path.lstrip("/") - key = s3_backend.get_key(bucket_name, key_name) + key = s3_backend.get_object(bucket_name, key_name) return key.value.decode("utf-8") def create_stack(self): diff --git a/moto/s3/models.py b/moto/s3/models.py index 25ead4f5e..c998a2bb6 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -1315,7 +1315,7 @@ class S3Backend(BaseBackend): return self.account_public_access_block - def set_key( + def set_object( self, bucket_name, key_name, value, storage=None, etag=None, multipart=None ): key_name = clean_key_name(key_name) @@ -1346,11 
+1346,11 @@ class S3Backend(BaseBackend): def append_to_key(self, bucket_name, key_name, value): key_name = clean_key_name(key_name) - key = self.get_key(bucket_name, key_name) + key = self.get_object(bucket_name, key_name) key.append_to_value(value) return key - def get_key(self, bucket_name, key_name, version_id=None, part_number=None): + def get_object(self, bucket_name, key_name, version_id=None, part_number=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) key = None @@ -1385,11 +1385,11 @@ class S3Backend(BaseBackend): ) return key - def get_bucket_tags(self, bucket_name): + def get_bucket_tagging(self, bucket_name): bucket = self.get_bucket(bucket_name) return self.tagger.list_tags_for_resource(bucket.arn) - def put_bucket_tags(self, bucket_name, tags): + def put_bucket_tagging(self, bucket_name, tags): bucket = self.get_bucket(bucket_name) self.tagger.delete_all_tags_for_resource(bucket.arn) self.tagger.tag_resource( @@ -1481,7 +1481,7 @@ class S3Backend(BaseBackend): return del bucket.multiparts[multipart_id] - key = self.set_key( + key = self.set_object( bucket_name, multipart.key_name, value, etag=etag, multipart=multipart ) key.set_metadata(multipart.metadata) @@ -1521,7 +1521,7 @@ class S3Backend(BaseBackend): dest_bucket = self.get_bucket(dest_bucket_name) multipart = dest_bucket.multiparts[multipart_id] - src_value = self.get_key( + src_value = self.get_object( src_bucket_name, src_key_name, version_id=src_version_id ).value if start_byte is not None: @@ -1565,7 +1565,7 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.keys[key_name] = FakeDeleteMarker(key=bucket.keys[key_name]) - def delete_key(self, bucket_name, key_name, version_id=None): + def delete_object(self, bucket_name, key_name, version_id=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) @@ -1606,7 +1606,7 @@ class S3Backend(BaseBackend): src_key_name = clean_key_name(src_key_name) dest_key_name = 
clean_key_name(dest_key_name) dest_bucket = self.get_bucket(dest_bucket_name) - key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) + key = self.get_object(src_bucket_name, src_key_name, version_id=src_version_id) new_key = key.copy(dest_key_name, dest_bucket.is_versioned) self.tagger.copy_tags(key.arn, new_key.arn) @@ -1626,5 +1626,17 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) return bucket.acl + def get_bucket_cors(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.cors + + def get_bucket_logging(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.logging + + def get_bucket_notification_configuration(self, bucket_name): + bucket = self.get_bucket(bucket_name) + return bucket.notification_configuration + s3_backend = S3Backend() diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 4aaba1fcd..41db43af7 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -382,7 +382,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): template = self.response_template(S3_OBJECT_ACL_RESPONSE) return template.render(obj=bucket) elif "tagging" in querystring: - tags = self.backend.get_bucket_tags(bucket_name)["Tags"] + tags = self.backend.get_bucket_tagging(bucket_name)["Tags"] # "Special Error" if no tags: if len(tags) == 0: template = self.response_template(S3_NO_BUCKET_TAGGING) @@ -390,25 +390,27 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): template = self.response_template(S3_OBJECT_TAGGING_RESPONSE) return template.render(tags=tags) elif "logging" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if not bucket.logging: + logging = self.backend.get_bucket_logging(bucket_name) + if not logging: template = self.response_template(S3_NO_LOGGING_CONFIG) return 200, {}, template.render() template = self.response_template(S3_LOGGING_CONFIG) - return 200, {}, 
template.render(logging=bucket.logging) + return 200, {}, template.render(logging=logging) elif "cors" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if len(bucket.cors) == 0: + cors = self.backend.get_bucket_cors(bucket_name) + if len(cors) == 0: template = self.response_template(S3_NO_CORS_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) - return template.render(bucket=bucket) + return template.render(cors=cors) elif "notification" in querystring: - bucket = self.backend.get_bucket(bucket_name) - if not bucket.notification_configuration: + notification_configuration = self.backend.get_bucket_notification_configuration( + bucket_name + ) + if not notification_configuration: return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) - return template.render(bucket=bucket) + return template.render(config=notification_configuration) elif "accelerate" in querystring: bucket = self.backend.get_bucket(bucket_name) if bucket.accelerate_configuration is None: @@ -663,7 +665,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) - self.backend.put_bucket_tags(bucket_name, tagging) + self.backend.put_bucket_tagging(bucket_name, tagging) return "" elif "website" in querystring: self.backend.set_bucket_website_configuration(bucket_name, body) @@ -840,7 +842,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: status_code = 204 - new_key = self.backend.set_key(bucket_name, key, f) + new_key = self.backend.set_object(bucket_name, key, f) # Metadata metadata = metadata_from_headers(form) @@ -879,7 +881,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): key_name = object_["Key"] version_id = object_.get("VersionId", None) - success = self.backend.delete_key( + success = 
self.backend.delete_object( bucket_name, undo_clean_key_name(key_name), version_id=version_id ) if success: @@ -1056,7 +1058,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): signed_url = "Signature=" in request.url elif hasattr(request, "requestline"): signed_url = "Signature=" in request.path - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) if key: if not key.acl.public_read and not signed_url: @@ -1118,7 +1120,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ) version_id = query.get("versionId", [None])[0] if_modified_since = headers.get("If-Modified-Since", None) - key = self.backend.get_key(bucket_name, key_name, version_id=version_id) + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) if key is None: raise MissingKey(key_name) if if_modified_since: @@ -1164,7 +1166,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): except ValueError: start_byte, end_byte = None, None - if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + if self.backend.get_object( + src_bucket, src_key, version_id=src_version_id + ): key = self.backend.copy_part( bucket_name, upload_id, @@ -1193,7 +1197,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): tagging = self._tagging_from_headers(request.headers) if "acl" in query: - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) # TODO: Support the XML-based ACL format key.set_acl(acl) return 200, response_headers, "" @@ -1203,7 +1207,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): version_id = query["versionId"][0] else: version_id = None - key = self.backend.get_key(bucket_name, key_name, version_id=version_id) + key = self.backend.get_object(bucket_name, key_name, version_id=version_id) tagging = self._tagging_from_xml(body) 
self.backend.set_key_tags(key, tagging, key_name) return 200, response_headers, "" @@ -1221,7 +1225,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ) src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0] - key = self.backend.get_key(src_bucket, src_key, version_id=src_version_id) + key = self.backend.get_object( + src_bucket, src_key, version_id=src_version_id + ) if key is not None: if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]: @@ -1238,7 +1244,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): else: return 404, response_headers, "" - new_key = self.backend.get_key(bucket_name, key_name) + new_key = self.backend.get_object(bucket_name, key_name) mdirective = request.headers.get("x-amz-metadata-directive") if mdirective is not None and mdirective == "REPLACE": metadata = metadata_from_headers(request.headers) @@ -1254,13 +1260,13 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): closing_connection = headers.get("connection") == "close" if closing_connection and streaming_request: # Closing the connection of a streaming request. 
No more data - new_key = self.backend.get_key(bucket_name, key_name) + new_key = self.backend.get_object(bucket_name, key_name) elif streaming_request: # Streaming request, more data new_key = self.backend.append_to_key(bucket_name, key_name, body) else: # Initial data - new_key = self.backend.set_key( + new_key = self.backend.set_object( bucket_name, key_name, body, storage=storage_class ) request.streaming = True @@ -1286,7 +1292,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) - key = self.backend.get_key( + key = self.backend.get_object( bucket_name, key_name, version_id=version_id, part_number=part_number ) if key: @@ -1596,7 +1602,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): self.backend.cancel_multipart(bucket_name, upload_id) return 204, {}, "" version_id = query.get("versionId", [None])[0] - self.backend.delete_key(bucket_name, key_name, version_id=version_id) + self.backend.delete_object(bucket_name, key_name, version_id=version_id) return 204, {}, "" def _complete_multipart_body(self, body): @@ -1633,7 +1639,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): elif "restore" in query: es = minidom.parseString(body).getElementsByTagName("Days") days = es[0].childNodes[0].wholeText - key = self.backend.get_key(bucket_name, key_name) + key = self.backend.get_object(bucket_name, key_name) r = 202 if key.expiry_date is not None: r = 200 @@ -1959,7 +1965,7 @@ S3_OBJECT_TAGGING_RESPONSE = """\ S3_BUCKET_CORS_RESPONSE = """ - {% for cors in bucket.cors %} + {% for cors in cors %} {% for origin in cors.allowed_origins %} {{ origin }} @@ -2192,7 +2198,7 @@ S3_NO_ENCRYPTION = """ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ - {% for topic in bucket.notification_configuration.topic %} + {% for topic in config.topic %} {{ topic.id }} {{ topic.arn }} @@ -2213,7 +2219,7 @@ 
S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endif %} {% endfor %} - {% for queue in bucket.notification_configuration.queue %} + {% for queue in config.queue %} {{ queue.id }} {{ queue.arn }} @@ -2234,7 +2240,7 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endif %} {% endfor %} - {% for cf in bucket.notification_configuration.cloud_function %} + {% for cf in config.cloud_function %} {{ cf.id }} {{ cf.arn }} diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 363ccc02d..b91eb9983 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -4364,7 +4364,7 @@ def test_s3_config_dict(): # With 1 bucket in us-west-2: s3_config_query.backends["global"].create_bucket("bucket1", "us-west-2") - s3_config_query.backends["global"].put_bucket_tags("bucket1", tags) + s3_config_query.backends["global"].put_bucket_tagging("bucket1", tags) # With a log bucket: s3_config_query.backends["global"].create_bucket("logbucket", "us-west-2") From b1d515c9295311170e240cf583251b93e9989b48 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 10 Jun 2020 12:28:40 +0530 Subject: [PATCH 07/20] =?UTF-8?q?Enhancement=20:=20API-Gateway=20Put=20Int?= =?UTF-8?q?egration=20Response=20-=20Adding=20support=20f=E2=80=A6=20(#305?= =?UTF-8?q?8)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Enhancement : API-Gateway Put Integration Response - Adding support for contentHandling. * Added tests where the contentHandling is None also gets tested. 
* Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/apigateway/models.py | 17 +++++-- moto/apigateway/responses.py | 2 + tests/test_apigateway/test_apigateway.py | 58 ++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 4 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index 4513c75ab..fbd525df1 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -56,13 +56,21 @@ class Deployment(BaseModel, dict): class IntegrationResponse(BaseModel, dict): - def __init__(self, status_code, selection_pattern=None, response_templates=None): + def __init__( + self, + status_code, + selection_pattern=None, + response_templates=None, + content_handling=None, + ): if response_templates is None: response_templates = {"application/json": None} self["responseTemplates"] = response_templates self["statusCode"] = status_code if selection_pattern: self["selectionPattern"] = selection_pattern + if content_handling: + self["contentHandling"] = content_handling class Integration(BaseModel, dict): @@ -75,12 +83,12 @@ class Integration(BaseModel, dict): self["integrationResponses"] = {"200": IntegrationResponse(200)} def create_integration_response( - self, status_code, selection_pattern, response_templates + self, status_code, selection_pattern, response_templates, content_handling ): if response_templates == {}: response_templates = None integration_response = IntegrationResponse( - status_code, selection_pattern, response_templates + status_code, selection_pattern, response_templates, content_handling ) self["integrationResponses"][status_code] = integration_response return integration_response @@ -959,12 +967,13 @@ class APIGatewayBackend(BaseBackend): status_code, selection_pattern, response_templates, + content_handling, ): if response_templates is None: raise InvalidRequestInput() integration = self.get_integration(function_id, resource_id, method_type) integration_response = 
integration.create_integration_response( - status_code, selection_pattern, response_templates + status_code, selection_pattern, response_templates, content_handling ) return integration_response diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 1a7689d28..e3951192b 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -387,6 +387,7 @@ class APIGatewayResponse(BaseResponse): elif self.method == "PUT": selection_pattern = self._get_param("selectionPattern") response_templates = self._get_param("responseTemplates") + content_handling = self._get_param("contentHandling") integration_response = self.backend.create_integration_response( function_id, resource_id, @@ -394,6 +395,7 @@ class APIGatewayResponse(BaseResponse): status_code, selection_pattern, response_templates, + content_handling, ) elif self.method == "DELETE": integration_response = self.backend.delete_integration_response( diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 295cd1c54..1c7f6d385 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -544,6 +544,7 @@ def test_integration_response(): selectionPattern="foobar", responseTemplates={}, ) + # this is hard to match against, so remove it response["ResponseMetadata"].pop("HTTPHeaders", None) response["ResponseMetadata"].pop("RetryAttempts", None) @@ -592,6 +593,63 @@ def test_integration_response(): response = client.get_method(restApiId=api_id, resourceId=root_id, httpMethod="GET") response["methodIntegration"]["integrationResponses"].should.equal({}) + # adding a new method and performing put integration with contentHandling as CONVERT_TO_BINARY + client.put_method( + restApiId=api_id, resourceId=root_id, httpMethod="PUT", authorizationType="none" + ) + + client.put_method_response( + restApiId=api_id, resourceId=root_id, httpMethod="PUT", statusCode="200" + ) + + client.put_integration( 
restApiId=api_id, + resourceId=root_id, + httpMethod="PUT", + type="HTTP", + uri="http://httpbin.org/robots.txt", + integrationHttpMethod="POST", + ) + + response = client.put_integration_response( + restApiId=api_id, + resourceId=root_id, + httpMethod="PUT", + statusCode="200", + selectionPattern="foobar", + responseTemplates={}, + contentHandling="CONVERT_TO_BINARY", + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": None}, + "contentHandling": "CONVERT_TO_BINARY", + } + ) + + response = client.get_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="PUT", statusCode="200" + ) + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": None}, + "contentHandling": "CONVERT_TO_BINARY", + } + ) + @mock_apigateway @mock_cognitoidp From fbc5769b745ed9be4b77a64f2159ed0140b29330 Mon Sep 17 00:00:00 2001 From: Kristopher Chun Date: Wed, 10 Jun 2020 00:54:03 -0700 Subject: [PATCH 08/20] =?UTF-8?q?Fix:=20SecretsManager=20-=20Added=20missi?= =?UTF-8?q?ng=20pop()=20override=20to=20get=5Fsecret=5Fname=E2=80=A6=20(#3?= =?UTF-8?q?057)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix: SecretsManager - Added missing pop() override to get_secret_name_from_arn (#3056) * Added test case for delete_secret_force_with_arn (#3057) * Fixed lint for test_delete_secret_force_with_arn (#3057) --- moto/secretsmanager/models.py | 4 ++++ 
.../test_secretsmanager/test_secretsmanager.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 01acf2dbb..8641916a7 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -38,6 +38,10 @@ class SecretsStore(dict): new_key = get_secret_name_from_arn(key) return dict.__contains__(self, new_key) + def pop(self, key, *args, **kwargs): + new_key = get_secret_name_from_arn(key) + return super(SecretsStore, self).pop(new_key, *args, **kwargs) + class SecretsManagerBackend(BaseBackend): def __init__(self, region_name=None, **kwargs): diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 0fe23fd7f..59992e094 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -211,6 +211,24 @@ def test_delete_secret_force(): result = conn.get_secret_value(SecretId="test-secret") +@mock_secretsmanager +def test_delete_secret_force_with_arn(): + conn = boto3.client("secretsmanager", region_name="us-west-2") + + create_secret = conn.create_secret(Name="test-secret", SecretString="foosecret") + + result = conn.delete_secret( + SecretId=create_secret["ARN"], ForceDeleteWithoutRecovery=True + ) + + assert result["ARN"] + assert result["DeletionDate"] > datetime.fromtimestamp(1, pytz.utc) + assert result["Name"] == "test-secret" + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId="test-secret") + + @mock_secretsmanager def test_delete_secret_that_does_not_exist(): conn = boto3.client("secretsmanager", region_name="us-west-2") From dcde2570b16cbe5183b358531faddcfe2c2b5a56 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Wed, 10 Jun 2020 14:53:43 +0530 Subject: [PATCH 09/20] =?UTF-8?q?Enhancement=20:=20SES=20-=20Added=20creat?= =?UTF-8?q?e-receipt-rule-set,=20create-receipt-rul=E2=80=A6=20(#3059)?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Enhancement : SES - Added create-receipt-rule-set, create-receipt-rule functionalities. * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ses/exceptions.py | 23 +++++++ moto/ses/models.py | 18 +++++ moto/ses/responses.py | 27 ++++++++ tests/test_ses/test_ses_boto3.py | 112 +++++++++++++++++++++++++++++++ 4 files changed, 180 insertions(+) diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py index 7a4ef1b03..d3e60aef5 100644 --- a/moto/ses/exceptions.py +++ b/moto/ses/exceptions.py @@ -41,3 +41,26 @@ class TemplateDoesNotExist(RESTError): def __init__(self, message): super(TemplateDoesNotExist, self).__init__("TemplateDoesNotExist", message) + + +class RuleSetNameAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(RuleSetNameAlreadyExists, self).__init__( + "RuleSetNameAlreadyExists", message + ) + + +class RuleAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(RuleAlreadyExists, self).__init__("RuleAlreadyExists", message) + + +class RuleSetDoesNotExist(RESTError): + code = 400 + + def __init__(self, message): + super(RuleSetDoesNotExist, self).__init__("RuleSetDoesNotExist", message) diff --git a/moto/ses/models.py b/moto/ses/models.py index f918d9021..e90f66fa8 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -12,6 +12,9 @@ from .exceptions import ( EventDestinationAlreadyExists, TemplateNameAlreadyExists, TemplateDoesNotExist, + RuleSetNameAlreadyExists, + RuleSetDoesNotExist, + RuleAlreadyExists, ) from .utils import get_random_message_id from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY @@ -94,6 +97,7 @@ class SESBackend(BaseBackend): self.config_set_event_destination = {} self.event_destinations = {} self.templates = {} + self.receipt_rule_set = {} def _is_verified_address(self, source): _, address = parseaddr(source) @@ -294,5 +298,19 @@ class SESBackend(BaseBackend): def 
list_templates(self): return list(self.templates.values()) + def create_receipt_rule_set(self, rule_set_name): + if self.receipt_rule_set.get(rule_set_name) is not None: + raise RuleSetNameAlreadyExists("Duplicate receipt rule set Name.") + self.receipt_rule_set[rule_set_name] = [] + + def create_receipt_rule(self, rule_set_name, rule): + rule_set = self.receipt_rule_set.get(rule_set_name) + if rule_set is None: + raise RuleSetDoesNotExist("Invalid Rule Set Name.") + if rule in rule_set: + raise RuleAlreadyExists("Duplicate Rule Name.") + rule_set.append(rule) + self.receipt_rule_set[rule_set_name] = rule_set + ses_backend = SESBackend() diff --git a/moto/ses/responses.py b/moto/ses/responses.py index f0780e98a..9702c724d 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -199,6 +199,19 @@ class EmailResponse(BaseResponse): template = self.response_template(LIST_TEMPLATES) return template.render(templates=email_templates) + def create_receipt_rule_set(self): + rule_set_name = self._get_param("RuleSetName") + ses_backend.create_receipt_rule_set(rule_set_name) + template = self.response_template(CREATE_RECEIPT_RULE_SET) + return template.render() + + def create_receipt_rule(self): + rule_set_name = self._get_param("RuleSetName") + rule = self._get_dict_param("Rule") + ses_backend.create_receipt_rule(rule_set_name, rule) + template = self.response_template(CREATE_RECEIPT_RULE) + return template.render() + VERIFY_EMAIL_IDENTITY = """ @@ -385,3 +398,17 @@ LIST_TEMPLATES = """ + + + 47e0ef1a-9bf2-11e1-9279-01ab88cf109a + +""" + +CREATE_RECEIPT_RULE = """ + + + 15e0ef1a-9bf2-11e1-9279-01ab88cf109a + +""" diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 707afe8fb..de8ec7261 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -300,6 +300,118 @@ def test_create_configuration_set(): ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists") +@mock_ses +def 
test_create_receipt_rule_set(): + conn = boto3.client("ses", region_name="us-east-1") + result = conn.create_receipt_rule_set(RuleSetName="testRuleSet") + + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + with assert_raises(ClientError) as ex: + conn.create_receipt_rule_set(RuleSetName="testRuleSet") + + ex.exception.response["Error"]["Code"].should.equal("RuleSetNameAlreadyExists") + + +@mock_ses +def test_create_receipt_rule(): + conn = boto3.client("ses", region_name="us-east-1") + rule_set_name = "testRuleSet" + conn.create_receipt_rule_set(RuleSetName=rule_set_name) + + result = conn.create_receipt_rule( + RuleSetName=rule_set_name, + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + with assert_raises(ClientError) as ex: + conn.create_receipt_rule( + RuleSetName=rule_set_name, + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": ["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + ex.exception.response["Error"]["Code"].should.equal("RuleAlreadyExists") + + with assert_raises(ClientError) as ex: + conn.create_receipt_rule( + RuleSetName="InvalidRuleSetaName", + Rule={ + "Name": "testRule", + "Enabled": False, + "TlsPolicy": "Optional", + "Recipients": 
["string"], + "Actions": [ + { + "S3Action": { + "TopicArn": "string", + "BucketName": "string", + "ObjectKeyPrefix": "string", + "KmsKeyArn": "string", + }, + "BounceAction": { + "TopicArn": "string", + "SmtpReplyCode": "string", + "StatusCode": "string", + "Message": "string", + "Sender": "string", + }, + } + ], + "ScanEnabled": False, + }, + ) + + ex.exception.response["Error"]["Code"].should.equal("RuleSetDoesNotExist") + + @mock_ses def test_create_ses_template(): conn = boto3.client("ses", region_name="us-east-1") From b88f1660991ebd0118839a48948cab943913d317 Mon Sep 17 00:00:00 2001 From: Gordon Cassie Date: Thu, 11 Jun 2020 01:50:50 -0400 Subject: [PATCH 10/20] Fix: Support streaming upload from requests. (#3062) * Fix: Support streaming upload from requests. * [FIX] style. Co-authored-by: Gordon Cassie --- moto/core/models.py | 2 ++ tests/test_s3/test_s3.py | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/moto/core/models.py b/moto/core/models.py index 1597efc7b..ba4564e4a 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -184,6 +184,8 @@ class CallbackResponse(responses.CallbackResponse): body = None elif isinstance(request.body, six.text_type): body = six.BytesIO(six.b(request.body)) + elif hasattr(request.body, "read"): + body = six.BytesIO(request.body.read()) else: body = six.BytesIO(request.body) req = Request.from_values( diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 363ccc02d..1d1e83478 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1040,6 +1040,22 @@ def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): assert response.status_code == 200, "Failed on req number {}".format(i) +@mock_s3 +def test_streaming_upload_from_file_to_presigned_url(): + s3 = boto3.resource("s3") + bucket = s3.Bucket("test-bucket") + bucket.create() + bucket.put_object(Body=b"ABCD", Key="file.txt") + + params = {"Bucket": "test-bucket", "Key": "file.txt"} + presigned_url = 
boto3.client("s3").generate_presigned_url( + "put_object", params, ExpiresIn=900 + ) + with open(__file__, "rb") as f: + response = requests.get(presigned_url, data=f) + assert response.status_code == 200 + + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource("s3") From 5880d31f7e746388a64ff42b7f90077a0d666a82 Mon Sep 17 00:00:00 2001 From: ktrueda Date: Fri, 12 Jun 2020 01:27:29 +0900 Subject: [PATCH 11/20] Implemented Athena create_named_query, get_named_query (#1524) (#3065) * Implemented Athena create_named_query, get_named_query --- moto/athena/models.py | 25 +++++++++++++++++++++ moto/athena/responses.py | 29 ++++++++++++++++++++++++ tests/test_athena/test_athena.py | 38 ++++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+) diff --git a/moto/athena/models.py b/moto/athena/models.py index c39c13817..24ad73ab9 100644 --- a/moto/athena/models.py +++ b/moto/athena/models.py @@ -60,6 +60,16 @@ class Execution(BaseModel): self.status = "QUEUED" +class NamedQuery(BaseModel): + def __init__(self, name, description, database, query_string, workgroup): + self.id = str(uuid4()) + self.name = name + self.description = description + self.database = database + self.query_string = query_string + self.workgroup = workgroup + + class AthenaBackend(BaseBackend): region_name = None @@ -68,6 +78,7 @@ class AthenaBackend(BaseBackend): self.region_name = region_name self.work_groups = {} self.executions = {} + self.named_queries = {} def create_work_group(self, name, configuration, description, tags): if name in self.work_groups: @@ -113,6 +124,20 @@ class AthenaBackend(BaseBackend): execution = self.executions[exec_id] execution.status = "CANCELLED" + def create_named_query(self, name, description, database, query_string, workgroup): + nq = NamedQuery( + name=name, + description=description, + database=database, + query_string=query_string, + workgroup=workgroup, + ) + self.named_queries[nq.id] = nq + return nq.id + + def get_named_query(self, 
query_id): + return self.named_queries[query_id] if query_id in self.named_queries else None + athena_backends = {} for region in Session().get_available_regions("athena"): diff --git a/moto/athena/responses.py b/moto/athena/responses.py index b52e0beed..b5e6d6a95 100644 --- a/moto/athena/responses.py +++ b/moto/athena/responses.py @@ -85,3 +85,32 @@ class AthenaResponse(BaseResponse): json.dumps({"__type": "InvalidRequestException", "Message": msg,}), dict(status=status), ) + + def create_named_query(self): + name = self._get_param("Name") + description = self._get_param("Description") + database = self._get_param("Database") + query_string = self._get_param("QueryString") + workgroup = self._get_param("WorkGroup") + if workgroup and not self.athena_backend.get_work_group(workgroup): + return self.error("WorkGroup does not exist", 400) + query_id = self.athena_backend.create_named_query( + name, description, database, query_string, workgroup + ) + return json.dumps({"NamedQueryId": query_id}) + + def get_named_query(self): + query_id = self._get_param("NamedQueryId") + nq = self.athena_backend.get_named_query(query_id) + return json.dumps( + { + "NamedQuery": { + "Name": nq.name, + "Description": nq.description, + "Database": nq.database, + "QueryString": nq.query_string, + "NamedQueryId": nq.id, + "WorkGroup": nq.workgroup, + } + } + ) diff --git a/tests/test_athena/test_athena.py b/tests/test_athena/test_athena.py index 93ca436aa..805a653e3 100644 --- a/tests/test_athena/test_athena.py +++ b/tests/test_athena/test_athena.py @@ -172,6 +172,44 @@ def test_stop_query_execution(): details["Status"]["State"].should.equal("CANCELLED") +@mock_athena +def test_create_named_query(): + client = boto3.client("athena", region_name="us-east-1") + + # craete named query + res = client.create_named_query( + Name="query-name", Database="target_db", QueryString="SELECT * FROM table1", + ) + + assert "NamedQueryId" in res + + +@mock_athena +def test_get_named_query(): + client = 
boto3.client("athena", region_name="us-east-1") + query_name = "query-name" + database = "target_db" + query_string = "SELECT * FROM tbl1" + description = "description of this query" + + # craete named query + res_create = client.create_named_query( + Name=query_name, + Database=database, + QueryString=query_string, + Description=description, + ) + query_id = res_create["NamedQueryId"] + + # get named query + res_get = client.get_named_query(NamedQueryId=query_id)["NamedQuery"] + res_get["Name"].should.equal(query_name) + res_get["Description"].should.equal(description) + res_get["Database"].should.equal(database) + res_get["QueryString"].should.equal(query_string) + res_get["NamedQueryId"].should.equal(query_id) + + def create_basic_workgroup(client, name): client.create_work_group( Name=name, From 475f022b7839420ffacb940abe6b0214bfa10279 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Fri, 12 Jun 2020 20:46:55 +0530 Subject: [PATCH 12/20] Enhancement: EC2 added create route with networkInterfaceId (#3063) * Enhancement:EC2- create route with network interfcaeID * modifying existing test case * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/models.py | 33 +++++++++++++++++------------ tests/test_ec2/test_route_tables.py | 28 +++++++++++++++++++++++- 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f8ebd02ec..cb7ba0ff2 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -3639,26 +3639,31 @@ class RouteBackend(object): interface_id=None, vpc_peering_connection_id=None, ): + gateway = None + nat_gateway = None + route_table = self.get_route_table(route_table_id) if interface_id: - self.raise_not_implemented_error("CreateRoute to NetworkInterfaceId") + # for validating interface Id whether it is valid or not. 
+ self.get_network_interface(interface_id) - gateway = None - if gateway_id: - if EC2_RESOURCE_TO_PREFIX["vpn-gateway"] in gateway_id: - gateway = self.get_vpn_gateway(gateway_id) - elif EC2_RESOURCE_TO_PREFIX["internet-gateway"] in gateway_id: - gateway = self.get_internet_gateway(gateway_id) + else: + if gateway_id: + if EC2_RESOURCE_TO_PREFIX["vpn-gateway"] in gateway_id: + gateway = self.get_vpn_gateway(gateway_id) + elif EC2_RESOURCE_TO_PREFIX["internet-gateway"] in gateway_id: + gateway = self.get_internet_gateway(gateway_id) - try: - ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) - except ValueError: - raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + try: + ipaddress.IPv4Network( + six.text_type(destination_cidr_block), strict=False + ) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) - nat_gateway = None - if nat_gateway_id is not None: - nat_gateway = self.nat_gateways.get(nat_gateway_id) + if nat_gateway_id is not None: + nat_gateway = self.nat_gateways.get(nat_gateway_id) route = Route( route_table, diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index a64fbae1a..61fb33f90 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -462,7 +462,7 @@ def test_routes_not_supported(): # Create conn.create_route.when.called_with( main_route_table.id, ROUTE_CIDR, interface_id="eni-1234abcd" - ).should.throw(NotImplementedError) + ).should.throw("InvalidNetworkInterfaceID.NotFound") # Replace igw = conn.create_internet_gateway() @@ -583,6 +583,32 @@ def test_create_route_with_invalid_destination_cidr_block_parameter(): ) +@mock_ec2 +def test_create_route_with_network_interface_id(): + ec2 = boto3.resource("ec2", region_name="us-west-2") + ec2_client = boto3.client("ec2", region_name="us-west-2") + + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet( + VpcId=vpc.id, 
CidrBlock="10.0.0.0/24", AvailabilityZone="us-west-2a" + ) + + route_table = ec2_client.create_route_table(VpcId=vpc.id) + + route_table_id = route_table["RouteTable"]["RouteTableId"] + + eni1 = ec2_client.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress="10.0.10.5" + ) + + route = ec2_client.create_route( + NetworkInterfaceId=eni1["NetworkInterface"]["NetworkInterfaceId"], + RouteTableId=route_table_id, + ) + + route["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + @mock_ec2 def test_describe_route_tables_with_nat_gateway(): ec2 = boto3.client("ec2", region_name="us-west-1") From bbe1320e7c8587ed08418358b28f43d44d563d71 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 13 Jun 2020 20:27:05 +0100 Subject: [PATCH 13/20] DynamoDB - Add default GSI throughput --- moto/dynamodb2/models/__init__.py | 109 ++++++++++++++---- moto/dynamodb2/responses.py | 5 +- .../test_dynamodb_table_with_range_key.py | 77 +++++++++++++ 3 files changed, 166 insertions(+), 25 deletions(-) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index a5277800f..60bc1b2fe 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -272,6 +272,66 @@ class StreamShard(BaseModel): return [i.to_json() for i in self.items[start:end]] +class LocalSecondaryIndex(BaseModel): + def __init__(self, index_name, schema, projection): + self.name = index_name + self.schema = schema + self.projection = projection + + def describe(self): + return { + "IndexName": self.name, + "KeySchema": self.schema, + "Projection": self.projection, + } + + @staticmethod + def create(dct): + return LocalSecondaryIndex( + index_name=dct["IndexName"], + schema=dct["KeySchema"], + projection=dct["Projection"], + ) + + +class GlobalSecondaryIndex(BaseModel): + def __init__( + self, index_name, schema, projection, status="ACTIVE", throughput=None + ): + self.name = index_name + self.schema = schema + self.projection = projection + 
self.status = status + self.throughput = throughput or { + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + } + + def describe(self): + return { + "IndexName": self.name, + "KeySchema": self.schema, + "Projection": self.projection, + "IndexStatus": self.status, + "ProvisionedThroughput": self.throughput, + } + + @staticmethod + def create(dct): + return GlobalSecondaryIndex( + index_name=dct["IndexName"], + schema=dct["KeySchema"], + projection=dct["Projection"], + throughput=dct.get("ProvisionedThroughput", None), + ) + + def update(self, u): + self.name = u.get("IndexName", self.name) + self.schema = u.get("KeySchema", self.schema) + self.projection = u.get("Projection", self.projection) + self.throughput = u.get("ProvisionedThroughput", self.throughput) + + class Table(BaseModel): def __init__( self, @@ -302,12 +362,13 @@ class Table(BaseModel): else: self.throughput = throughput self.throughput["NumberOfDecreasesToday"] = 0 - self.indexes = indexes - self.global_indexes = global_indexes if global_indexes else [] - for index in self.global_indexes: - index[ - "IndexStatus" - ] = "ACTIVE" # One of 'CREATING'|'UPDATING'|'DELETING'|'ACTIVE' + self.indexes = [ + LocalSecondaryIndex.create(i) for i in (indexes if indexes else []) + ] + self.global_indexes = [ + GlobalSecondaryIndex.create(i) + for i in (global_indexes if global_indexes else []) + ] self.created_at = datetime.datetime.utcnow() self.items = defaultdict(dict) self.table_arn = self._generate_arn(table_name) @@ -374,8 +435,10 @@ class Table(BaseModel): "KeySchema": self.schema, "ItemCount": len(self), "CreationDateTime": unix_time(self.created_at), - "GlobalSecondaryIndexes": [index for index in self.global_indexes], - "LocalSecondaryIndexes": [index for index in self.indexes], + "GlobalSecondaryIndexes": [ + index.describe() for index in self.global_indexes + ], + "LocalSecondaryIndexes": [index.describe() for index in self.indexes], } } if self.stream_specification and 
self.stream_specification["StreamEnabled"]: @@ -401,7 +464,7 @@ class Table(BaseModel): keys = [self.hash_key_attr] for index in self.global_indexes: hash_key = None - for key in index["KeySchema"]: + for key in index.schema: if key["KeyType"] == "HASH": hash_key = key["AttributeName"] keys.append(hash_key) @@ -412,7 +475,7 @@ class Table(BaseModel): keys = [self.range_key_attr] for index in self.global_indexes: range_key = None - for key in index["KeySchema"]: + for key in index.schema: if key["KeyType"] == "RANGE": range_key = keys.append(key["AttributeName"]) keys.append(range_key) @@ -545,7 +608,7 @@ class Table(BaseModel): if index_name: all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name not in indexes_by_name: raise ValueError( "Invalid index: %s for table: %s. Available indexes are: %s" @@ -555,14 +618,14 @@ class Table(BaseModel): index = indexes_by_name[index_name] try: index_hash_key = [ - key for key in index["KeySchema"] if key["KeyType"] == "HASH" + key for key in index.schema if key["KeyType"] == "HASH" ][0] except IndexError: - raise ValueError("Missing Hash Key. KeySchema: %s" % index["KeySchema"]) + raise ValueError("Missing Hash Key. 
KeySchema: %s" % index.name) try: index_range_key = [ - key for key in index["KeySchema"] if key["KeyType"] == "RANGE" + key for key in index.schema if key["KeyType"] == "RANGE" ][0] except IndexError: index_range_key = None @@ -667,9 +730,9 @@ class Table(BaseModel): def has_idx_items(self, index_name): all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) idx = indexes_by_name[index_name] - idx_col_set = set([i["AttributeName"] for i in idx["KeySchema"]]) + idx_col_set = set([i["AttributeName"] for i in idx.schema]) for hash_set in self.items.values(): if self.range_key_attr: @@ -692,7 +755,7 @@ class Table(BaseModel): results = [] scanned_count = 0 all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name: if index_name not in indexes_by_name: @@ -773,9 +836,9 @@ class Table(BaseModel): if scanned_index: all_indexes = self.all_indexes() - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) idx = indexes_by_name[scanned_index] - idx_col_list = [i["AttributeName"] for i in idx["KeySchema"]] + idx_col_list = [i["AttributeName"] for i in idx.schema] for col in idx_col_list: last_evaluated_key[col] = results[-1].attrs[col] @@ -885,7 +948,7 @@ class DynamoDBBackend(BaseBackend): def update_table_global_indexes(self, name, global_index_updates): table = self.tables[name] - gsis_by_name = dict((i["IndexName"], i) for i in table.global_indexes) + gsis_by_name = dict((i.name, i) for i in table.global_indexes) for gsi_update in global_index_updates: gsi_to_create = gsi_update.get("Create") gsi_to_update = gsi_update.get("Update") @@ -906,7 +969,7 @@ class DynamoDBBackend(BaseBackend): if index_name not in gsis_by_name: raise ValueError( "Global Secondary Index does not exist, but tried 
to update: %s" - % gsi_to_update["IndexName"] + % index_name ) gsis_by_name[index_name].update(gsi_to_update) @@ -917,7 +980,9 @@ class DynamoDBBackend(BaseBackend): % gsi_to_create["IndexName"] ) - gsis_by_name[gsi_to_create["IndexName"]] = gsi_to_create + gsis_by_name[gsi_to_create["IndexName"]] = GlobalSecondaryIndex.create( + gsi_to_create + ) # in python 3.6, dict.values() returns a dict_values object, but we expect it to be a list in other # parts of the codebase diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index aec7c7560..6500a0a63 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -411,7 +411,6 @@ class DynamoHandler(BaseResponse): def query(self): name = self.body["TableName"] - # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get("KeyConditionExpression") projection_expression = self.body.get("ProjectionExpression") expression_attribute_names = self.body.get("ExpressionAttributeNames", {}) @@ -439,7 +438,7 @@ class DynamoHandler(BaseResponse): index_name = self.body.get("IndexName") if index_name: all_indexes = (table.global_indexes or []) + (table.indexes or []) - indexes_by_name = dict((i["IndexName"], i) for i in all_indexes) + indexes_by_name = dict((i.name, i) for i in all_indexes) if index_name not in indexes_by_name: er = "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException" return self.error( @@ -449,7 +448,7 @@ class DynamoHandler(BaseResponse): ), ) - index = indexes_by_name[index_name]["KeySchema"] + index = indexes_by_name[index_name].schema else: index = table.schema diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 33f65d5ec..12e75a73e 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ 
b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -931,6 +931,83 @@ boto3 """ +@mock_dynamodb2 +def test_boto3_create_table_with_gsi(): + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + + table = dynamodb.create_table( + TableName="users", + KeySchema=[ + {"AttributeName": "forum_name", "KeyType": "HASH"}, + {"AttributeName": "subject", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "forum_name", "AttributeType": "S"}, + {"AttributeName": "subject", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + GlobalSecondaryIndexes=[ + { + "IndexName": "test_gsi", + "KeySchema": [{"AttributeName": "subject", "KeyType": "HASH"}], + "Projection": {"ProjectionType": "ALL"}, + } + ], + ) + table["TableDescription"]["GlobalSecondaryIndexes"].should.equal( + [ + { + "KeySchema": [{"KeyType": "HASH", "AttributeName": "subject"}], + "IndexName": "test_gsi", + "Projection": {"ProjectionType": "ALL"}, + "IndexStatus": "ACTIVE", + "ProvisionedThroughput": { + "ReadCapacityUnits": 0, + "WriteCapacityUnits": 0, + }, + } + ] + ) + + table = dynamodb.create_table( + TableName="users2", + KeySchema=[ + {"AttributeName": "forum_name", "KeyType": "HASH"}, + {"AttributeName": "subject", "KeyType": "RANGE"}, + ], + AttributeDefinitions=[ + {"AttributeName": "forum_name", "AttributeType": "S"}, + {"AttributeName": "subject", "AttributeType": "S"}, + ], + BillingMode="PAY_PER_REQUEST", + GlobalSecondaryIndexes=[ + { + "IndexName": "test_gsi", + "KeySchema": [{"AttributeName": "subject", "KeyType": "HASH"}], + "Projection": {"ProjectionType": "ALL"}, + "ProvisionedThroughput": { + "ReadCapacityUnits": 3, + "WriteCapacityUnits": 5, + }, + } + ], + ) + table["TableDescription"]["GlobalSecondaryIndexes"].should.equal( + [ + { + "KeySchema": [{"KeyType": "HASH", "AttributeName": "subject"}], + "IndexName": "test_gsi", + "Projection": {"ProjectionType": "ALL"}, + "IndexStatus": "ACTIVE", + "ProvisionedThroughput": { + "ReadCapacityUnits": 
3, + "WriteCapacityUnits": 5, + }, + } + ] + ) + + @mock_dynamodb2 def test_boto3_conditions(): dynamodb = boto3.resource("dynamodb", region_name="us-east-1") From 849f16ff2da596fa20a3b54e04fb24c26b2e7b14 Mon Sep 17 00:00:00 2001 From: Tomoya Kabe Date: Sun, 14 Jun 2020 17:23:52 +0900 Subject: [PATCH 14/20] Correct group inline policy rendering (#3069) * Correct group inline policy rendering in iam:GetAccountAuthorizationDetails response * Include user inline policy if exists * Add tests for IAM inline policies * Remove unnecessary print stmts --- moto/iam/responses.py | 12 +++++++++++- tests/test_iam/test_iam.py | 10 ++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 60ab46069..3a8296760 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -2083,6 +2083,16 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """{{ user.name }} {{ user.arn }} {{ user.created_iso_8601 }} + {% if user.policies %} + + {% for policy in user.policies %} + + {{ policy }} + {{ user.policies[policy] }} + + {% endfor %} + + {% endif %} {% endfor %} @@ -2106,7 +2116,7 @@ GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """ {{ policy }} - {{ group.get_policy(policy) }} + {{ group.policies[policy] }} {% endfor %} diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 7b59a5726..a749a37e7 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -1690,11 +1690,15 @@ def test_get_account_authorization_details(): assert result["RoleDetailList"][0]["AttachedManagedPolicies"][0][ "PolicyArn" ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert result["RoleDetailList"][0]["RolePolicyList"][0][ + "PolicyDocument" + ] == json.loads(test_policy) result = conn.get_account_authorization_details(Filter=["User"]) assert len(result["RoleDetailList"]) == 0 assert len(result["UserDetailList"]) == 1 assert len(result["UserDetailList"][0]["GroupList"]) == 1 + assert 
len(result["UserDetailList"][0]["UserPolicyList"]) == 1 assert len(result["UserDetailList"][0]["AttachedManagedPolicies"]) == 1 assert len(result["GroupDetailList"]) == 0 assert len(result["Policies"]) == 0 @@ -1705,6 +1709,9 @@ def test_get_account_authorization_details(): assert result["UserDetailList"][0]["AttachedManagedPolicies"][0][ "PolicyArn" ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert result["UserDetailList"][0]["UserPolicyList"][0][ + "PolicyDocument" + ] == json.loads(test_policy) result = conn.get_account_authorization_details(Filter=["Group"]) assert len(result["RoleDetailList"]) == 0 @@ -1720,6 +1727,9 @@ def test_get_account_authorization_details(): assert result["GroupDetailList"][0]["AttachedManagedPolicies"][0][ "PolicyArn" ] == "arn:aws:iam::{}:policy/testPolicy".format(ACCOUNT_ID) + assert result["GroupDetailList"][0]["GroupPolicyList"][0][ + "PolicyDocument" + ] == json.loads(test_policy) result = conn.get_account_authorization_details(Filter=["LocalManagedPolicy"]) assert len(result["RoleDetailList"]) == 0 From 4556a2f96f2820c6123004fe3139b61036c97910 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 14 Jun 2020 11:31:44 +0100 Subject: [PATCH 15/20] #1954 - CF - Check stack name in use --- moto/cloudformation/responses.py | 21 +++++++++++++++++++ .../test_cloudformation_stack_crud.py | 4 ++-- .../test_cloudformation_stack_crud_boto3.py | 20 ++++++++++++++++-- 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 302849481..e503a1d19 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -50,6 +50,12 @@ class CloudFormationResponse(BaseResponse): for item in self._get_list_prefix("Tags.member") ) + if self.stack_name_exists(new_stack_name=stack_name): + template = self.response_template( + CREATE_STACK_NAME_EXISTS_RESPONSE_TEMPLATE + ) + return 400, {"status": 400}, template.render(name=stack_name) + 
# Hack dict-comprehension parameters = dict( [ @@ -82,6 +88,12 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(CREATE_STACK_RESPONSE_TEMPLATE) return template.render(stack=stack) + def stack_name_exists(self, new_stack_name): + for stack in self.cloudformation_backend.stacks.values(): + if stack.name == new_stack_name: + return True + return False + @amzn_request_id def create_change_set(self): stack_name = self._get_param("StackName") @@ -564,6 +576,15 @@ CREATE_STACK_RESPONSE_TEMPLATE = """ """ +CREATE_STACK_NAME_EXISTS_RESPONSE_TEMPLATE = """ + + Sender + AlreadyExistsException + Stack [{{ name }}] already exists + + 950ff8d7-812a-44b3-bb0c-9b271b954104 +""" + UPDATE_STACK_RESPONSE_TEMPLATE = """ {{ stack.stack_id }} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 3d1b2ab8c..800362ad2 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -98,12 +98,12 @@ def test_create_stack_hosted_zone_by_id(): }, } conn.create_stack( - "test_stack", template_body=json.dumps(dummy_template), parameters={}.items() + "test_stack1", template_body=json.dumps(dummy_template), parameters={}.items() ) r53_conn = boto.connect_route53() zone_id = r53_conn.get_zones()[0].id conn.create_stack( - "test_stack", + "test_stack2", template_body=json.dumps(dummy_template2), parameters={"ZoneId": zone_id}.items(), ) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index cd76743dd..43f63dca2 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -919,7 +919,9 @@ def test_execute_change_set_w_name(): def test_describe_stack_pagination(): conn = boto3.client("cloudformation", 
region_name="us-east-1") for i in range(100): - conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json) + conn.create_stack( + StackName="test_stack_{}".format(i), TemplateBody=dummy_template_json + ) resp = conn.describe_stacks() stacks = resp["Stacks"] @@ -1211,7 +1213,8 @@ def test_list_exports_with_token(): # Add index to ensure name is unique dummy_output_template["Outputs"]["StackVPC"]["Export"]["Name"] += str(i) cf.create_stack( - StackName="test_stack", TemplateBody=json.dumps(dummy_output_template) + StackName="test_stack_{}".format(i), + TemplateBody=json.dumps(dummy_output_template), ) exports = cf.list_exports() exports["Exports"].should.have.length_of(100) @@ -1273,3 +1276,16 @@ def test_non_json_redrive_policy(): stack.Resource("MainQueue").resource_status.should.equal("CREATE_COMPLETE") stack.Resource("DeadLetterQueue").resource_status.should.equal("CREATE_COMPLETE") + + +@mock_cloudformation +def test_boto3_create_duplicate_stack(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + cf_conn.create_stack( + StackName="test_stack", TemplateBody=dummy_template_json, + ) + + with assert_raises(ClientError): + cf_conn.create_stack( + StackName="test_stack", TemplateBody=dummy_template_json, + ) From 0dd41d4c32e5ae5f5a8a1c2dccc0a271d883b139 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 14 Jun 2020 12:03:00 -0300 Subject: [PATCH 16/20] Cloudformation support for EventSourceMapping (#3045) * change line position for uuid and last_modified because they're not input parameters * add event_source_arn validator and setter * refactor batch_size as setter * add helper function to parse arn and return source service * fix for EventSource's create_from_cfn, there was no reference in the lambda object for the esm if created by cfn * add esm deletion by cloudformation * remove unused variable in test * add cfn's update * add complete implementation of delete_from_cfn * blacked changed files * fix test with 
invalid batchsize for sqs * Dynamodb2 Table - Bugfix for localindex and implemented get_cfn_attributes * Dynamodb2 eventsource - fix test to use StreamArn attribute * Lambda Test - fix test_update_event_source_mapping --- moto/awslambda/models.py | 142 +++++++----- moto/dynamodb2/models/__init__.py | 10 + tests/test_awslambda/test_lambda.py | 3 +- .../test_lambda_cloudformation.py | 207 +++++++++++++++++- .../test_cloudformation_stack_crud.py | 3 +- 5 files changed, 313 insertions(+), 52 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 967944b91..91ecc4287 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -555,40 +555,63 @@ class LambdaFunction(BaseModel): class EventSourceMapping(BaseModel): def __init__(self, spec): # required - self.function_arn = spec["FunctionArn"] + self.function_name = spec["FunctionName"] self.event_source_arn = spec["EventSourceArn"] + + # optional + self.batch_size = spec.get("BatchSize") + self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON") + self.enabled = spec.get("Enabled", True) + self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None) + + self.function_arn = spec["FunctionArn"] self.uuid = str(uuid.uuid4()) self.last_modified = time.mktime(datetime.datetime.utcnow().timetuple()) - # BatchSize service default/max mapping - batch_size_map = { + def _get_service_source_from_arn(self, event_source_arn): + return event_source_arn.split(":")[2].lower() + + def _validate_event_source(self, event_source_arn): + valid_services = ("dynamodb", "kinesis", "sqs") + service = self._get_service_source_from_arn(event_source_arn) + return True if service in valid_services else False + + @property + def event_source_arn(self): + return self._event_source_arn + + @event_source_arn.setter + def event_source_arn(self, event_source_arn): + if not self._validate_event_source(event_source_arn): + raise ValueError( + "InvalidParameterValueException", 
"Unsupported event source type" + ) + self._event_source_arn = event_source_arn + + @property + def batch_size(self): + return self._batch_size + + @batch_size.setter + def batch_size(self, batch_size): + batch_size_service_map = { "kinesis": (100, 10000), "dynamodb": (100, 1000), "sqs": (10, 10), } - source_type = self.event_source_arn.split(":")[2].lower() - batch_size_entry = batch_size_map.get(source_type) - if batch_size_entry: - # Use service default if not provided - batch_size = int(spec.get("BatchSize", batch_size_entry[0])) - if batch_size > batch_size_entry[1]: - raise ValueError( - "InvalidParameterValueException", - "BatchSize {} exceeds the max of {}".format( - batch_size, batch_size_entry[1] - ), - ) - else: - self.batch_size = batch_size - else: - raise ValueError( - "InvalidParameterValueException", "Unsupported event source type" - ) - # optional - self.starting_position = spec.get("StartingPosition", "TRIM_HORIZON") - self.enabled = spec.get("Enabled", True) - self.starting_position_timestamp = spec.get("StartingPositionTimestamp", None) + source_type = self._get_service_source_from_arn(self.event_source_arn) + batch_size_for_source = batch_size_service_map[source_type] + + if batch_size is None: + self._batch_size = batch_size_for_source[0] + elif batch_size > batch_size_for_source[1]: + error_message = "BatchSize {} exceeds the max of {}".format( + batch_size, batch_size_for_source[1] + ) + raise ValueError("InvalidParameterValueException", error_message) + else: + self._batch_size = int(batch_size) def get_configuration(self): return { @@ -602,23 +625,42 @@ class EventSourceMapping(BaseModel): "StateTransitionReason": "User initiated", } + def delete(self, region_name): + lambda_backend = lambda_backends[region_name] + lambda_backend.delete_event_source_mapping(self.uuid) + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name ): properties = cloudformation_json["Properties"] - func = 
lambda_backends[region_name].get_function(properties["FunctionName"]) - spec = { - "FunctionArn": func.function_arn, - "EventSourceArn": properties["EventSourceArn"], - "StartingPosition": properties["StartingPosition"], - "BatchSize": properties.get("BatchSize", 100), - } - optional_properties = "BatchSize Enabled StartingPositionTimestamp".split() - for prop in optional_properties: - if prop in properties: - spec[prop] = properties[prop] - return EventSourceMapping(spec) + lambda_backend = lambda_backends[region_name] + return lambda_backend.create_event_source_mapping(properties) + + @classmethod + def update_from_cloudformation_json( + cls, new_resource_name, cloudformation_json, original_resource, region_name + ): + properties = cloudformation_json["Properties"] + event_source_uuid = original_resource.uuid + lambda_backend = lambda_backends[region_name] + return lambda_backend.update_event_source_mapping(event_source_uuid, properties) + + @classmethod + def delete_from_cloudformation_json( + cls, resource_name, cloudformation_json, region_name + ): + properties = cloudformation_json["Properties"] + lambda_backend = lambda_backends[region_name] + esms = lambda_backend.list_event_source_mappings( + event_source_arn=properties["EventSourceArn"], + function_name=properties["FunctionName"], + ) + + for esm in esms: + if esm.logical_resource_id in resource_name: + lambda_backend.delete_event_source_mapping + esm.delete(region_name) class LambdaVersion(BaseModel): @@ -819,7 +861,7 @@ class LambdaBackend(BaseBackend): ) # Validate function name - func = self._lambdas.get_function_by_name_or_arn(spec.pop("FunctionName", "")) + func = self._lambdas.get_function_by_name_or_arn(spec.get("FunctionName", "")) if not func: raise RESTError("ResourceNotFoundException", "Invalid FunctionName") @@ -877,18 +919,20 @@ class LambdaBackend(BaseBackend): def update_event_source_mapping(self, uuid, spec): esm = self.get_event_source_mapping(uuid) - if esm: - if 
spec.get("FunctionName"): - func = self._lambdas.get_function_by_name_or_arn( - spec.get("FunctionName") - ) + if not esm: + return False + + for key, value in spec.items(): + if key == "FunctionName": + func = self._lambdas.get_function_by_name_or_arn(spec[key]) esm.function_arn = func.function_arn - if "BatchSize" in spec: - esm.batch_size = spec["BatchSize"] - if "Enabled" in spec: - esm.enabled = spec["Enabled"] - return esm - return False + elif key == "BatchSize": + esm.batch_size = spec[key] + elif key == "Enabled": + esm.enabled = spec[key] + + esm.last_modified = time.mktime(datetime.datetime.utcnow().timetuple()) + return esm def list_event_source_mappings(self, event_source_arn, function_name): esms = list(self._event_source_mappings.values()) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index 48b4bbbfd..13ee94948 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -386,6 +386,16 @@ class Table(BaseModel): }, } + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + + if attribute_name == "Arn": + return self.table_arn + elif attribute_name == "StreamArn" and self.stream_specification: + return self.describe()["TableDescription"]["LatestStreamArn"] + + raise UnformattedGetAttTemplateException() + @classmethod def create_from_cloudformation_json( cls, resource_name, cloudformation_json, region_name diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 8879ad7e3..1cd943f04 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -1446,11 +1446,12 @@ def test_update_event_source_mapping(): assert response["State"] == "Enabled" mapping = conn.update_event_source_mapping( - UUID=response["UUID"], Enabled=False, BatchSize=15, FunctionName="testFunction2" + UUID=response["UUID"], Enabled=False, BatchSize=2, FunctionName="testFunction2" ) 
assert mapping["UUID"] == response["UUID"] assert mapping["FunctionArn"] == func2["FunctionArn"] assert mapping["State"] == "Disabled" + assert mapping["BatchSize"] == 2 @mock_lambda diff --git a/tests/test_awslambda/test_lambda_cloudformation.py b/tests/test_awslambda/test_lambda_cloudformation.py index f57354d69..c3061ff3a 100644 --- a/tests/test_awslambda/test_lambda_cloudformation.py +++ b/tests/test_awslambda/test_lambda_cloudformation.py @@ -3,7 +3,7 @@ import io import sure # noqa import zipfile from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3 +from moto import mock_cloudformation, mock_iam, mock_lambda, mock_s3, mock_sqs from nose.tools import assert_raises from string import Template from uuid import uuid4 @@ -48,6 +48,23 @@ template = Template( }""" ) +event_source_mapping_template = Template( + """{ + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "$resource_name": { + "Type": "AWS::Lambda::EventSourceMapping", + "Properties": { + "BatchSize": $batch_size, + "EventSourceArn": $event_source_arn, + "FunctionName": $function_name, + "Enabled": $enabled + } + } + } +}""" +) + @mock_cloudformation @mock_lambda @@ -97,6 +114,194 @@ def test_lambda_can_be_deleted_by_cloudformation(): e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_create_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + 
template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + event_source = event_sources["EventSourceMappings"][0] + event_source["EventSourceArn"].should.be.equal(queue.attributes["QueueArn"]) + event_source["FunctionArn"].should.be.equal(created_fn_arn) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_delete_stack(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + + template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + esm_stack = cf.create_stack(StackName="test-event-source", TemplateBody=template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + + cf.delete_stack(StackName=esm_stack["StackId"]) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(0) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_update_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = 
boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + original_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=original_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + original_esm = event_sources["EventSourceMappings"][0] + + original_esm["State"].should.equal("Enabled") + original_esm["BatchSize"].should.equal(1) + + # Update + new_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 10, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": False, + } + ) + + cf.update_stack(StackName="test-event-source", TemplateBody=new_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + updated_esm = event_sources["EventSourceMappings"][0] + + updated_esm["State"].should.equal("Disabled") + updated_esm["BatchSize"].should.equal(10) + + +@mock_cloudformation +@mock_lambda +@mock_s3 +@mock_sqs +def test_event_source_mapping_delete_from_cloudformation_json(): + sqs = boto3.resource("sqs", region_name="us-east-1") + s3 = boto3.client("s3", "us-east-1") + cf = boto3.client("cloudformation", region_name="us-east-1") + lmbda = boto3.client("lambda", region_name="us-east-1") + + queue = sqs.create_queue(QueueName="test-sqs-queue1") + + # Creates lambda + _, lambda_stack = create_stack(cf, s3) + 
created_fn_name = get_created_function_name(cf, lambda_stack) + created_fn_arn = lmbda.get_function(FunctionName=created_fn_name)["Configuration"][ + "FunctionArn" + ] + + original_template = event_source_mapping_template.substitute( + { + "resource_name": "Foo", + "batch_size": 1, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": True, + } + ) + + cf.create_stack(StackName="test-event-source", TemplateBody=original_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + original_esm = event_sources["EventSourceMappings"][0] + + original_esm["State"].should.equal("Enabled") + original_esm["BatchSize"].should.equal(1) + + # Update with deletion of old resources + new_template = event_source_mapping_template.substitute( + { + "resource_name": "Bar", # changed name + "batch_size": 10, + "event_source_arn": queue.attributes["QueueArn"], + "function_name": created_fn_name, + "enabled": False, + } + ) + + cf.update_stack(StackName="test-event-source", TemplateBody=new_template) + event_sources = lmbda.list_event_source_mappings(FunctionName=created_fn_name) + + event_sources["EventSourceMappings"].should.have.length_of(1) + updated_esm = event_sources["EventSourceMappings"][0] + + updated_esm["State"].should.equal("Disabled") + updated_esm["BatchSize"].should.equal(10) + updated_esm["UUID"].shouldnt.equal(original_esm["UUID"]) + + def create_stack(cf, s3): bucket_name = str(uuid4()) s3.create_bucket(Bucket=bucket_name) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 3d1b2ab8c..8a0a0b11c 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -541,13 +541,14 @@ def test_create_stack_lambda_and_dynamodb(): "ReadCapacityUnits": 10, "WriteCapacityUnits": 10, }, + "StreamSpecification": {"StreamViewType": 
"KEYS_ONLY"}, }, }, "func1mapping": { "Type": "AWS::Lambda::EventSourceMapping", "Properties": { "FunctionName": {"Ref": "func1"}, - "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", + "EventSourceArn": {"Fn::GetAtt": ["tab1", "StreamArn"]}, "StartingPosition": "0", "BatchSize": 100, "Enabled": True, From 5988e5efaa07e4f6cde6b9a92d72f6e636f81677 Mon Sep 17 00:00:00 2001 From: usmangani1 Date: Mon, 15 Jun 2020 15:02:43 +0530 Subject: [PATCH 17/20] EC2 : Fix - modified volume type in ec2 describe images. (#3074) * "modified volume type in ec2 describe images" * removed unncessary comments * Linting Co-authored-by: usmankb Co-authored-by: Bert Blommers --- moto/ec2/responses/amis.py | 2 +- tests/test_ec2/test_amis.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 6736a7175..0e70182bb 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -125,7 +125,7 @@ DESCRIBE_IMAGES_RESPONSE = """ Date: Tue, 16 Jun 2020 08:03:02 +0300 Subject: [PATCH 18/20] Improve parsing of string values that represents booleans during block device mapping construction (#3073) * convert str into bool * Fix python2 * Fix python2 * pylint --- moto/ec2/responses/instances.py | 19 +++++++++++++++---- tests/test_ec2/test_instances.py | 30 +++++++++++++++++++++++++++++- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index adcbfa741..9090847be 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -13,6 +13,7 @@ from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID from copy import deepcopy +import six class InstanceResponse(BaseResponse): @@ -283,15 +284,15 @@ class InstanceResponse(BaseResponse): device_template["Ebs"]["VolumeSize"] = device_mapping.get( "ebs._volume_size" ) - device_template["Ebs"]["DeleteOnTermination"] = 
device_mapping.get( - "ebs._delete_on_termination", False + device_template["Ebs"]["DeleteOnTermination"] = self._convert_to_bool( + device_mapping.get("ebs._delete_on_termination", False) ) device_template["Ebs"]["VolumeType"] = device_mapping.get( "ebs._volume_type" ) device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops") - device_template["Ebs"]["Encrypted"] = device_mapping.get( - "ebs._encrypted", False + device_template["Ebs"]["Encrypted"] = self._convert_to_bool( + device_mapping.get("ebs._encrypted", False) ) mappings.append(device_template) @@ -308,6 +309,16 @@ class InstanceResponse(BaseResponse): ): raise MissingParameterError("size or snapshotId") + @staticmethod + def _convert_to_bool(bool_str): + if isinstance(bool_str, bool): + return bool_str + + if isinstance(bool_str, six.text_type): + return str(bool_str).lower() == "true" + + return False + BLOCK_DEVICE_MAPPING_TEMPLATE = { "VirtualName": None, diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index d25880975..c775ab0ab 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -128,7 +128,35 @@ def test_instance_terminate_discard_volumes(): @mock_ec2 -def test_instance_terminate_keep_volumes(): +def test_instance_terminate_keep_volumes_explicit(): + + ec2_resource = boto3.resource("ec2", "us-west-1") + + result = ec2_resource.create_instances( + ImageId="ami-d3adb33f", + MinCount=1, + MaxCount=1, + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": {"VolumeSize": 50, "DeleteOnTermination": False}, + } + ], + ) + instance = result[0] + + instance_volume_ids = [] + for volume in instance.volumes.all(): + instance_volume_ids.append(volume.volume_id) + + instance.terminate() + instance.wait_until_terminated() + + assert len(list(ec2_resource.volumes.all())) == 1 + + +@mock_ec2 +def test_instance_terminate_keep_volumes_implicit(): ec2_resource = boto3.resource("ec2", "us-west-1") result = 
ec2_resource.create_instances( From 6305f707d28d11000fda0336fe628a3d4743f4ba Mon Sep 17 00:00:00 2001 From: Shane Dowling Date: Thu, 18 Jun 2020 09:50:58 +0100 Subject: [PATCH 19/20] fix to capture yaml scanner error (#3077) --- moto/cloudformation/models.py | 4 +-- .../test_cloudformation/test_stack_parsing.py | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 8c14f55b8..23cdc0925 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -315,8 +315,8 @@ class FakeStack(BaseModel): yaml.add_multi_constructor("", yaml_tag_constructor) try: self.template_dict = yaml.load(self.template, Loader=yaml.Loader) - except yaml.parser.ParserError: - self.template_dict = json.loads(self.template, Loader=yaml.Loader) + except (yaml.parser.ParserError, yaml.scanner.ScannerError): + self.template_dict = json.loads(self.template) @property def stack_parameters(self): diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index 85df76592..116287162 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -38,6 +38,16 @@ name_type_template = { }, } +name_type_template_with_tabs_json = """ +\t{ +\t\t"AWSTemplateFormatVersion": "2010-09-09", +\t\t"Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. 
You will be billed for the AWS resources used if you create a stack from this template.", +\t\t"Resources": { +\t\t\t"Queue": {"Type": "AWS::SQS::Queue", "Properties": {"VisibilityTimeout": 60}} +\t\t} +\t} +""" + output_dict = { "Outputs": { "Output1": {"Value": {"Ref": "Queue"}, "Description": "This is a description."} @@ -186,6 +196,21 @@ def test_parse_stack_with_name_type_resource(): queue.should.be.a(Queue) +def test_parse_stack_with_tabbed_json_template(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=name_type_template_with_tabs_json, + parameters={}, + region_name="us-west-1", + ) + + stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal("Queue") + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + def test_parse_stack_with_yaml_template(): stack = FakeStack( stack_id="test_id", From 8ce12027dd2d89e00fa3ecfd295357471957ba3d Mon Sep 17 00:00:00 2001 From: Dawn James Date: Fri, 19 Jun 2020 11:44:43 +0100 Subject: [PATCH 20/20] Return correct error when creating a bucket with empty CreateBucketConfiguration (#3079) * Several updates to the contributor documentation with extra information. * Fix failing test by providing a region. * Create test for issue 2210. * Check if CreateBucketConfiguration is supplied and empty; raise MalformedXML error if so. --- CONTRIBUTING.md | 25 +++++++++++++++++-------- moto/s3/responses.py | 16 ++++++++++++++++ tests/test_s3/test_s3.py | 11 ++++++++++- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 941fc0624..7e54236bd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,32 +1,41 @@ -### Contributing code +# Contributing code Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md), you can expect to be treated with respect at all times when interacting with this project. 
## Running the tests locally -Moto has a Makefile which has some helpful commands for getting setup. You should be able to run `make init` to install the dependencies and then `make test` to run the tests. +Moto has a [Makefile](./Makefile) which has some helpful commands for getting set up. +You should be able to run `make init` to install the dependencies and then `make test` to run the tests. + +*NB. On first run, some tests might take a while to execute, especially the Lambda ones, because they may need to download a Docker image before they can execute.* ## Linting + Run `make lint` or `black --check moto tests` to verify whether your code confirms to the guidelines. -## Is there a missing feature? +## Getting to grips with the codebase + +Moto maintains a list of [good first issues](https://github.com/spulec/moto/contribute) which you may want to look at before +implementing a whole new endpoint. + +## Missing features Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services. How to teach Moto to support a new AWS endpoint: -* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done. +* Search for an existing [issue](https://github.com/spulec/moto/issues) that matches what you want to achieve. +* If one doesn't already exist, create a new issue describing what's missing. This is where we'll all talk about the new addition and help you get it done. * Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description. * Try to add a failing test case. 
For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`. * If you can also implement the code that gets that test passing that's great. If not, just ask the community for a hand and somebody will assist you. -# Maintainers +## Maintainers -## Releasing a new version of Moto +### Releasing a new version of Moto -You'll need a PyPi account and a Dockerhub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image. +You'll need a PyPi account and a DockerHub account to release Moto. After we release a new PyPi package we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image. * First, `scripts/bump_version` modifies the version and opens a PR * Then, merge the new pull request * Finally, generate and ship the new artifacts with `make publish` - diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 41db43af7..d4d872a8d 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -615,6 +615,19 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): pass return False + def _create_bucket_configuration_is_empty(self, body): + if body: + try: + create_bucket_configuration = xmltodict.parse(body)[ + "CreateBucketConfiguration" + ] + del create_bucket_configuration["@xmlns"] + if len(create_bucket_configuration) == 0: + return True + except KeyError: + pass + return False + def _parse_pab_config(self, body): parsed_xml = xmltodict.parse(body) parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) @@ -733,6 +746,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ): raise IllegalLocationConstraintException() if body: + if self._create_bucket_configuration_is_empty(body): + raise MalformedXML() + try: forced_region = 
xmltodict.parse(body)["CreateBucketConfiguration"][ "LocationConstraint" diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 010a23d50..dbdd1b90c 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1042,7 +1042,7 @@ def test_s3_object_in_public_bucket_using_multiple_presigned_urls(): @mock_s3 def test_streaming_upload_from_file_to_presigned_url(): - s3 = boto3.resource("s3") + s3 = boto3.resource("s3", region_name="us-east-1") bucket = s3.Bucket("test-bucket") bucket.create() bucket.put_object(Body=b"ABCD", Key="file.txt") @@ -1976,6 +1976,15 @@ def test_boto3_bucket_create_eu_central(): ) +@mock_s3 +def test_bucket_create_empty_bucket_configuration_should_return_malformed_xml_error(): + s3 = boto3.resource("s3", region_name="us-east-1") + with assert_raises(ClientError) as e: + s3.create_bucket(Bucket="whatever", CreateBucketConfiguration={}) + e.exception.response["Error"]["Code"].should.equal("MalformedXML") + e.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + + @mock_s3 def test_boto3_head_object(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME)