diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index f99d86df3..f56385b25 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,6 +1,8 @@ ## accessanalyzer -0% implemented +
+0% implemented + - [ ] create_analyzer - [ ] create_archive_rule - [ ] delete_analyzer @@ -19,9 +21,12 @@ - [ ] untag_resource - [ ] update_archive_rule - [ ] update_findings +
## acm -38% implemented +
+38% implemented + - [X] add_tags_to_certificate - [X] delete_certificate - [ ] describe_certificate @@ -35,9 +40,12 @@ - [X] request_certificate - [ ] resend_validation_email - [ ] update_certificate_options +
## acm-pca -0% implemented +
+0% implemented + - [ ] create_certificate_authority - [ ] create_certificate_authority_audit_report - [ ] create_permission @@ -58,9 +66,12 @@ - [ ] tag_certificate_authority - [ ] untag_certificate_authority - [ ] update_certificate_authority +
## alexaforbusiness -0% implemented +
+0% implemented + - [ ] approve_skill - [ ] associate_contact_with_address_book - [ ] associate_device_with_network_profile @@ -154,9 +165,12 @@ - [ ] update_profile - [ ] update_room - [ ] update_skill_group +
## amplify -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_backend_environment - [ ] create_branch @@ -194,17 +208,20 @@ - [ ] update_branch - [ ] update_domain_association - [ ] update_webhook +
## apigateway -25% implemented +
+34% implemented + - [ ] create_api_key -- [ ] create_authorizer +- [X] create_authorizer - [ ] create_base_path_mapping - [X] create_deployment - [ ] create_documentation_part - [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model +- [X] create_domain_name +- [X] create_model - [ ] create_request_validator - [X] create_resource - [X] create_rest_api @@ -213,7 +230,7 @@ - [X] create_usage_plan_key - [ ] create_vpc_link - [ ] delete_api_key -- [ ] delete_authorizer +- [X] delete_authorizer - [ ] delete_base_path_mapping - [ ] delete_client_certificate - [X] delete_deployment @@ -239,8 +256,8 @@ - [ ] get_account - [ ] get_api_key - [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers +- [X] get_authorizer +- [X] get_authorizers - [ ] get_base_path_mapping - [ ] get_base_path_mappings - [ ] get_client_certificate @@ -251,8 +268,8 @@ - [ ] get_documentation_parts - [ ] get_documentation_version - [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names +- [X] get_domain_name +- [X] get_domain_names - [ ] get_export - [ ] get_gateway_response - [ ] get_gateway_responses @@ -260,9 +277,9 @@ - [X] get_integration_response - [X] get_method - [X] get_method_response -- [ ] get_model +- [X] get_model - [ ] get_model_template -- [ ] get_models +- [X] get_models - [ ] get_request_validator - [ ] get_request_validators - [X] get_resource @@ -297,7 +314,7 @@ - [ ] untag_resource - [ ] update_account - [ ] update_api_key -- [ ] update_authorizer +- [X] update_authorizer - [ ] update_base_path_mapping - [ ] update_client_certificate - [ ] update_deployment @@ -317,15 +334,21 @@ - [ ] update_usage - [ ] update_usage_plan - [ ] update_vpc_link +
## apigatewaymanagementapi -0% implemented +
+0% implemented + - [ ] delete_connection - [ ] get_connection - [ ] post_to_connection +
## apigatewayv2 -0% implemented +
+0% implemented + - [ ] create_api - [ ] create_api_mapping - [ ] create_authorizer @@ -337,6 +360,8 @@ - [ ] create_route - [ ] create_route_response - [ ] create_stage +- [ ] create_vpc_link +- [ ] delete_access_log_settings - [ ] delete_api - [ ] delete_api_mapping - [ ] delete_authorizer @@ -347,9 +372,11 @@ - [ ] delete_integration_response - [ ] delete_model - [ ] delete_route +- [ ] delete_route_request_parameter - [ ] delete_route_response - [ ] delete_route_settings - [ ] delete_stage +- [ ] delete_vpc_link - [ ] get_api - [ ] get_api_mapping - [ ] get_api_mappings @@ -374,6 +401,8 @@ - [ ] get_stage - [ ] get_stages - [ ] get_tags +- [ ] get_vpc_link +- [ ] get_vpc_links - [ ] import_api - [ ] reimport_api - [ ] tag_resource @@ -389,9 +418,13 @@ - [ ] update_route - [ ] update_route_response - [ ] update_stage +- [ ] update_vpc_link +
## appconfig -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_configuration_profile - [ ] create_deployment_strategy @@ -421,9 +454,12 @@ - [ ] update_deployment_strategy - [ ] update_environment - [ ] validate_configuration +
## application-autoscaling -0% implemented +
+0% implemented + - [ ] delete_scaling_policy - [ ] delete_scheduled_action - [ ] deregister_scalable_target @@ -434,9 +470,12 @@ - [ ] put_scaling_policy - [ ] put_scheduled_action - [ ] register_scalable_target +
## application-insights -0% implemented +
+0% implemented + - [ ] create_application - [ ] create_component - [ ] create_log_pattern @@ -453,6 +492,7 @@ - [ ] describe_problem_observations - [ ] list_applications - [ ] list_components +- [ ] list_configuration_history - [ ] list_log_pattern_sets - [ ] list_log_patterns - [ ] list_problems @@ -463,9 +503,12 @@ - [ ] update_component - [ ] update_component_configuration - [ ] update_log_pattern +
## appmesh -0% implemented +
+0% implemented + - [ ] create_mesh - [ ] create_route - [ ] create_virtual_node @@ -494,9 +537,12 @@ - [ ] update_virtual_node - [ ] update_virtual_router - [ ] update_virtual_service +
## appstream -0% implemented +
+0% implemented + - [ ] associate_fleet - [ ] batch_associate_user_stack - [ ] batch_disassociate_user_stack @@ -544,9 +590,12 @@ - [ ] update_fleet - [ ] update_image_permissions - [ ] update_stack +
## appsync -0% implemented +
+0% implemented + - [ ] create_api_cache - [ ] create_api_key - [ ] create_data_source @@ -588,9 +637,12 @@ - [ ] update_graphql_api - [ ] update_resolver - [ ] update_type +
## athena -10% implemented +
+10% implemented + - [ ] batch_get_named_query - [ ] batch_get_query_execution - [ ] create_named_query @@ -610,9 +662,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_work_group +
## autoscaling -44% implemented +
+44% implemented + - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ -667,18 +722,24 @@ - [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group +
## autoscaling-plans -0% implemented +
+0% implemented + - [ ] create_scaling_plan - [ ] delete_scaling_plan - [ ] describe_scaling_plan_resources - [ ] describe_scaling_plans - [ ] get_scaling_plan_resource_forecast_data - [ ] update_scaling_plan +
## backup -0% implemented +
+0% implemented + - [ ] create_backup_plan - [ ] create_backup_selection - [ ] create_backup_vault @@ -690,6 +751,7 @@ - [ ] delete_recovery_point - [ ] describe_backup_job - [ ] describe_backup_vault +- [ ] describe_copy_job - [ ] describe_protected_resource - [ ] describe_recovery_point - [ ] describe_restore_job @@ -708,6 +770,7 @@ - [ ] list_backup_plans - [ ] list_backup_selections - [ ] list_backup_vaults +- [ ] list_copy_jobs - [ ] list_protected_resources - [ ] list_recovery_points_by_backup_vault - [ ] list_recovery_points_by_resource @@ -716,15 +779,19 @@ - [ ] put_backup_vault_access_policy - [ ] put_backup_vault_notifications - [ ] start_backup_job +- [ ] start_copy_job - [ ] start_restore_job - [ ] stop_backup_job - [ ] tag_resource - [ ] untag_resource - [ ] update_backup_plan - [ ] update_recovery_point_lifecycle +
## batch -93% implemented +
+93% implemented + - [ ] cancel_job - [X] create_compute_environment - [X] create_job_queue @@ -741,9 +808,12 @@ - [X] terminate_job - [X] update_compute_environment - [X] update_job_queue +
## budgets -0% implemented +
+0% implemented + - [ ] create_budget - [ ] create_notification - [ ] create_subscriber @@ -758,9 +828,12 @@ - [ ] update_budget - [ ] update_notification - [ ] update_subscriber +
## ce -0% implemented +
+0% implemented + - [ ] create_cost_category_definition - [ ] delete_cost_category_definition - [ ] describe_cost_category_definition @@ -780,12 +853,16 @@ - [ ] get_usage_forecast - [ ] list_cost_category_definitions - [ ] update_cost_category_definition +
## chime -0% implemented +
+0% implemented + - [ ] associate_phone_number_with_user - [ ] associate_phone_numbers_with_voice_connector - [ ] associate_phone_numbers_with_voice_connector_group +- [ ] associate_signin_delegate_groups_with_account - [ ] batch_create_attendee - [ ] batch_create_room_membership - [ ] batch_delete_phone_number @@ -800,6 +877,7 @@ - [ ] create_phone_number_order - [ ] create_room - [ ] create_room_membership +- [ ] create_user - [ ] create_voice_connector - [ ] create_voice_connector_group - [ ] delete_account @@ -818,6 +896,7 @@ - [ ] disassociate_phone_number_from_user - [ ] disassociate_phone_numbers_from_voice_connector - [ ] disassociate_phone_numbers_from_voice_connector_group +- [ ] disassociate_signin_delegate_groups_from_account - [ ] get_account - [ ] get_account_settings - [ ] get_attendee @@ -874,9 +953,12 @@ - [ ] update_user_settings - [ ] update_voice_connector - [ ] update_voice_connector_group +
## cloud9 -0% implemented +
+0% implemented + - [ ] create_environment_ec2 - [ ] create_environment_membership - [ ] delete_environment @@ -885,11 +967,17 @@ - [ ] describe_environment_status - [ ] describe_environments - [ ] list_environments +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource - [ ] update_environment - [ ] update_environment_membership +
## clouddirectory -0% implemented +
+0% implemented + - [ ] add_facet_to_object - [ ] apply_schema - [ ] attach_object @@ -956,9 +1044,12 @@ - [ ] update_typed_link_facet - [ ] upgrade_applied_schema - [ ] upgrade_published_schema +
## cloudformation -32% implemented +
+32% implemented + - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -1014,9 +1105,12 @@ - [X] update_stack_set - [ ] update_termination_protection - [X] validate_template +
## cloudfront -0% implemented +
+0% implemented + - [ ] create_cloud_front_origin_access_identity - [ ] create_distribution - [ ] create_distribution_with_tags @@ -1062,9 +1156,12 @@ - [ ] update_field_level_encryption_profile - [ ] update_public_key - [ ] update_streaming_distribution +
## cloudhsm -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] create_hapg - [ ] create_hsm @@ -1085,9 +1182,12 @@ - [ ] modify_hsm - [ ] modify_luna_client - [ ] remove_tags_from_resource +
## cloudhsmv2 -0% implemented +
+0% implemented + - [ ] copy_backup_to_region - [ ] create_cluster - [ ] create_hsm @@ -1101,9 +1201,12 @@ - [ ] restore_backup - [ ] tag_resource - [ ] untag_resource +
## cloudsearch -0% implemented +
+0% implemented + - [ ] build_suggesters - [ ] create_domain - [ ] define_analysis_scheme @@ -1130,15 +1233,21 @@ - [ ] update_domain_endpoint_options - [ ] update_scaling_parameters - [ ] update_service_access_policies +
## cloudsearchdomain -0% implemented +
+0% implemented + - [ ] search - [ ] suggest - [ ] upload_documents +
## cloudtrail -0% implemented +
+0% implemented + - [ ] add_tags - [ ] create_trail - [ ] delete_trail @@ -1157,9 +1266,12 @@ - [ ] start_logging - [ ] stop_logging - [ ] update_trail +
## cloudwatch -34% implemented +
+36% implemented + - [X] delete_alarms - [ ] delete_anomaly_detector - [X] delete_dashboards @@ -1175,13 +1287,14 @@ - [ ] enable_insight_rules - [X] get_dashboard - [ ] get_insight_rule_report -- [ ] get_metric_data +- [X] get_metric_data - [X] get_metric_statistics - [ ] get_metric_widget_image - [X] list_dashboards - [X] list_metrics - [ ] list_tags_for_resource - [ ] put_anomaly_detector +- [ ] put_composite_alarm - [X] put_dashboard - [ ] put_insight_rule - [X] put_metric_alarm @@ -1189,9 +1302,12 @@ - [X] set_alarm_state - [ ] tag_resource - [ ] untag_resource +
## codebuild -0% implemented +
+0% implemented + - [ ] batch_delete_builds - [ ] batch_get_builds - [ ] batch_get_projects @@ -1226,9 +1342,12 @@ - [ ] update_project - [ ] update_report_group - [ ] update_webhook +
## codecommit -0% implemented +
+4% implemented + - [ ] associate_approval_rule_template_with_repository - [ ] batch_associate_approval_rule_template_with_repositories - [ ] batch_describe_merge_conflicts @@ -1304,9 +1423,12 @@ - [ ] update_pull_request_title - [ ] update_repository_description - [ ] update_repository_name +
## codedeploy -0% implemented +
+0% implemented + - [ ] add_tags_to_on_premises_instances - [ ] batch_get_application_revisions - [ ] batch_get_applications @@ -1353,16 +1475,22 @@ - [ ] untag_resource - [ ] update_application - [ ] update_deployment_group +
## codeguru-reviewer -0% implemented +
+0% implemented + - [ ] associate_repository - [ ] describe_repository_association - [ ] disassociate_repository - [ ] list_repository_associations +
## codeguruprofiler -0% implemented +
+0% implemented + - [ ] configure_agent - [ ] create_profiling_group - [ ] delete_profiling_group @@ -1372,9 +1500,12 @@ - [ ] list_profiling_groups - [ ] post_agent_profile - [ ] update_profiling_group +
## codepipeline -22% implemented +
+21% implemented + - [ ] acknowledge_job - [ ] acknowledge_third_party_job - [ ] create_custom_action_type @@ -1408,12 +1539,16 @@ - [ ] register_webhook_with_third_party - [ ] retry_stage_execution - [ ] start_pipeline_execution +- [ ] stop_pipeline_execution - [X] tag_resource - [X] untag_resource - [X] update_pipeline +
## codestar -0% implemented +
+0% implemented + - [ ] associate_team_member - [ ] create_project - [ ] create_user_profile @@ -1432,9 +1567,22 @@ - [ ] update_project - [ ] update_team_member - [ ] update_user_profile +
+ +## codestar-connections +
+0% implemented + +- [ ] create_connection +- [ ] delete_connection +- [ ] get_connection +- [ ] list_connections +
## codestar-notifications -0% implemented +
+0% implemented + - [ ] create_notification_rule - [ ] delete_notification_rule - [ ] delete_target @@ -1448,9 +1596,12 @@ - [ ] unsubscribe - [ ] untag_resource - [ ] update_notification_rule +
## cognito-identity -28% implemented +
+28% implemented + - [X] create_identity_pool - [ ] delete_identities - [ ] delete_identity_pool @@ -1472,9 +1623,12 @@ - [ ] unlink_identity - [ ] untag_resource - [ ] update_identity_pool +
## cognito-idp -37% implemented +
+37% implemented + - [ ] add_custom_attributes - [X] admin_add_user_to_group - [ ] admin_confirm_sign_up @@ -1575,9 +1729,12 @@ - [X] update_user_pool_domain - [ ] verify_software_token - [ ] verify_user_attribute +
## cognito-sync -0% implemented +
+0% implemented + - [ ] bulk_publish - [ ] delete_dataset - [ ] describe_dataset @@ -1595,9 +1752,12 @@ - [ ] subscribe_to_dataset - [ ] unsubscribe_from_dataset - [ ] update_records +
## comprehend -0% implemented +
+0% implemented + - [ ] batch_detect_dominant_language - [ ] batch_detect_entities - [ ] batch_detect_key_phrases @@ -1649,32 +1809,43 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_endpoint +
## comprehendmedical -0% implemented +
+0% implemented + - [ ] describe_entities_detection_v2_job - [ ] describe_phi_detection_job - [ ] detect_entities - [ ] detect_entities_v2 - [ ] detect_phi +- [ ] infer_icd10_cm +- [ ] infer_rx_norm - [ ] list_entities_detection_v2_jobs - [ ] list_phi_detection_jobs - [ ] start_entities_detection_v2_job - [ ] start_phi_detection_job - [ ] stop_entities_detection_v2_job - [ ] stop_phi_detection_job +
## compute-optimizer -0% implemented +
+0% implemented + - [ ] get_auto_scaling_group_recommendations - [ ] get_ec2_instance_recommendations - [ ] get_ec2_recommendation_projected_metrics - [ ] get_enrollment_status - [ ] get_recommendation_summaries - [ ] update_enrollment_status +
## config -25% implemented +
+26% implemented + - [X] batch_get_aggregate_resource_config - [X] batch_get_resource_config - [X] delete_aggregation_authorization @@ -1739,13 +1910,14 @@ - [X] put_configuration_recorder - [ ] put_conformance_pack - [X] put_delivery_channel -- [ ] put_evaluations +- [X] put_evaluations - [ ] put_organization_config_rule - [ ] put_organization_conformance_pack - [ ] put_remediation_configurations - [ ] put_remediation_exceptions - [ ] put_resource_config - [ ] put_retention_configuration +- [ ] select_aggregate_resource_config - [ ] select_resource_config - [ ] start_config_rules_evaluation - [X] start_configuration_recorder @@ -1753,9 +1925,12 @@ - [X] stop_configuration_recorder - [ ] tag_resource - [ ] untag_resource +
## connect -0% implemented +
+0% implemented + - [ ] create_user - [ ] delete_user - [ ] describe_user @@ -1785,24 +1960,33 @@ - [ ] update_user_phone_config - [ ] update_user_routing_profile - [ ] update_user_security_profiles +
## connectparticipant -0% implemented +
+0% implemented + - [ ] create_participant_connection - [ ] disconnect_participant - [ ] get_transcript - [ ] send_event - [ ] send_message +
## cur -0% implemented +
+0% implemented + - [ ] delete_report_definition - [ ] describe_report_definitions - [ ] modify_report_definition - [ ] put_report_definition +
## dataexchange -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_data_set - [ ] create_job @@ -1825,9 +2009,12 @@ - [ ] update_asset - [ ] update_data_set - [ ] update_revision +
## datapipeline -42% implemented +
+42% implemented + - [X] activate_pipeline - [ ] add_tags - [X] create_pipeline @@ -1847,12 +2034,16 @@ - [ ] set_status - [ ] set_task_status - [ ] validate_pipeline_definition +
## datasync -22% implemented +
+20% implemented + - [X] cancel_task_execution - [ ] create_agent - [ ] create_location_efs +- [ ] create_location_fsx_windows - [ ] create_location_nfs - [ ] create_location_s3 - [ ] create_location_smb @@ -1862,6 +2053,7 @@ - [X] delete_task - [ ] describe_agent - [ ] describe_location_efs +- [ ] describe_location_fsx_windows - [ ] describe_location_nfs - [ ] describe_location_s3 - [ ] describe_location_smb @@ -1877,9 +2069,12 @@ - [ ] untag_resource - [ ] update_agent - [X] update_task +
## dax -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_parameter_group - [ ] create_subnet_group @@ -1901,9 +2096,12 @@ - [ ] update_cluster - [ ] update_parameter_group - [ ] update_subnet_group +
## detective -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] create_graph - [ ] create_members @@ -1915,14 +2113,19 @@ - [ ] list_invitations - [ ] list_members - [ ] reject_invitation +
## devicefarm -0% implemented +
+0% implemented + - [ ] create_device_pool - [ ] create_instance_profile - [ ] create_network_profile - [ ] create_project - [ ] create_remote_access_session +- [ ] create_test_grid_project +- [ ] create_test_grid_url - [ ] create_upload - [ ] create_vpce_configuration - [ ] delete_device_pool @@ -1931,6 +2134,7 @@ - [ ] delete_project - [ ] delete_remote_access_session - [ ] delete_run +- [ ] delete_test_grid_project - [ ] delete_upload - [ ] delete_vpce_configuration - [ ] get_account_settings @@ -1947,6 +2151,8 @@ - [ ] get_run - [ ] get_suite - [ ] get_test +- [ ] get_test_grid_project +- [ ] get_test_grid_session - [ ] get_upload - [ ] get_vpce_configuration - [ ] install_to_remote_access_session @@ -1966,6 +2172,10 @@ - [ ] list_samples - [ ] list_suites - [ ] list_tags_for_resource +- [ ] list_test_grid_projects +- [ ] list_test_grid_session_actions +- [ ] list_test_grid_session_artifacts +- [ ] list_test_grid_sessions - [ ] list_tests - [ ] list_unique_problems - [ ] list_uploads @@ -1983,11 +2193,15 @@ - [ ] update_instance_profile - [ ] update_network_profile - [ ] update_project +- [ ] update_test_grid_project - [ ] update_upload - [ ] update_vpce_configuration +
## directconnect -0% implemented +
+0% implemented + - [ ] accept_direct_connect_gateway_association_proposal - [ ] allocate_connection_on_interconnect - [ ] allocate_hosted_connection @@ -2041,9 +2255,12 @@ - [ ] update_direct_connect_gateway_association - [ ] update_lag - [ ] update_virtual_interface_attributes +
## discovery -0% implemented +
+0% implemented + - [ ] associate_configuration_items_to_application - [ ] batch_delete_import_data - [ ] create_application @@ -2069,9 +2286,12 @@ - [ ] stop_continuous_export - [ ] stop_data_collection_by_agent_ids - [ ] update_application +
## dlm -0% implemented +
+0% implemented + - [ ] create_lifecycle_policy - [ ] delete_lifecycle_policy - [ ] get_lifecycle_policies @@ -2080,9 +2300,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_lifecycle_policy +
## dms -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] apply_pending_maintenance_action - [ ] create_endpoint @@ -2130,9 +2353,12 @@ - [ ] start_replication_task_assessment - [ ] stop_replication_task - [ ] test_connection +
## docdb -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] apply_pending_maintenance_action - [ ] copy_db_cluster_parameter_group @@ -2175,9 +2401,12 @@ - [ ] restore_db_cluster_to_point_in_time - [ ] start_db_cluster - [ ] stop_db_cluster +
## ds -0% implemented +
+0% implemented + - [ ] accept_shared_directory - [ ] add_ip_routes - [ ] add_tags_to_resource @@ -2235,11 +2464,14 @@ - [ ] update_radius - [ ] update_trust - [ ] verify_trust +
## dynamodb -24% implemented -- [ ] batch_get_item -- [ ] batch_write_item +
+53% implemented + +- [X] batch_get_item +- [X] batch_write_item - [ ] create_backup - [ ] create_global_table - [X] create_table @@ -2247,54 +2479,63 @@ - [X] delete_item - [X] delete_table - [ ] describe_backup -- [ ] describe_continuous_backups +- [X] describe_continuous_backups - [ ] describe_contributor_insights - [ ] describe_endpoints - [ ] describe_global_table - [ ] describe_global_table_settings - [ ] describe_limits -- [ ] describe_table +- [X] describe_table - [ ] describe_table_replica_auto_scaling -- [ ] describe_time_to_live +- [X] describe_time_to_live - [X] get_item - [ ] list_backups - [ ] list_contributor_insights - [ ] list_global_tables -- [ ] list_tables -- [ ] list_tags_of_resource +- [X] list_tables +- [X] list_tags_of_resource - [X] put_item - [X] query - [ ] restore_table_from_backup - [ ] restore_table_to_point_in_time - [X] scan -- [ ] tag_resource +- [X] tag_resource - [X] transact_get_items -- [ ] transact_write_items -- [ ] untag_resource -- [ ] update_continuous_backups +- [X] transact_write_items +- [X] untag_resource +- [X] update_continuous_backups - [ ] update_contributor_insights - [ ] update_global_table - [ ] update_global_table_settings -- [ ] update_item -- [ ] update_table +- [X] update_item +- [X] update_table - [ ] update_table_replica_auto_scaling -- [ ] update_time_to_live +- [X] update_time_to_live +
## dynamodbstreams -100% implemented +
+100% implemented + - [X] describe_stream - [X] get_records - [X] get_shard_iterator - [X] list_streams +
## ebs -0% implemented +
+0% implemented + - [ ] get_snapshot_block - [ ] list_changed_blocks - [ ] list_snapshot_blocks +
## ec2 -26% implemented +
+26% implemented + - [ ] accept_reserved_instances_exchange_quote - [ ] accept_transit_gateway_peering_attachment - [ ] accept_transit_gateway_vpc_attachment @@ -2382,7 +2623,7 @@ - [ ] create_transit_gateway_vpc_attachment - [X] create_volume - [X] create_vpc -- [ ] create_vpc_endpoint +- [X] create_vpc_endpoint - [ ] create_vpc_endpoint_connection_notification - [ ] create_vpc_endpoint_service_configuration - [X] create_vpc_peering_connection @@ -2479,12 +2720,13 @@ - [ ] describe_import_image_tasks - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute -- [ ] describe_instance_credit_specifications +- [X] describe_instance_credit_specifications - [ ] describe_instance_status - [ ] describe_instance_type_offerings - [ ] describe_instance_types - [ ] describe_instances - [X] describe_internet_gateways +- [ ] describe_ipv6_pools - [X] describe_key_pairs - [ ] describe_launch_template_versions - [ ] describe_launch_templates @@ -2581,6 +2823,7 @@ - [ ] export_client_vpn_client_configuration - [ ] export_image - [ ] export_transit_gateway_routes +- [ ] get_associated_ipv6_pool_cidrs - [ ] get_capacity_reservation_usage - [ ] get_coip_pool_usage - [ ] get_console_output @@ -2602,6 +2845,7 @@ - [X] import_key_pair - [ ] import_snapshot - [ ] import_volume +- [ ] modify_availability_zone_group - [ ] modify_capacity_reservation - [ ] modify_client_vpn_endpoint - [ ] modify_default_credit_specification @@ -2682,6 +2926,7 @@ - [ ] search_transit_gateway_routes - [ ] send_diagnostic_interrupt - [X] start_instances +- [ ] start_vpc_endpoint_service_private_dns_verification - [X] stop_instances - [ ] terminate_client_vpn_connections - [X] terminate_instances @@ -2691,13 +2936,19 @@ - [ ] update_security_group_rule_descriptions_egress - [ ] update_security_group_rule_descriptions_ingress - [ ] withdraw_byoip_cidr +
## ec2-instance-connect -0% implemented -- [x] send_ssh_public_key +
+100% implemented + +- [X] send_ssh_public_key +
## ecr -27% implemented +
+27% implemented + - [ ] batch_check_layer_availability - [X] batch_delete_image - [X] batch_get_image @@ -2727,9 +2978,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] upload_layer_part +
## ecs -62% implemented +
+62% implemented + - [ ] create_capacity_provider - [X] create_cluster - [X] create_service @@ -2778,26 +3032,41 @@ - [X] update_service - [ ] update_service_primary_task_set - [ ] update_task_set +
## efs -0% implemented +
+0% implemented + +- [ ] create_access_point - [ ] create_file_system - [ ] create_mount_target - [ ] create_tags +- [ ] delete_access_point - [ ] delete_file_system +- [ ] delete_file_system_policy - [ ] delete_mount_target - [ ] delete_tags +- [ ] describe_access_points +- [ ] describe_file_system_policy - [ ] describe_file_systems - [ ] describe_lifecycle_configuration - [ ] describe_mount_target_security_groups - [ ] describe_mount_targets - [ ] describe_tags +- [ ] list_tags_for_resource - [ ] modify_mount_target_security_groups +- [ ] put_file_system_policy - [ ] put_lifecycle_configuration +- [ ] tag_resource +- [ ] untag_resource - [ ] update_file_system +
## eks -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_fargate_profile - [ ] create_nodegroup @@ -2819,15 +3088,21 @@ - [ ] update_cluster_version - [ ] update_nodegroup_config - [ ] update_nodegroup_version +
## elastic-inference -0% implemented +
+0% implemented + - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource +
## elasticache -0% implemented +
+0% implemented + - [ ] add_tags_to_resource - [ ] authorize_cache_security_group_ingress - [ ] batch_apply_update_action @@ -2838,13 +3113,16 @@ - [ ] create_cache_parameter_group - [ ] create_cache_security_group - [ ] create_cache_subnet_group +- [ ] create_global_replication_group - [ ] create_replication_group - [ ] create_snapshot +- [ ] decrease_node_groups_in_global_replication_group - [ ] decrease_replica_count - [ ] delete_cache_cluster - [ ] delete_cache_parameter_group - [ ] delete_cache_security_group - [ ] delete_cache_subnet_group +- [ ] delete_global_replication_group - [ ] delete_replication_group - [ ] delete_snapshot - [ ] describe_cache_clusters @@ -2855,30 +3133,39 @@ - [ ] describe_cache_subnet_groups - [ ] describe_engine_default_parameters - [ ] describe_events +- [ ] describe_global_replication_groups - [ ] describe_replication_groups - [ ] describe_reserved_cache_nodes - [ ] describe_reserved_cache_nodes_offerings - [ ] describe_service_updates - [ ] describe_snapshots - [ ] describe_update_actions +- [ ] disassociate_global_replication_group +- [ ] failover_global_replication_group +- [ ] increase_node_groups_in_global_replication_group - [ ] increase_replica_count - [ ] list_allowed_node_type_modifications - [ ] list_tags_for_resource - [ ] modify_cache_cluster - [ ] modify_cache_parameter_group - [ ] modify_cache_subnet_group +- [ ] modify_global_replication_group - [ ] modify_replication_group - [ ] modify_replication_group_shard_configuration - [ ] purchase_reserved_cache_nodes_offering +- [ ] rebalance_slots_in_global_replication_group - [ ] reboot_cache_cluster - [ ] remove_tags_from_resource - [ ] reset_cache_parameter_group - [ ] revoke_cache_security_group_ingress - [ ] start_migration - [ ] test_failover +
## elasticbeanstalk -13% implemented +
+13% implemented + - [ ] abort_environment_update - [ ] apply_environment_managed_action - [ ] check_dns_availability @@ -2923,9 +3210,12 @@ - [ ] update_environment - [X] update_tags_for_resource - [ ] validate_configuration_settings +
## elastictranscoder -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_job - [ ] create_pipeline @@ -2943,9 +3233,12 @@ - [ ] update_pipeline - [ ] update_pipeline_notifications - [ ] update_pipeline_status +
## elb -34% implemented +
+34% implemented + - [ ] add_tags - [X] apply_security_groups_to_load_balancer - [ ] attach_load_balancer_to_subnets @@ -2975,9 +3268,12 @@ - [ ] set_load_balancer_listener_ssl_certificate - [ ] set_load_balancer_policies_for_backend_server - [X] set_load_balancer_policies_of_listener +
## elbv2 -70% implemented +
+70% implemented + - [ ] add_listener_certificates - [ ] add_tags - [X] create_listener @@ -3012,9 +3308,12 @@ - [X] set_rule_priorities - [X] set_security_groups - [X] set_subnets +
## emr -50% implemented +
+50% implemented + - [ ] add_instance_fleet - [X] add_instance_groups - [X] add_job_flow_steps @@ -3045,9 +3344,12 @@ - [X] set_termination_protection - [X] set_visible_to_all_users - [X] terminate_job_flows +
## es -0% implemented +
+0% implemented + - [ ] add_tags - [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain @@ -3071,9 +3373,12 @@ - [ ] start_elasticsearch_service_software_update - [ ] update_elasticsearch_domain_config - [ ] upgrade_elasticsearch_domain +
## events -58% implemented +
+67% implemented + - [ ] activate_event_source - [X] create_event_bus - [ ] create_partner_event_source @@ -3093,7 +3398,7 @@ - [ ] list_partner_event_sources - [X] list_rule_names_by_target - [X] list_rules -- [ ] list_tags_for_resource +- [X] list_tags_for_resource - [X] list_targets_by_rule - [X] put_events - [ ] put_partner_events @@ -3102,12 +3407,15 @@ - [X] put_targets - [X] remove_permission - [X] remove_targets -- [ ] tag_resource +- [X] tag_resource - [X] test_event_pattern -- [ ] untag_resource +- [X] untag_resource +
## firehose -0% implemented +
+0% implemented + - [ ] create_delivery_stream - [ ] delete_delivery_stream - [ ] describe_delivery_stream @@ -3120,9 +3428,12 @@ - [ ] tag_delivery_stream - [ ] untag_delivery_stream - [ ] update_destination +
## fms -0% implemented +
+0% implemented + - [ ] associate_admin_account - [ ] delete_notification_channel - [ ] delete_policy @@ -3135,11 +3446,17 @@ - [ ] list_compliance_status - [ ] list_member_accounts - [ ] list_policies +- [ ] list_tags_for_resource - [ ] put_notification_channel - [ ] put_policy +- [ ] tag_resource +- [ ] untag_resource +
## forecast -0% implemented +
+0% implemented + - [ ] create_dataset - [ ] create_dataset_group - [ ] create_dataset_import_job @@ -3166,13 +3483,19 @@ - [ ] list_forecasts - [ ] list_predictors - [ ] update_dataset_group +
## forecastquery -0% implemented +
+0% implemented + - [ ] query_forecast +
## frauddetector -0% implemented +
+0% implemented + - [ ] batch_create_variable - [ ] batch_get_variable - [ ] create_detector_version @@ -3203,23 +3526,32 @@ - [ ] update_rule_metadata - [ ] update_rule_version - [ ] update_variable +
## fsx -0% implemented +
+0% implemented + +- [ ] cancel_data_repository_task - [ ] create_backup +- [ ] create_data_repository_task - [ ] create_file_system - [ ] create_file_system_from_backup - [ ] delete_backup - [ ] delete_file_system - [ ] describe_backups +- [ ] describe_data_repository_tasks - [ ] describe_file_systems - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource - [ ] update_file_system +
## gamelift -0% implemented +
+0% implemented + - [ ] accept_match - [ ] create_alias - [ ] create_build @@ -3271,6 +3603,7 @@ - [ ] list_builds - [ ] list_fleets - [ ] list_scripts +- [ ] list_tags_for_resource - [ ] put_scaling_policy - [ ] request_upload_credentials - [ ] resolve_alias @@ -3282,6 +3615,8 @@ - [ ] stop_fleet_actions - [ ] stop_game_session_placement - [ ] stop_matchmaking +- [ ] tag_resource +- [ ] untag_resource - [ ] update_alias - [ ] update_build - [ ] update_fleet_attributes @@ -3293,9 +3628,12 @@ - [ ] update_runtime_configuration - [ ] update_script - [ ] validate_matchmaking_rule_set +
## glacier -12% implemented +
+12% implemented + - [ ] abort_multipart_upload - [ ] abort_vault_lock - [ ] add_tags_to_vault @@ -3329,33 +3667,47 @@ - [ ] set_vault_notifications - [ ] upload_archive - [ ] upload_multipart_part +
## globalaccelerator -0% implemented +
+0% implemented + +- [ ] advertise_byoip_cidr - [ ] create_accelerator - [ ] create_endpoint_group - [ ] create_listener - [ ] delete_accelerator - [ ] delete_endpoint_group - [ ] delete_listener +- [ ] deprovision_byoip_cidr - [ ] describe_accelerator - [ ] describe_accelerator_attributes - [ ] describe_endpoint_group - [ ] describe_listener - [ ] list_accelerators +- [ ] list_byoip_cidrs - [ ] list_endpoint_groups - [ ] list_listeners +- [ ] list_tags_for_resource +- [ ] provision_byoip_cidr +- [ ] tag_resource +- [ ] untag_resource - [ ] update_accelerator - [ ] update_accelerator_attributes - [ ] update_endpoint_group - [ ] update_listener +- [ ] withdraw_byoip_cidr +
## glue -11% implemented -- [X] batch_create_partition +
+5% implemented + +- [ ] batch_create_partition - [ ] batch_delete_connection -- [X] batch_delete_partition -- [X] batch_delete_table +- [ ] batch_delete_partition +- [ ] batch_delete_table - [ ] batch_delete_table_version - [ ] batch_get_crawlers - [ ] batch_get_dev_endpoints @@ -3372,7 +3724,7 @@ - [ ] create_dev_endpoint - [ ] create_job - [ ] create_ml_transform -- [X] create_partition +- [ ] create_partition - [ ] create_script - [ ] create_security_configuration - [X] create_table @@ -3418,7 +3770,7 @@ - [ ] get_ml_task_runs - [ ] get_ml_transform - [ ] get_ml_transforms -- [X] get_partition +- [ ] get_partition - [ ] get_partitions - [ ] get_plan - [ ] get_resource_policy @@ -3441,6 +3793,7 @@ - [ ] list_crawlers - [ ] list_dev_endpoints - [ ] list_jobs +- [ ] list_ml_transforms - [ ] list_triggers - [ ] list_workflows - [ ] put_data_catalog_encryption_settings @@ -3470,14 +3823,17 @@ - [ ] update_dev_endpoint - [ ] update_job - [ ] update_ml_transform -- [X] update_partition -- [X] update_table +- [ ] update_partition +- [ ] update_table - [ ] update_trigger - [ ] update_user_defined_function - [ ] update_workflow +
## greengrass -0% implemented +
+0% implemented + - [ ] associate_role_to_group - [ ] associate_service_role_to_account - [ ] create_connector_definition @@ -3568,9 +3924,12 @@ - [ ] update_logger_definition - [ ] update_resource_definition - [ ] update_subscription_definition +
## groundstation -0% implemented +
+0% implemented + - [ ] cancel_contact - [ ] create_config - [ ] create_dataflow_endpoint_group @@ -3596,9 +3955,12 @@ - [ ] untag_resource - [ ] update_config - [ ] update_mission_profile +
## guardduty -0% implemented +
+0% implemented + - [ ] accept_invitation - [ ] archive_findings - [ ] create_detector @@ -3649,18 +4011,31 @@ - [ ] update_ip_set - [ ] update_publishing_destination - [ ] update_threat_intel_set +
## health -0% implemented +
+0% implemented + +- [ ] describe_affected_accounts_for_organization - [ ] describe_affected_entities +- [ ] describe_affected_entities_for_organization - [ ] describe_entity_aggregates - [ ] describe_event_aggregates - [ ] describe_event_details +- [ ] describe_event_details_for_organization - [ ] describe_event_types - [ ] describe_events +- [ ] describe_events_for_organization +- [ ] describe_health_service_status_for_organization +- [ ] disable_health_service_access_for_organization +- [ ] enable_health_service_access_for_organization +
## iam -67% implemented +
+68% implemented + - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -3801,9 +4176,12 @@ - [X] upload_server_certificate - [X] upload_signing_certificate - [X] upload_ssh_public_key +
## imagebuilder -0% implemented +
+0% implemented + - [ ] cancel_image_creation - [ ] create_component - [ ] create_distribution_configuration @@ -3846,18 +4224,24 @@ - [ ] update_distribution_configuration - [ ] update_image_pipeline - [ ] update_infrastructure_configuration +
## importexport -0% implemented +
+0% implemented + - [ ] cancel_job - [ ] create_job - [ ] get_shipping_label - [ ] get_status - [ ] list_jobs - [ ] update_job +
## inspector -0% implemented +
+0% implemented + - [ ] add_attributes_to_findings - [ ] create_assessment_target - [ ] create_assessment_template @@ -3895,9 +4279,12 @@ - [ ] subscribe_to_event - [ ] unsubscribe_from_event - [ ] update_assessment_target +
## iot -20% implemented +
+27% implemented + - [ ] accept_certificate_transfer - [ ] add_thing_to_billing_group - [X] add_thing_to_thing_group @@ -3909,8 +4296,8 @@ - [ ] cancel_audit_mitigation_actions_task - [ ] cancel_audit_task - [ ] cancel_certificate_transfer -- [ ] cancel_job -- [ ] cancel_job_execution +- [X] cancel_job +- [X] cancel_job_execution - [ ] clear_default_authorizer - [ ] confirm_topic_rule_destination - [ ] create_authorizer @@ -3923,7 +4310,7 @@ - [ ] create_mitigation_action - [ ] create_ota_update - [X] create_policy -- [ ] create_policy_version +- [X] create_policy_version - [ ] create_provisioning_claim - [ ] create_provisioning_template - [ ] create_provisioning_template_version @@ -3943,12 +4330,12 @@ - [X] delete_certificate - [ ] delete_domain_configuration - [ ] delete_dynamic_thing_group -- [ ] delete_job -- [ ] delete_job_execution +- [X] delete_job +- [X] delete_job_execution - [ ] delete_mitigation_action - [ ] delete_ota_update - [X] delete_policy -- [ ] delete_policy_version +- [X] delete_policy_version - [ ] delete_provisioning_template - [ ] delete_provisioning_template_version - [ ] delete_registration_code @@ -3977,7 +4364,7 @@ - [ ] describe_event_configurations - [ ] describe_index - [X] describe_job -- [ ] describe_job_execution +- [X] describe_job_execution - [ ] describe_mitigation_action - [ ] describe_provisioning_template - [ ] describe_provisioning_template_version @@ -3998,19 +4385,19 @@ - [ ] get_cardinality - [ ] get_effective_policies - [ ] get_indexing_configuration -- [ ] get_job_document +- [X] get_job_document - [ ] get_logging_options - [ ] get_ota_update - [ ] get_percentiles - [X] get_policy -- [ ] get_policy_version +- [X] get_policy_version - [ ] get_registration_code - [ ] get_statistics - [ ] get_topic_rule - [ ] get_topic_rule_destination - [ ] get_v2_logging_options - [ ] list_active_violations -- [ ] list_attached_policies +- [X] list_attached_policies - [ ] list_audit_findings - [ ] list_audit_mitigation_actions_executions - [ ] list_audit_mitigation_actions_tasks @@ -4022,15 +4409,15 @@ - [ ] list_certificates_by_ca - [ ] list_domain_configurations - [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs +- [X] list_job_executions_for_job +- [X] list_job_executions_for_thing +- [X] list_jobs - [ ] list_mitigation_actions - [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies - [X] list_policy_principals -- [ ] list_policy_versions +- [X] list_policy_versions - [X] list_principal_policies - [X] list_principal_things - [ ] list_provisioning_template_versions @@ -4065,7 +4452,7 @@ - [ ] replace_topic_rule - [ ] search_index - [ ] set_default_authorizer -- [ ] set_default_policy_version +- [X] set_default_policy_version - [ ] set_logging_options - [ ] set_v2_logging_level - [ ] set_v2_logging_options @@ -4099,23 +4486,32 @@ - [X] update_thing_groups_for_thing - [ ] update_topic_rule_destination - [ ] validate_security_profile_behaviors +
## iot-data -100% implemented +
+100% implemented + - [X] delete_thing_shadow - [X] get_thing_shadow - [X] publish - [X] update_thing_shadow +
## iot-jobs-data -0% implemented +
+0% implemented + - [ ] describe_job_execution - [ ] get_pending_job_executions - [ ] start_next_pending_job_execution - [ ] update_job_execution +
## iot1click-devices -0% implemented +
+0% implemented + - [ ] claim_devices_by_claim_code - [ ] describe_device - [ ] finalize_device_claim @@ -4129,9 +4525,12 @@ - [ ] unclaim_device - [ ] untag_resource - [ ] update_device_state +
## iot1click-projects -0% implemented +
+0% implemented + - [ ] associate_device_with_placement - [ ] create_placement - [ ] create_project @@ -4148,9 +4547,12 @@ - [ ] untag_resource - [ ] update_placement - [ ] update_project +
## iotanalytics -0% implemented +
+0% implemented + - [ ] batch_put_message - [ ] cancel_pipeline_reprocessing - [ ] create_channel @@ -4185,9 +4587,12 @@ - [ ] update_dataset - [ ] update_datastore - [ ] update_pipeline +
## iotevents -0% implemented +
+0% implemented + - [ ] create_detector_model - [ ] create_input - [ ] delete_detector_model @@ -4204,16 +4609,22 @@ - [ ] untag_resource - [ ] update_detector_model - [ ] update_input +
## iotevents-data -0% implemented +
+0% implemented + - [ ] batch_put_message - [ ] batch_update_detector - [ ] describe_detector - [ ] list_detectors +
## iotsecuretunneling -0% implemented +
+0% implemented + - [ ] close_tunnel - [ ] describe_tunnel - [ ] list_tags_for_resource @@ -4221,9 +4632,12 @@ - [ ] open_tunnel - [ ] tag_resource - [ ] untag_resource +
## iotthingsgraph -0% implemented +
+0% implemented + - [ ] associate_entity_to_thing - [ ] create_flow_template - [ ] create_system_instance @@ -4259,9 +4673,12 @@ - [ ] update_flow_template - [ ] update_system_template - [ ] upload_entity_definitions +
## kafka -0% implemented +
+0% implemented + - [ ] create_cluster - [ ] create_configuration - [ ] delete_cluster @@ -4274,6 +4691,7 @@ - [ ] list_clusters - [ ] list_configuration_revisions - [ ] list_configurations +- [ ] list_kafka_versions - [ ] list_nodes - [ ] list_tags_for_resource - [ ] tag_resource @@ -4282,9 +4700,12 @@ - [ ] update_broker_storage - [ ] update_cluster_configuration - [ ] update_monitoring +
## kendra -0% implemented +
+0% implemented + - [ ] batch_delete_document - [ ] batch_put_document - [ ] create_data_source @@ -4305,9 +4726,12 @@ - [ ] submit_feedback - [ ] update_data_source - [ ] update_index +
## kinesis -50% implemented +
+50% implemented + - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period @@ -4336,25 +4760,37 @@ - [ ] stop_stream_encryption - [ ] subscribe_to_shard - [ ] update_shard_count +
## kinesis-video-archived-media -0% implemented +
+0% implemented + - [ ] get_dash_streaming_session_url - [ ] get_hls_streaming_session_url - [ ] get_media_for_fragment_list - [ ] list_fragments +
## kinesis-video-media -0% implemented +
+0% implemented + - [ ] get_media +
## kinesis-video-signaling -0% implemented +
+0% implemented + - [ ] get_ice_server_config - [ ] send_alexa_offer_to_master +
## kinesisanalytics -0% implemented +
+0% implemented + - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input - [ ] add_application_input_processing_configuration @@ -4375,9 +4811,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_application +
## kinesisanalyticsv2 -0% implemented +
+0% implemented + - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input - [ ] add_application_input_processing_configuration @@ -4404,9 +4843,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_application +
## kinesisvideo -0% implemented +
+0% implemented + - [ ] create_signaling_channel - [ ] create_stream - [ ] delete_signaling_channel @@ -4426,9 +4868,12 @@ - [ ] update_data_retention - [ ] update_signaling_channel - [ ] update_stream +
## kms -43% implemented +
+45% implemented + - [X] cancel_key_deletion - [ ] connect_custom_key_store - [ ] create_alias @@ -4470,14 +4915,17 @@ - [X] schedule_key_deletion - [ ] sign - [X] tag_resource -- [ ] untag_resource +- [X] untag_resource - [ ] update_alias - [ ] update_custom_key_store - [X] update_key_description - [ ] verify +
## lakeformation -0% implemented +
+0% implemented + - [ ] batch_grant_permissions - [ ] batch_revoke_permissions - [ ] deregister_resource @@ -4491,11 +4939,14 @@ - [ ] register_resource - [ ] revoke_permissions - [ ] update_resource +
## lambda -32% implemented +
+38% implemented + - [ ] add_layer_version_permission -- [ ] add_permission +- [X] add_permission - [ ] create_alias - [X] create_event_source_mapping - [X] create_function @@ -4516,7 +4967,7 @@ - [ ] get_layer_version - [ ] get_layer_version_by_arn - [ ] get_layer_version_policy -- [ ] get_policy +- [X] get_policy - [ ] get_provisioned_concurrency_config - [X] invoke - [ ] invoke_async @@ -4535,7 +4986,7 @@ - [ ] put_function_event_invoke_config - [ ] put_provisioned_concurrency_config - [ ] remove_layer_version_permission -- [ ] remove_permission +- [X] remove_permission - [X] tag_resource - [X] untag_resource - [ ] update_alias @@ -4543,9 +4994,12 @@ - [X] update_function_code - [X] update_function_configuration - [ ] update_function_event_invoke_config +
## lex-models -0% implemented +
+0% implemented + - [ ] create_bot_version - [ ] create_intent_version - [ ] create_slot_type_version @@ -4577,22 +5031,31 @@ - [ ] get_slot_type_versions - [ ] get_slot_types - [ ] get_utterances_view +- [ ] list_tags_for_resource - [ ] put_bot - [ ] put_bot_alias - [ ] put_intent - [ ] put_slot_type - [ ] start_import +- [ ] tag_resource +- [ ] untag_resource +
## lex-runtime -0% implemented +
+0% implemented + - [ ] delete_session - [ ] get_session - [ ] post_content - [ ] post_text - [ ] put_session +
## license-manager -0% implemented +
+0% implemented + - [ ] create_license_configuration - [ ] delete_license_configuration - [ ] get_license_configuration @@ -4609,9 +5072,12 @@ - [ ] update_license_configuration - [ ] update_license_specifications_for_resource - [ ] update_service_settings +
## lightsail -0% implemented +
+0% implemented + - [ ] allocate_static_ip - [ ] attach_disk - [ ] attach_instances_to_load_balancer @@ -4620,6 +5086,7 @@ - [ ] close_instance_public_ports - [ ] copy_snapshot - [ ] create_cloud_formation_stack +- [ ] create_contact_method - [ ] create_disk - [ ] create_disk_from_snapshot - [ ] create_disk_snapshot @@ -4634,7 +5101,9 @@ - [ ] create_relational_database - [ ] create_relational_database_from_snapshot - [ ] create_relational_database_snapshot +- [ ] delete_alarm - [ ] delete_auto_snapshot +- [ ] delete_contact_method - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -4655,10 +5124,12 @@ - [ ] enable_add_on - [ ] export_snapshot - [ ] get_active_names +- [ ] get_alarms - [ ] get_auto_snapshots - [ ] get_blueprints - [ ] get_bundles - [ ] get_cloud_formation_stack_records +- [ ] get_contact_methods - [ ] get_disk - [ ] get_disk_snapshot - [ ] get_disk_snapshots @@ -4702,24 +5173,30 @@ - [ ] is_vpc_peered - [ ] open_instance_public_ports - [ ] peer_vpc +- [ ] put_alarm - [ ] put_instance_public_ports - [ ] reboot_instance - [ ] reboot_relational_database - [ ] release_static_ip +- [ ] send_contact_method_verification - [ ] start_instance - [ ] start_relational_database - [ ] stop_instance - [ ] stop_relational_database - [ ] tag_resource +- [ ] test_alarm - [ ] unpeer_vpc - [ ] untag_resource - [ ] update_domain_entry - [ ] update_load_balancer_attribute - [ ] update_relational_database - [ ] update_relational_database_parameters +
## logs -35% implemented +
+43% implemented + - [ ] associate_kms_key - [ ] cancel_export_task - [ ] create_export_task @@ -4731,7 +5208,7 @@ - [ ] delete_metric_filter - [ ] delete_resource_policy - [X] delete_retention_policy -- [ ] delete_subscription_filter +- [X] delete_subscription_filter - [ ] describe_destinations - [ ] describe_export_tasks - [X] describe_log_groups @@ -4739,7 +5216,7 @@ - [ ] describe_metric_filters - [ ] describe_queries - [ ] describe_resource_policies -- [ ] describe_subscription_filters +- [X] describe_subscription_filters - [ ] disassociate_kms_key - [X] filter_log_events - [X] get_log_events @@ -4753,15 +5230,18 @@ - [ ] put_metric_filter - [ ] put_resource_policy - [X] put_retention_policy -- [ ] put_subscription_filter +- [X] put_subscription_filter - [ ] start_query - [ ] stop_query - [X] tag_log_group - [ ] test_metric_filter - [X] untag_log_group +
## machinelearning -0% implemented +
+0% implemented + - [ ] add_tags - [ ] create_batch_prediction - [ ] create_data_source_from_rds @@ -4790,9 +5270,12 @@ - [ ] update_data_source - [ ] update_evaluation - [ ] update_ml_model +
## macie -0% implemented +
+0% implemented + - [ ] associate_member_account - [ ] associate_s3_resources - [ ] disassociate_member_account @@ -4800,49 +5283,65 @@ - [ ] list_member_accounts - [ ] list_s3_resources - [ ] update_s3_resources +
## managedblockchain -0% implemented +
+16% implemented + - [ ] create_member -- [ ] create_network +- [X] create_network - [ ] create_node - [ ] create_proposal - [ ] delete_member - [ ] delete_node - [ ] get_member -- [ ] get_network +- [X] get_network - [ ] get_node - [ ] get_proposal - [ ] list_invitations - [ ] list_members -- [ ] list_networks +- [X] list_networks - [ ] list_nodes - [ ] list_proposal_votes - [ ] list_proposals - [ ] reject_invitation - [ ] vote_on_proposal +
## marketplace-catalog -0% implemented +
+0% implemented + - [ ] cancel_change_set - [ ] describe_change_set - [ ] describe_entity - [ ] list_change_sets - [ ] list_entities - [ ] start_change_set +
## marketplace-entitlement -0% implemented +
+0% implemented + - [ ] get_entitlements +
## marketplacecommerceanalytics -0% implemented +
+0% implemented + - [ ] generate_data_set - [ ] start_support_data_export +
## mediaconnect -0% implemented +
+0% implemented + - [ ] add_flow_outputs +- [ ] add_flow_sources - [ ] create_flow - [ ] delete_flow - [ ] describe_flow @@ -4851,17 +5350,22 @@ - [ ] list_flows - [ ] list_tags_for_resource - [ ] remove_flow_output +- [ ] remove_flow_source - [ ] revoke_flow_entitlement - [ ] start_flow - [ ] stop_flow - [ ] tag_resource - [ ] untag_resource +- [ ] update_flow - [ ] update_flow_entitlement - [ ] update_flow_output - [ ] update_flow_source +
## mediaconvert -0% implemented +
+0% implemented + - [ ] associate_certificate - [ ] cancel_job - [ ] create_job @@ -4887,9 +5391,12 @@ - [ ] update_job_template - [ ] update_preset - [ ] update_queue +
## medialive -0% implemented +
+0% implemented + - [ ] batch_update_schedule - [ ] create_channel - [ ] create_input @@ -4933,9 +5440,12 @@ - [ ] update_multiplex - [ ] update_multiplex_program - [ ] update_reservation +
## mediapackage -0% implemented +
+0% implemented + - [ ] create_channel - [ ] create_harvest_job - [ ] create_origin_endpoint @@ -4954,9 +5464,12 @@ - [ ] untag_resource - [ ] update_channel - [ ] update_origin_endpoint +
## mediapackage-vod -0% implemented +
+0% implemented + - [ ] create_asset - [ ] create_packaging_configuration - [ ] create_packaging_group @@ -4969,9 +5482,12 @@ - [ ] list_assets - [ ] list_packaging_configurations - [ ] list_packaging_groups +
## mediastore -0% implemented +
+0% implemented + - [ ] create_container - [ ] delete_container - [ ] delete_container_policy @@ -4990,17 +5506,23 @@ - [ ] stop_access_logging - [ ] tag_resource - [ ] untag_resource +
## mediastore-data -0% implemented +
+0% implemented + - [ ] delete_object - [ ] describe_object - [ ] get_object - [ ] list_items - [ ] put_object +
## mediatailor -0% implemented +
+0% implemented + - [ ] delete_playback_configuration - [ ] get_playback_configuration - [ ] list_playback_configurations @@ -5008,16 +5530,22 @@ - [ ] put_playback_configuration - [ ] tag_resource - [ ] untag_resource +
## meteringmarketplace -0% implemented +
+0% implemented + - [ ] batch_meter_usage - [ ] meter_usage - [ ] register_usage - [ ] resolve_customer +
## mgh -0% implemented +
+0% implemented + - [ ] associate_created_artifact - [ ] associate_discovered_resource - [ ] create_progress_update_stream @@ -5027,6 +5555,7 @@ - [ ] disassociate_created_artifact - [ ] disassociate_discovered_resource - [ ] import_migration_task +- [ ] list_application_states - [ ] list_created_artifacts - [ ] list_discovered_resources - [ ] list_migration_tasks @@ -5034,15 +5563,21 @@ - [ ] notify_application_state - [ ] notify_migration_task_state - [ ] put_resource_attributes +
## migrationhub-config -0% implemented +
+0% implemented + - [ ] create_home_region_control - [ ] describe_home_region_controls - [ ] get_home_region +
## mobile -0% implemented +
+0% implemented + - [ ] create_project - [ ] delete_project - [ ] describe_bundle @@ -5052,9 +5587,12 @@ - [ ] list_bundles - [ ] list_projects - [ ] update_project +
## mq -0% implemented +
+0% implemented + - [ ] create_broker - [ ] create_configuration - [ ] create_tags @@ -5077,9 +5615,12 @@ - [ ] update_broker - [ ] update_configuration - [ ] update_user +
## mturk -0% implemented +
+0% implemented + - [ ] accept_qualification_request - [ ] approve_assignment - [ ] associate_qualification_with_worker @@ -5119,9 +5660,12 @@ - [ ] update_hit_type_of_hit - [ ] update_notification_settings - [ ] update_qualification_type +
## neptune -0% implemented +
+0% implemented + - [ ] add_role_to_db_cluster - [ ] add_source_identifier_to_subscription - [ ] add_tags_to_resource @@ -5179,9 +5723,14 @@ - [ ] reset_db_parameter_group - [ ] restore_db_cluster_from_snapshot - [ ] restore_db_cluster_to_point_in_time +- [ ] start_db_cluster +- [ ] stop_db_cluster +
## networkmanager -0% implemented +
+0% implemented + - [ ] associate_customer_gateway - [ ] associate_link - [ ] create_device @@ -5210,9 +5759,12 @@ - [ ] update_global_network - [ ] update_link - [ ] update_site +
## opsworks -12% implemented +
+12% implemented + - [ ] assign_instance - [ ] assign_volume - [ ] associate_elastic_ip @@ -5287,9 +5839,12 @@ - [ ] update_stack - [ ] update_user_profile - [ ] update_volume +
## opsworkscm -0% implemented +
+0% implemented + - [ ] associate_node - [ ] create_backup - [ ] create_server @@ -5302,13 +5857,19 @@ - [ ] describe_servers - [ ] disassociate_node - [ ] export_server_engine_attribute +- [ ] list_tags_for_resource - [ ] restore_server - [ ] start_maintenance +- [ ] tag_resource +- [ ] untag_resource - [ ] update_server - [ ] update_server_engine_attributes +
## organizations -48% implemented +
+51% implemented + - [ ] accept_handshake - [X] attach_policy - [ ] cancel_handshake @@ -5354,19 +5915,27 @@ - [ ] remove_account_from_organization - [X] tag_resource - [X] untag_resource -- [ ] update_organizational_unit +- [X] update_organizational_unit - [ ] update_policy +
## outposts -0% implemented +
+0% implemented + - [ ] create_outpost +- [ ] delete_outpost +- [ ] delete_site - [ ] get_outpost - [ ] get_outpost_instance_types - [ ] list_outposts - [ ] list_sites +
## personalize -0% implemented +
+0% implemented + - [ ] create_batch_inference_job - [ ] create_campaign - [ ] create_dataset @@ -5406,23 +5975,35 @@ - [ ] list_solution_versions - [ ] list_solutions - [ ] update_campaign +
## personalize-events -0% implemented +
+0% implemented + - [ ] put_events +
## personalize-runtime -0% implemented +
+0% implemented + - [ ] get_personalized_ranking - [ ] get_recommendations +
## pi -0% implemented +
+0% implemented + - [ ] describe_dimension_keys - [ ] get_resource_metrics +
## pinpoint -0% implemented +
+0% implemented + - [ ] create_app - [ ] create_campaign - [ ] create_email_template @@ -5430,6 +6011,7 @@ - [ ] create_import_job - [ ] create_journey - [ ] create_push_template +- [ ] create_recommender_configuration - [ ] create_segment - [ ] create_sms_template - [ ] create_voice_template @@ -5448,6 +6030,7 @@ - [ ] delete_gcm_channel - [ ] delete_journey - [ ] delete_push_template +- [ ] delete_recommender_configuration - [ ] delete_segment - [ ] delete_sms_channel - [ ] delete_sms_template @@ -5485,6 +6068,8 @@ - [ ] get_journey_execution_activity_metrics - [ ] get_journey_execution_metrics - [ ] get_push_template +- [ ] get_recommender_configuration +- [ ] get_recommender_configurations - [ ] get_segment - [ ] get_segment_export_jobs - [ ] get_segment_import_jobs @@ -5498,6 +6083,7 @@ - [ ] get_voice_template - [ ] list_journeys - [ ] list_tags_for_resource +- [ ] list_template_versions - [ ] list_templates - [ ] phone_number_validate - [ ] put_event_stream @@ -5523,14 +6109,19 @@ - [ ] update_journey - [ ] update_journey_state - [ ] update_push_template +- [ ] update_recommender_configuration - [ ] update_segment - [ ] update_sms_channel - [ ] update_sms_template +- [ ] update_template_active_version - [ ] update_voice_channel - [ ] update_voice_template +
## pinpoint-email -0% implemented +
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_dedicated_ip_pool @@ -5573,9 +6164,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_configuration_set_event_destination +
## pinpoint-sms-voice

-0% implemented
+
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] delete_configuration_set @@ -5583,9 +6177,12 @@ - [ ] get_configuration_set_event_destinations - [ ] send_voice_message - [ ] update_configuration_set_event_destination +
## polly

-55% implemented
+
+55% implemented + - [X] delete_lexicon - [X] describe_voices - [X] get_lexicon @@ -5595,15 +6192,21 @@ - [X] put_lexicon - [ ] start_speech_synthesis_task - [ ] synthesize_speech +
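Since the checked-off Polly calls above cover voice and lexicon listing, a hedged usage sketch may help; the region and language code are illustrative:

```python
import boto3
from moto import mock_polly


@mock_polly
def list_english_voice_names():
    polly = boto3.client("polly", region_name="us-west-2")
    # describe_voices and list_lexicons are both marked [X] above
    voices = polly.describe_voices(LanguageCode="en-US")["Voices"]
    lexicons = polly.list_lexicons()["Lexicons"]
    return [v["Name"] for v in voices], lexicons
```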
## pricing

-0% implemented
+
+0% implemented
+
- [ ] describe_services
- [ ] get_attribute_values
- [ ] get_products
+
## qldb

-0% implemented
+
+0% implemented + - [ ] create_ledger - [ ] delete_ledger - [ ] describe_journal_s3_export @@ -5619,13 +6222,19 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_ledger +
## qldb-session

-0% implemented
+
+0% implemented
+
- [ ] send_command
+
## quicksight

-0% implemented
+
+0% implemented + - [ ] cancel_ingestion - [ ] create_dashboard - [ ] create_data_set @@ -5676,6 +6285,7 @@ - [ ] list_user_groups - [ ] list_users - [ ] register_user +- [ ] search_dashboards - [ ] tag_resource - [ ] untag_resource - [ ] update_dashboard @@ -5691,9 +6301,12 @@ - [ ] update_template_alias - [ ] update_template_permissions - [ ] update_user +
## ram

-0% implemented
+
+0% implemented + - [ ] accept_resource_share_invitation - [ ] associate_resource_share - [ ] associate_resource_share_permission @@ -5717,9 +6330,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_resource_share +
## rds

-0% implemented
+
+0% implemented + - [ ] add_role_to_db_cluster - [ ] add_role_to_db_instance - [ ] add_source_identifier_to_subscription @@ -5727,6 +6343,7 @@ - [ ] apply_pending_maintenance_action - [ ] authorize_db_security_group_ingress - [ ] backtrack_db_cluster +- [ ] cancel_export_task - [ ] copy_db_cluster_parameter_group - [ ] copy_db_cluster_snapshot - [ ] copy_db_parameter_group @@ -5792,6 +6409,7 @@ - [ ] describe_event_categories - [ ] describe_event_subscriptions - [ ] describe_events +- [ ] describe_export_tasks - [ ] describe_global_clusters - [ ] describe_installation_media - [ ] describe_option_group_options @@ -5806,6 +6424,7 @@ - [ ] failover_db_cluster - [ ] import_installation_media - [ ] list_tags_for_resource +- [ ] modify_certificates - [ ] modify_current_db_cluster_capacity - [ ] modify_db_cluster - [ ] modify_db_cluster_endpoint @@ -5843,21 +6462,28 @@ - [ ] start_activity_stream - [ ] start_db_cluster - [ ] start_db_instance +- [ ] start_export_task - [ ] stop_activity_stream - [ ] stop_db_cluster - [ ] stop_db_instance +
## rds-data

-0% implemented
+
+0% implemented
+
- [ ] batch_execute_statement
- [ ] begin_transaction
- [ ] commit_transaction
- [ ] execute_sql
- [ ] execute_statement
- [ ] rollback_transaction
+
## redshift

-30% implemented
+
+29% implemented + - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access @@ -5935,18 +6561,23 @@ - [ ] modify_scheduled_action - [X] modify_snapshot_copy_retention_period - [ ] modify_snapshot_schedule +- [ ] pause_cluster - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group - [ ] resize_cluster - [X] restore_from_cluster_snapshot - [ ] restore_table_from_cluster_snapshot +- [ ] resume_cluster - [ ] revoke_cluster_security_group_ingress - [ ] revoke_snapshot_access - [ ] rotate_encryption_key +
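restore_from_cluster_snapshot is among the checked-off calls above; a hedged sketch of a snapshot round-trip under the mock (identifiers and credentials are placeholders):

```python
import boto3
from moto import mock_redshift


@mock_redshift
def snapshot_round_trip():
    client = boto3.client("redshift", region_name="us-east-1")
    client.create_cluster(
        ClusterIdentifier="warehouse",
        NodeType="ds2.xlarge",
        MasterUsername="admin",
        MasterUserPassword="Password1",
    )
    client.create_cluster_snapshot(
        SnapshotIdentifier="warehouse-snap", ClusterIdentifier="warehouse"
    )
    # restore_from_cluster_snapshot is marked [X] above
    client.restore_from_cluster_snapshot(
        ClusterIdentifier="warehouse-restored", SnapshotIdentifier="warehouse-snap"
    )
    return client.describe_clusters()["Clusters"]
```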
## rekognition

-0% implemented
+
+0% implemented + - [ ] compare_faces - [ ] create_collection - [ ] create_project @@ -5971,6 +6602,7 @@ - [ ] get_face_search - [ ] get_label_detection - [ ] get_person_tracking +- [ ] get_text_detection - [ ] index_faces - [ ] list_collections - [ ] list_faces @@ -5986,11 +6618,15 @@ - [ ] start_person_tracking - [ ] start_project_version - [ ] start_stream_processor +- [ ] start_text_detection - [ ] stop_project_version - [ ] stop_stream_processor +
## resource-groups

-75% implemented
+
+75% implemented + - [X] create_group - [X] delete_group - [X] get_group @@ -6003,9 +6639,12 @@ - [X] untag - [X] update_group - [X] update_group_query +
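With create_group and get_group both checked off, a sketch of the tag-based group workflow; the query shape follows the public TAG_FILTERS_1_0 format, and the names are illustrative:

```python
import json

import boto3
from moto import mock_resourcegroups


@mock_resourcegroups
def create_tag_filtered_group():
    rg = boto3.client("resource-groups", region_name="us-east-1")
    query = {
        "ResourceTypeFilters": ["AWS::AllSupported"],
        "TagFilters": [{"Key": "Stage", "Values": ["test"]}],
    }
    rg.create_group(
        Name="test-resources",
        ResourceQuery={"Type": "TAG_FILTERS_1_0", "Query": json.dumps(query)},
    )
    return rg.get_group(GroupName="test-resources")["Group"]
```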
## resourcegroupstaggingapi

-37% implemented
+
+37% implemented
+
- [ ] describe_report_creation
- [ ] get_compliance_summary
- [X] get_resources
@@ -6014,12 +6653,16 @@
- [ ] start_report_creation
- [ ] tag_resources
- [ ] untag_resources
+
## robomaker

-0% implemented
+
+0% implemented + - [ ] batch_describe_simulation_job - [ ] cancel_deployment_job - [ ] cancel_simulation_job +- [ ] cancel_simulation_job_batch - [ ] create_deployment_job - [ ] create_fleet - [ ] create_robot @@ -6039,23 +6682,29 @@ - [ ] describe_robot_application - [ ] describe_simulation_application - [ ] describe_simulation_job +- [ ] describe_simulation_job_batch - [ ] list_deployment_jobs - [ ] list_fleets - [ ] list_robot_applications - [ ] list_robots - [ ] list_simulation_applications +- [ ] list_simulation_job_batches - [ ] list_simulation_jobs - [ ] list_tags_for_resource - [ ] register_robot - [ ] restart_simulation_job +- [ ] start_simulation_job_batch - [ ] sync_deployment_job - [ ] tag_resource - [ ] untag_resource - [ ] update_robot_application - [ ] update_simulation_application +
## route53

-12% implemented
+
+12% implemented + - [ ] associate_vpc_with_hosted_zone - [ ] change_resource_record_sets - [X] change_tags_for_resource @@ -6112,9 +6761,12 @@ - [ ] update_hosted_zone_comment - [ ] update_traffic_policy_comment - [ ] update_traffic_policy_instance +
## route53domains

-0% implemented
+
+0% implemented + - [ ] check_domain_availability - [ ] check_domain_transferability - [ ] delete_tags_for_domain @@ -6139,9 +6791,12 @@ - [ ] update_domain_nameservers - [ ] update_tags_for_domain - [ ] view_billing +
## route53resolver

-0% implemented
+
+0% implemented + - [ ] associate_resolver_endpoint_ip_address - [ ] associate_resolver_rule - [ ] create_resolver_endpoint @@ -6164,9 +6819,12 @@ - [ ] untag_resource - [ ] update_resolver_endpoint - [ ] update_resolver_rule +
## s3

-14% implemented
+
+13% implemented + - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -6177,63 +6835,63 @@ - [X] delete_bucket_cors - [ ] delete_bucket_encryption - [ ] delete_bucket_inventory_configuration -- [X] delete_bucket_lifecycle +- [ ] delete_bucket_lifecycle - [ ] delete_bucket_metrics_configuration - [X] delete_bucket_policy - [ ] delete_bucket_replication - [X] delete_bucket_tagging - [ ] delete_bucket_website -- [X] delete_object +- [ ] delete_object - [ ] delete_object_tagging -- [X] delete_objects -- [X] delete_public_access_block +- [ ] delete_objects +- [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration -- [X] get_bucket_cors +- [ ] get_bucket_cors - [ ] get_bucket_encryption - [ ] get_bucket_inventory_configuration -- [X] get_bucket_lifecycle -- [X] get_bucket_lifecycle_configuration -- [X] get_bucket_location -- [X] get_bucket_logging +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification - [ ] get_bucket_notification_configuration - [X] get_bucket_policy -- [X] get_bucket_policy_status +- [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment -- [X] get_bucket_tagging +- [ ] get_bucket_tagging - [X] get_bucket_versioning - [ ] get_bucket_website -- [X] get_object -- [X] get_object_acl +- [ ] get_object +- [ ] get_object_acl - [ ] get_object_legal_hold - [ ] get_object_lock_configuration - [ ] get_object_retention - [ ] get_object_tagging - [ ] get_object_torrent -- [X] get_public_access_block +- [ ] get_public_access_block - [ ] head_bucket - [ ] head_object - [ ] list_bucket_analytics_configurations - [ ] list_bucket_inventory_configurations - [ ] list_bucket_metrics_configurations -- [X] list_buckets -- [X] list_multipart_uploads +- [ ] list_buckets +- [ ] list_multipart_uploads - [ ] list_object_versions -- [X] list_objects -- [X] list_objects_v2 +- [ ] list_objects +- [ ] list_objects_v2 - [ ] list_parts - [X] put_bucket_accelerate_configuration -- [X] put_bucket_acl +- [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors - [ ] put_bucket_encryption - [ ] put_bucket_inventory_configuration -- [X] put_bucket_lifecycle -- [X] put_bucket_lifecycle_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration - [X] put_bucket_logging - [ ] put_bucket_metrics_configuration - [ ] put_bucket_notification @@ -6241,42 +6899,51 @@ - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment -- [X] put_bucket_tagging -- [X] put_bucket_versioning +- [ ] put_bucket_tagging +- [ ] put_bucket_versioning - [ ] put_bucket_website -- [X] put_object +- [ ] put_object - [ ] put_object_acl - [ ] put_object_legal_hold - [ ] put_object_lock_configuration - [ ] put_object_retention - [ ] put_object_tagging -- [X] put_public_access_block +- [ ] put_public_access_block - [ ] restore_object - [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +
## s3control

-0% implemented
+
+0% implemented + - [ ] create_access_point - [ ] create_job - [ ] delete_access_point - [ ] delete_access_point_policy +- [ ] delete_job_tagging - [ ] delete_public_access_block - [ ] describe_job - [ ] get_access_point - [ ] get_access_point_policy - [ ] get_access_point_policy_status +- [ ] get_job_tagging - [ ] get_public_access_block - [ ] list_access_points - [ ] list_jobs - [ ] put_access_point_policy +- [ ] put_job_tagging - [ ] put_public_access_block - [ ] update_job_priority - [ ] update_job_status +
## sagemaker

-0% implemented
+
+0% implemented + - [ ] add_tags - [ ] associate_trial_component - [ ] create_algorithm @@ -6349,6 +7016,7 @@ - [ ] describe_trial - [ ] describe_trial_component - [ ] describe_user_profile +- [ ] describe_workforce - [ ] describe_workteam - [ ] disassociate_trial_component - [ ] get_search_suggestions @@ -6407,22 +7075,32 @@ - [ ] update_trial - [ ] update_trial_component - [ ] update_user_profile +- [ ] update_workforce - [ ] update_workteam +
## sagemaker-a2i-runtime

-0% implemented
+
+0% implemented + - [ ] delete_human_loop - [ ] describe_human_loop - [ ] list_human_loops - [ ] start_human_loop - [ ] stop_human_loop +
## sagemaker-runtime

-0% implemented
+
+0% implemented
+
- [ ] invoke_endpoint
+
## savingsplans

-0% implemented
+
+0% implemented + - [ ] create_savings_plan - [ ] describe_savings_plan_rates - [ ] describe_savings_plans @@ -6431,9 +7109,12 @@ - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource +
## schemas

-0% implemented
+
+0% implemented + - [ ] create_discoverer - [ ] create_registry - [ ] create_schema @@ -6463,9 +7144,12 @@ - [ ] update_discoverer - [ ] update_registry - [ ] update_schema +
## sdb

-0% implemented
+
+0% implemented + - [ ] batch_delete_attributes - [ ] batch_put_attributes - [ ] create_domain @@ -6476,9 +7160,12 @@ - [ ] list_domains - [ ] put_attributes - [ ] select +
## secretsmanager

-61% implemented
+
+66% implemented + - [ ] cancel_rotate_secret - [X] create_secret - [ ] delete_resource_policy @@ -6495,11 +7182,14 @@ - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource -- [ ] update_secret +- [X] update_secret - [ ] update_secret_version_stage +
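update_secret is newly checked off above; a minimal sketch of the create/update/read cycle (names and values are placeholders):

```python
import boto3
from moto import mock_secretsmanager


@mock_secretsmanager
def update_and_read_secret():
    sm = boto3.client("secretsmanager", region_name="us-east-1")
    sm.create_secret(Name="db-password", SecretString="hunter2")
    # update_secret is the call flipped to [X] above
    sm.update_secret(SecretId="db-password", SecretString="correct-horse")
    return sm.get_secret_value(SecretId="db-password")["SecretString"]
```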
## securityhub

-0% implemented
+
+0% implemented + - [ ] accept_invitation - [ ] batch_disable_standards - [ ] batch_enable_standards @@ -6515,6 +7205,8 @@ - [ ] describe_action_targets - [ ] describe_hub - [ ] describe_products +- [ ] describe_standards +- [ ] describe_standards_controls - [ ] disable_import_findings_for_product - [ ] disable_security_hub - [ ] disassociate_from_master_account @@ -6538,9 +7230,13 @@ - [ ] update_action_target - [ ] update_findings - [ ] update_insight +- [ ] update_standards_control +
## serverlessrepo

-0% implemented
+
+0% implemented + - [ ] create_application - [ ] create_application_version - [ ] create_cloud_formation_change_set @@ -6553,10 +7249,14 @@ - [ ] list_application_versions - [ ] list_applications - [ ] put_application_policy +- [ ] unshare_application - [ ] update_application +
## service-quotas

-0% implemented
+
+0% implemented + - [ ] associate_service_quota_template - [ ] delete_service_quota_increase_request_from_template - [ ] disassociate_service_quota_template @@ -6573,9 +7273,12 @@ - [ ] list_services - [ ] put_service_quota_increase_request_into_template - [ ] request_service_quota_increase +
## servicecatalog

-0% implemented
+
+0% implemented + - [ ] accept_portfolio_share - [ ] associate_budget_with_resource - [ ] associate_principal_with_portfolio @@ -6659,9 +7362,12 @@ - [ ] update_provisioning_artifact - [ ] update_service_action - [ ] update_tag_option +
## servicediscovery

-0% implemented
+
+0% implemented + - [ ] create_http_namespace - [ ] create_private_dns_namespace - [ ] create_public_dns_namespace @@ -6682,12 +7388,15 @@ - [ ] register_instance - [ ] update_instance_custom_health_status - [ ] update_service +
## ses

-14% implemented
+
+18% implemented + - [ ] clone_receipt_rule_set -- [ ] create_configuration_set -- [ ] create_configuration_set_event_destination +- [X] create_configuration_set +- [X] create_configuration_set_event_destination - [ ] create_configuration_set_tracking_options - [ ] create_custom_verification_email_template - [ ] create_receipt_filter @@ -6717,7 +7426,7 @@ - [ ] get_identity_policies - [ ] get_identity_verification_attributes - [X] get_send_quota -- [ ] get_send_statistics +- [X] get_send_statistics - [ ] get_template - [ ] list_configuration_sets - [ ] list_custom_verification_email_templates @@ -6756,9 +7465,12 @@ - [ ] verify_domain_identity - [X] verify_email_address - [X] verify_email_identity +
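get_send_statistics and the configuration-set calls are newly checked off above; a sketch of sending through the mock and reading the counters back (addresses are illustrative):

```python
import boto3
from moto import mock_ses


@mock_ses
def send_and_count():
    ses = boto3.client("ses", region_name="us-east-1")
    # the mock, like real SES, requires a verified identity before sending
    ses.verify_email_identity(EmailAddress="sender@example.com")
    ses.send_email(
        Source="sender@example.com",
        Destination={"ToAddresses": ["to@example.com"]},
        Message={
            "Subject": {"Data": "hi"},
            "Body": {"Text": {"Data": "hello"}},
        },
    )
    # get_send_statistics is the call flipped to [X] above
    return ses.get_send_statistics()["SendDataPoints"]
```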
## sesv2

-0% implemented
+
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_dedicated_ip_pool @@ -6808,11 +7520,15 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_configuration_set_event_destination +
## shield

-0% implemented
+
+0% implemented + - [ ] associate_drt_log_bucket - [ ] associate_drt_role +- [ ] associate_health_check - [ ] create_protection - [ ] create_subscription - [ ] delete_protection @@ -6824,14 +7540,18 @@ - [ ] describe_subscription - [ ] disassociate_drt_log_bucket - [ ] disassociate_drt_role +- [ ] disassociate_health_check - [ ] get_subscription_state - [ ] list_attacks - [ ] list_protections - [ ] update_emergency_contact_settings - [ ] update_subscription +
## signer

-0% implemented
+
+0% implemented + - [ ] cancel_signing_profile - [ ] describe_signing_job - [ ] get_signing_platform @@ -6844,9 +7564,12 @@ - [ ] start_signing_job - [ ] tag_resource - [ ] untag_resource +
## sms

-0% implemented
+
+0% implemented + - [ ] create_app - [ ] create_replication_job - [ ] delete_app @@ -6875,9 +7598,12 @@ - [ ] terminate_app - [ ] update_app - [ ] update_replication_job +
## sms-voice

-0% implemented
+
+0% implemented + - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] delete_configuration_set @@ -6886,9 +7612,12 @@ - [ ] list_configuration_sets - [ ] send_voice_message - [ ] update_configuration_set_event_destination +
## snowball

-0% implemented
+
+0% implemented + - [ ] cancel_cluster - [ ] cancel_job - [ ] create_address @@ -6908,9 +7637,12 @@ - [ ] list_jobs - [ ] update_cluster - [ ] update_job +
## sns

-63% implemented
+
+63% implemented + - [X] add_permission - [ ] check_if_phone_number_is_opted_out - [ ] confirm_subscription @@ -6944,9 +7676,12 @@ - [X] tag_resource - [X] unsubscribe - [X] untag_resource +
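Most of the core SNS calls are checked off; a minimal publish sketch (topic name and message are illustrative):

```python
import boto3
from moto import mock_sns


@mock_sns
def publish_alert():
    sns = boto3.client("sns", region_name="us-east-1")
    topic_arn = sns.create_topic(Name="alerts")["TopicArn"]
    sns.publish(TopicArn=topic_arn, Message="disk full")
    return sns.list_topics()["Topics"]
```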
## sqs

-85% implemented
+
+85% implemented + - [X] add_permission - [X] change_message_visibility - [ ] change_message_visibility_batch @@ -6967,9 +7702,12 @@ - [X] set_queue_attributes - [X] tag_queue - [X] untag_queue +
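At 85% coverage, the common SQS round-trip works end to end under the mock; a minimal sketch with an illustrative queue name:

```python
import boto3
from moto import mock_sqs


@mock_sqs
def queue_round_trip():
    sqs = boto3.client("sqs", region_name="us-east-1")
    queue_url = sqs.create_queue(QueueName="jobs")["QueueUrl"]
    sqs.send_message(QueueUrl=queue_url, MessageBody="job-1")
    messages = sqs.receive_message(QueueUrl=queue_url)["Messages"]
    sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=messages[0]["ReceiptHandle"])
    return messages
```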
## ssm

-11% implemented
+
+12% implemented + - [X] add_tags_to_resource - [ ] cancel_command - [ ] cancel_maintenance_window_execution @@ -7049,7 +7787,7 @@ - [ ] get_patch_baseline - [ ] get_patch_baseline_for_patch_group - [ ] get_service_setting -- [ ] label_parameter_version +- [X] label_parameter_version - [ ] list_association_versions - [ ] list_associations - [ ] list_command_invocations @@ -7092,22 +7830,31 @@ - [ ] update_patch_baseline - [ ] update_resource_data_sync - [ ] update_service_setting +
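label_parameter_version is newly checked off above; a sketch of labeling the first version of a parameter (the parameter name, value, and label are placeholders):

```python
import boto3
from moto import mock_ssm


@mock_ssm
def label_first_version():
    ssm = boto3.client("ssm", region_name="us-east-1")
    ssm.put_parameter(Name="/app/db-url", Value="postgres://db", Type="String")
    # label_parameter_version is the call flipped to [X] above
    ssm.label_parameter_version(Name="/app/db-url", ParameterVersion=1, Labels=["prod"])
    return ssm.get_parameter(Name="/app/db-url")["Parameter"]
```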
## sso

-0% implemented
+
+0% implemented
+
- [ ] get_role_credentials
- [ ] list_account_roles
- [ ] list_accounts
- [ ] logout
+
## sso-oidc

-0% implemented
+
+0% implemented
+
- [ ] create_token
- [ ] register_client
- [ ] start_device_authorization
+
## stepfunctions

-36% implemented
+
+36% implemented + - [ ] create_activity - [X] create_state_machine - [ ] delete_activity @@ -7130,9 +7877,12 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_state_machine +
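create_state_machine is among the checked-off calls; a hedged sketch with a trivial definition (the role ARN uses moto's default account id and is otherwise a placeholder):

```python
import json

import boto3
from moto import mock_stepfunctions

TRIVIAL_DEFINITION = {"StartAt": "Done", "States": {"Done": {"Type": "Succeed"}}}


@mock_stepfunctions
def create_trivial_machine():
    sfn = boto3.client("stepfunctions", region_name="us-east-1")
    resp = sfn.create_state_machine(
        name="simple",
        definition=json.dumps(TRIVIAL_DEFINITION),
        roleArn="arn:aws:iam::123456789012:role/sfn-role",  # placeholder role
    )
    return sfn.describe_state_machine(stateMachineArn=resp["stateMachineArn"])
```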
## storagegateway

-0% implemented
+
+0% implemented + - [ ] activate_gateway - [ ] add_cache - [ ] add_tags_to_resource @@ -7208,20 +7958,26 @@ - [ ] update_smb_security_strategy - [ ] update_snapshot_schedule - [ ] update_vtl_device_type +
## sts

-62% implemented
+
+62% implemented + - [X] assume_role -- [ ] assume_role_with_saml +- [X] assume_role_with_saml - [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_access_key_info -- [X] get_caller_identity +- [ ] get_caller_identity - [X] get_federation_token - [X] get_session_token +
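assume_role_with_saml joins assume_role in the checked-off column; a minimal assume_role sketch (any well-formed role ARN works against the mock):

```python
import boto3
from moto import mock_sts


@mock_sts
def short_lived_credentials():
    sts = boto3.client("sts", region_name="us-east-1")
    resp = sts.assume_role(
        RoleArn="arn:aws:iam::123456789012:role/demo",  # placeholder ARN
        RoleSessionName="demo-session",
    )
    return resp["Credentials"]
```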
## support

-0% implemented
+
+0% implemented + - [ ] add_attachments_to_set - [ ] add_communication_to_case - [ ] create_case @@ -7236,9 +7992,12 @@ - [ ] describe_trusted_advisor_checks - [ ] refresh_trusted_advisor_check - [ ] resolve_case +
## swf

-48% implemented
+
+51% implemented + - [ ] count_closed_workflow_executions - [ ] count_open_workflow_executions - [X] count_pending_activity_tasks @@ -7272,34 +8031,48 @@ - [X] start_workflow_execution - [ ] tag_resource - [X] terminate_workflow_execution -- [X] undeprecate_activity_type +- [ ] undeprecate_activity_type - [X] undeprecate_domain -- [X] undeprecate_workflow_type +- [ ] undeprecate_workflow_type - [ ] untag_resource +
## textract

-0% implemented
+
+0% implemented
+
- [ ] analyze_document
- [ ] detect_document_text
- [ ] get_document_analysis
- [ ] get_document_text_detection
- [ ] start_document_analysis
- [ ] start_document_text_detection
+
## transcribe

-0% implemented
+
+0% implemented + - [ ] create_vocabulary +- [ ] create_vocabulary_filter - [ ] delete_transcription_job - [ ] delete_vocabulary +- [ ] delete_vocabulary_filter - [ ] get_transcription_job - [ ] get_vocabulary +- [ ] get_vocabulary_filter - [ ] list_transcription_jobs - [ ] list_vocabularies +- [ ] list_vocabulary_filters - [ ] start_transcription_job - [ ] update_vocabulary +- [ ] update_vocabulary_filter +
## transfer

-0% implemented
+
+0% implemented + - [ ] create_server - [ ] create_user - [ ] delete_server @@ -7318,17 +8091,27 @@ - [ ] untag_resource - [ ] update_server - [ ] update_user +
## translate

-0% implemented
+
+0% implemented + - [ ] delete_terminology +- [ ] describe_text_translation_job - [ ] get_terminology - [ ] import_terminology - [ ] list_terminologies +- [ ] list_text_translation_jobs +- [ ] start_text_translation_job +- [ ] stop_text_translation_job - [ ] translate_text +
## waf

-0% implemented
+
+0% implemented + - [ ] create_byte_match_set - [ ] create_geo_match_set - [ ] create_ip_set @@ -7405,9 +8188,12 @@ - [ ] update_sql_injection_match_set - [ ] update_web_acl - [ ] update_xss_match_set +
## waf-regional

-0% implemented
+
+0% implemented + - [ ] associate_web_acl - [ ] create_byte_match_set - [ ] create_geo_match_set @@ -7488,9 +8274,12 @@ - [ ] update_sql_injection_match_set - [ ] update_web_acl - [ ] update_xss_match_set +
## wafv2

-0% implemented
+
+0% implemented + - [ ] associate_web_acl - [ ] check_capacity - [ ] create_ip_set @@ -7527,9 +8316,12 @@ - [ ] update_regex_pattern_set - [ ] update_rule_group - [ ] update_web_acl +
## workdocs

-0% implemented
+
+0% implemented + - [ ] abort_document_version_upload - [ ] activate_user - [ ] add_resource_permissions @@ -7571,9 +8363,12 @@ - [ ] update_document_version - [ ] update_folder - [ ] update_user +
## worklink

-0% implemented
+
+0% implemented + - [ ] associate_domain - [ ] associate_website_authorization_provider - [ ] associate_website_certificate_authority @@ -7604,15 +8399,19 @@ - [ ] update_domain_metadata - [ ] update_fleet_metadata - [ ] update_identity_provider_configuration +
## workmail

-0% implemented
+
+0% implemented + - [ ] associate_delegate_to_resource - [ ] associate_member_to_group - [ ] create_alias - [ ] create_group - [ ] create_resource - [ ] create_user +- [ ] delete_access_control_rule - [ ] delete_alias - [ ] delete_group - [ ] delete_mailbox_permissions @@ -7625,7 +8424,9 @@ - [ ] describe_user - [ ] disassociate_delegate_from_resource - [ ] disassociate_member_from_group +- [ ] get_access_control_effect - [ ] get_mailbox_details +- [ ] list_access_control_rules - [ ] list_aliases - [ ] list_group_members - [ ] list_groups @@ -7633,20 +8434,30 @@ - [ ] list_organizations - [ ] list_resource_delegates - [ ] list_resources +- [ ] list_tags_for_resource - [ ] list_users +- [ ] put_access_control_rule - [ ] put_mailbox_permissions - [ ] register_to_work_mail - [ ] reset_password +- [ ] tag_resource +- [ ] untag_resource - [ ] update_mailbox_quota - [ ] update_primary_email_address - [ ] update_resource +
## workmailmessageflow

-0% implemented
+
+0% implemented
+
- [ ] get_raw_message_content
+
## workspaces

-0% implemented
+
+0% implemented + - [ ] associate_ip_groups - [ ] authorize_ip_rules - [ ] copy_workspace_image @@ -7671,6 +8482,7 @@ - [ ] disassociate_ip_groups - [ ] import_workspace_image - [ ] list_available_management_cidr_ranges +- [ ] migrate_workspace - [ ] modify_account - [ ] modify_client_properties - [ ] modify_selfservice_permissions @@ -7687,9 +8499,12 @@ - [ ] stop_workspaces - [ ] terminate_workspaces - [ ] update_rules_of_ip_group +
## xray

-0% implemented
+
+0% implemented + - [ ] batch_get_traces - [ ] create_group - [ ] create_sampling_rule @@ -7710,3 +8525,4 @@ - [ ] put_trace_segments - [ ] update_group - [ ] update_sampling_rule +
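The source changes that follow rename the EC2 Instance Connect mock from mock_ec2_instance_connect to mock_ec2instanceconnect and move the canned send_ssh_public_key response into the backend. A minimal sketch of the decorator under its new name; the instance id and key material are placeholders:

```python
import boto3
from moto import mock_ec2instanceconnect


@mock_ec2instanceconnect
def push_ssh_key():
    client = boto3.client("ec2-instance-connect", region_name="us-east-1")
    # the mocked backend returns a canned success payload
    return client.send_ssh_public_key(
        InstanceId="i-0123456789abcdef0",
        InstanceOSUser="ec2-user",
        SSHPublicKey="ssh-rsa AAAAB3NzaC1yc2E placeholder-key",
        AvailabilityZone="us-east-1a",
    )
```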
diff --git a/moto/__init__.py b/moto/__init__.py index 79c1555d3..4f8f08eda 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -49,9 +49,7 @@ mock_dynamodbstreams = lazy_load(".dynamodbstreams", "mock_dynamodbstreams") mock_elasticbeanstalk = lazy_load(".elasticbeanstalk", "mock_elasticbeanstalk") mock_ec2 = lazy_load(".ec2", "mock_ec2") mock_ec2_deprecated = lazy_load(".ec2", "mock_ec2_deprecated") -mock_ec2_instance_connect = lazy_load( - ".ec2_instance_connect", "mock_ec2_instance_connect" -) +mock_ec2instanceconnect = lazy_load(".ec2instanceconnect", "mock_ec2instanceconnect") mock_ecr = lazy_load(".ecr", "mock_ecr") mock_ecr_deprecated = lazy_load(".ecr", "mock_ecr_deprecated") mock_ecs = lazy_load(".ecs", "mock_ecs") @@ -75,6 +73,7 @@ mock_kms = lazy_load(".kms", "mock_kms") mock_kms_deprecated = lazy_load(".kms", "mock_kms_deprecated") mock_logs = lazy_load(".logs", "mock_logs") mock_logs_deprecated = lazy_load(".logs", "mock_logs_deprecated") +mock_managedblockchain = lazy_load(".managedblockchain", "mock_managedblockchain") mock_opsworks = lazy_load(".opsworks", "mock_opsworks") mock_opsworks_deprecated = lazy_load(".opsworks", "mock_opsworks_deprecated") mock_organizations = lazy_load(".organizations", "mock_organizations") diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index d39b719d6..4513c75ab 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -56,8 +56,10 @@ class Deployment(BaseModel, dict): class IntegrationResponse(BaseModel, dict): - def __init__(self, status_code, selection_pattern=None): - self["responseTemplates"] = {"application/json": None} + def __init__(self, status_code, selection_pattern=None, response_templates=None): + if response_templates is None: + response_templates = {"application/json": None} + self["responseTemplates"] = response_templates self["statusCode"] = status_code if selection_pattern: self["selectionPattern"] = selection_pattern @@ -72,8 +74,14 @@ class Integration(BaseModel, dict): self["requestTemplates"] = request_templates self["integrationResponses"] = {"200": IntegrationResponse(200)} - def create_integration_response(self, status_code, selection_pattern): - integration_response = IntegrationResponse(status_code, selection_pattern) + def create_integration_response( + self, status_code, selection_pattern, response_templates + ): + if response_templates == {}: + response_templates = None + integration_response = IntegrationResponse( + status_code, selection_pattern, response_templates + ) self["integrationResponses"][status_code] = integration_response return integration_response @@ -956,7 +964,7 @@ class APIGatewayBackend(BaseBackend): raise InvalidRequestInput() integration = self.get_integration(function_id, resource_id, method_type) integration_response = integration.create_integration_response( - status_code, selection_pattern + status_code, selection_pattern, response_templates ) return integration_response diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index b757672d0..1da12a09c 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -419,11 +419,8 @@ class FakeAutoScalingGroup(BaseModel): curr_instance_count = len(self.active_instances()) if self.desired_capacity == curr_instance_count: - self.autoscaling_backend.update_attached_elbs(self.name) - self.autoscaling_backend.update_attached_target_groups(self.name) - return - - if self.desired_capacity > curr_instance_count: + pass # Nothing to do here + elif self.desired_capacity > 
curr_instance_count: # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) @@ -447,6 +444,7 @@ class FakeAutoScalingGroup(BaseModel): self.instance_states = list( set(self.instance_states) - set(instances_to_remove) ) + if self.name in self.autoscaling_backend.autoscaling_groups: self.autoscaling_backend.update_attached_elbs(self.name) self.autoscaling_backend.update_attached_target_groups(self.name) @@ -695,6 +693,7 @@ class AutoScalingBackend(BaseBackend): ) group.instance_states.extend(new_instances) self.update_attached_elbs(group.name) + self.update_attached_target_groups(group.name) def set_instance_health( self, instance_id, health_status, should_respect_grace_period @@ -938,8 +937,7 @@ class AutoScalingBackend(BaseBackend): standby_instances.append(instance_state) if should_decrement: group.desired_capacity = group.desired_capacity - len(instance_ids) - else: - group.set_desired_capacity(group.desired_capacity) + group.set_desired_capacity(group.desired_capacity) return standby_instances, original_size, group.desired_capacity def exit_standby_instances(self, group_name, instance_ids): @@ -951,6 +949,7 @@ class AutoScalingBackend(BaseBackend): instance_state.lifecycle_state = "InService" standby_instances.append(instance_state) group.desired_capacity = group.desired_capacity + len(instance_ids) + group.set_desired_capacity(group.desired_capacity) return standby_instances, original_size, group.desired_capacity def terminate_instance(self, instance_id, should_decrement): diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 829aa76d2..360c47528 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -5,6 +5,8 @@ import time from collections import defaultdict import copy import datetime +from gzip import GzipFile + import docker import docker.errors import hashlib @@ -988,6 +990,28 @@ class LambdaBackend(BaseBackend): func = self._lambdas.get_arn(function_arn) return func.invoke(json.dumps(event), {}, {}) + def send_log_event( + self, function_arn, filter_name, log_group_name, log_stream_name, log_events + ): + data = { + "messageType": "DATA_MESSAGE", + "owner": ACCOUNT_ID, + "logGroup": log_group_name, + "logStream": log_stream_name, + "subscriptionFilters": [filter_name], + "logEvents": log_events, + } + + output = io.BytesIO() + with GzipFile(fileobj=output, mode="w") as f: + f.write(json.dumps(data, separators=(",", ":")).encode("utf-8")) + payload_gz_encoded = base64.b64encode(output.getvalue()).decode("utf-8") + + event = {"awslogs": {"data": payload_gz_encoded}} + + func = self._lambdas.get_arn(function_arn) + return func.invoke(json.dumps(event), {}, {}) + def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/moto/backends.py b/moto/backends.py index bb71429eb..44534d574 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -21,7 +21,7 @@ BACKENDS = { "dynamodb2": ("dynamodb2", "dynamodb_backends2"), "dynamodbstreams": ("dynamodbstreams", "dynamodbstreams_backends"), "ec2": ("ec2", "ec2_backends"), - "ec2_instance_connect": ("ec2_instance_connect", "ec2_instance_connect_backends"), + "ec2instanceconnect": ("ec2instanceconnect", "ec2instanceconnect_backends"), "ecr": ("ecr", "ecr_backends"), "ecs": ("ecs", "ecs_backends"), "elasticbeanstalk": ("elasticbeanstalk", "eb_backends"), @@ -39,6 +39,7 @@ BACKENDS = { "kms": ("kms", "kms_backends"), "lambda": ("awslambda", "lambda_backends"), "logs": ("logs", "logs_backends"), + "managedblockchain": ("managedblockchain", 
"managedblockchain_backends"), "moto_api": ("core", "moto_api_backends"), "opsworks": ("opsworks", "opsworks_backends"), "organizations": ("organizations", "organizations_backends"), diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py index 18e498a90..334cd913a 100644 --- a/moto/dynamodb2/exceptions.py +++ b/moto/dynamodb2/exceptions.py @@ -149,3 +149,18 @@ class IncorrectDataType(MockValidationException): def __init__(self): super(IncorrectDataType, self).__init__(self.inc_data_type_msg) + + +class ConditionalCheckFailed(ValueError): + msg = "The conditional request failed" + + def __init__(self): + super(ConditionalCheckFailed, self).__init__(self.msg) + + +class TransactionCanceledException(ValueError): + cancel_reason_msg = "Transaction cancelled, please refer cancellation reasons for specific reasons [{}]" + + def __init__(self, errors): + msg = self.cancel_reason_msg.format(", ".join([str(err) for err in errors])) + super(TransactionCanceledException, self).__init__(msg) diff --git a/moto/dynamodb2/models/__init__.py b/moto/dynamodb2/models/__init__.py index ea16f456f..a5277800f 100644 --- a/moto/dynamodb2/models/__init__.py +++ b/moto/dynamodb2/models/__init__.py @@ -18,6 +18,8 @@ from moto.dynamodb2.exceptions import ( InvalidIndexNameError, ItemSizeTooLarge, ItemSizeToUpdateTooLarge, + ConditionalCheckFailed, + TransactionCanceledException, ) from moto.dynamodb2.models.utilities import bytesize from moto.dynamodb2.models.dynamo_type import DynamoType @@ -316,6 +318,12 @@ class Table(BaseModel): } self.set_stream_specification(streams) self.lambda_event_source_mappings = {} + self.continuous_backups = { + "ContinuousBackupsStatus": "ENABLED", # One of 'ENABLED'|'DISABLED', it's enabled by default + "PointInTimeRecoveryDescription": { + "PointInTimeRecoveryStatus": "DISABLED" # One of 'ENABLED'|'DISABLED' + }, + } @classmethod def create_from_cloudformation_json( @@ -453,14 +461,14 @@ class Table(BaseModel): if not overwrite: if not get_expected(expected).expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed condition_op = get_filter_expression( condition_expression, expression_attribute_names, expression_attribute_values, ) if not condition_op.expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed if range_value: self.items[hash_value][range_value] = item @@ -824,6 +832,42 @@ class DynamoDBBackend(BaseBackend): required_table = self.tables[table] return required_table.tags + def list_tables(self, limit, exclusive_start_table_name): + all_tables = list(self.tables.keys()) + + if exclusive_start_table_name: + try: + last_table_index = all_tables.index(exclusive_start_table_name) + except ValueError: + start = len(all_tables) + else: + start = last_table_index + 1 + else: + start = 0 + + if limit: + tables = all_tables[start : start + limit] + else: + tables = all_tables[start:] + + if limit and len(all_tables) > start + limit: + return tables, tables[-1] + return tables, None + + def describe_table(self, name): + table = self.tables[name] + return table.describe(base_key="Table") + + def update_table(self, name, global_index, throughput, stream_spec): + table = self.get_table(name) + if global_index: + table = self.update_table_global_indexes(name, global_index) + if throughput: + table = self.update_table_throughput(name, throughput) + if stream_spec: + table = self.update_table_streams(name, stream_spec) + return table + def update_table_throughput(self, name, throughput): 
table = self.tables[name] table.throughput = throughput @@ -1070,14 +1114,14 @@ class DynamoDBBackend(BaseBackend): expected = {} if not get_expected(expected).expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed condition_op = get_filter_expression( condition_expression, expression_attribute_names, expression_attribute_values, ) if not condition_op.expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed # Update does not fail on new items, so create one if item is None: @@ -1130,11 +1174,11 @@ class DynamoDBBackend(BaseBackend): expression_attribute_values, ) if not condition_op.expr(item): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed return table.delete_item(hash_value, range_value) - def update_ttl(self, table_name, ttl_spec): + def update_time_to_live(self, table_name, ttl_spec): table = self.tables.get(table_name) if table is None: raise JsonRESTError("ResourceNotFound", "Table not found") @@ -1151,7 +1195,7 @@ class DynamoDBBackend(BaseBackend): table.ttl["TimeToLiveStatus"] = "DISABLED" table.ttl["AttributeName"] = ttl_spec["AttributeName"] - def describe_ttl(self, table_name): + def describe_time_to_live(self, table_name): table = self.tables.get(table_name) if table is None: raise JsonRESTError("ResourceNotFound", "Table not found") @@ -1161,8 +1205,9 @@ class DynamoDBBackend(BaseBackend): def transact_write_items(self, transact_items): # Create a backup in case any of the transactions fail original_table_state = copy.deepcopy(self.tables) - try: - for item in transact_items: + errors = [] + for item in transact_items: + try: if "ConditionCheck" in item: item = item["ConditionCheck"] key = item["Key"] @@ -1182,7 +1227,7 @@ class DynamoDBBackend(BaseBackend): expression_attribute_values, ) if not condition_op.expr(current): - raise ValueError("The conditional request failed") + raise ConditionalCheckFailed() elif "Put" in item: item = item["Put"] attrs = item["Item"] @@ -1241,10 +1286,55 @@ class DynamoDBBackend(BaseBackend): ) else: raise ValueError - except: # noqa: E722 Do not use bare except - # Rollback to the original state, and reraise the error + errors.append(None) + except Exception as e: # noqa: E722 Do not use bare except + errors.append(type(e).__name__) + if any(errors): + # Rollback to the original state, and reraise the errors self.tables = original_table_state - raise + raise TransactionCanceledException(errors) + + def describe_continuous_backups(self, table_name): + table = self.get_table(table_name) + + return table.continuous_backups + + def update_continuous_backups(self, table_name, point_in_time_spec): + table = self.get_table(table_name) + + if ( + point_in_time_spec["PointInTimeRecoveryEnabled"] + and table.continuous_backups["PointInTimeRecoveryDescription"][ + "PointInTimeRecoveryStatus" + ] + == "DISABLED" + ): + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "ENABLED", + "EarliestRestorableDateTime": unix_time(), + "LatestRestorableDateTime": unix_time(), + } + elif not point_in_time_spec["PointInTimeRecoveryEnabled"]: + table.continuous_backups["PointInTimeRecoveryDescription"] = { + "PointInTimeRecoveryStatus": "DISABLED" + } + + return table.continuous_backups + + ###################### + # LIST of methods where the logic completely resides in responses.py + # Duplicated here so that the implementation coverage script is aware + # TODO: Move logic here + ###################### + + 
def batch_get_item(self): + pass + + def batch_write_item(self): + pass + + def transact_get_items(self): + pass dynamodb_backends = {} diff --git a/moto/dynamodb2/parsing/executors.py b/moto/dynamodb2/parsing/executors.py index 8c51c9cec..2f2f2bb82 100644 --- a/moto/dynamodb2/parsing/executors.py +++ b/moto/dynamodb2/parsing/executors.py @@ -1,6 +1,10 @@ from abc import abstractmethod -from moto.dynamodb2.exceptions import IncorrectOperandType, IncorrectDataType +from moto.dynamodb2.exceptions import ( + IncorrectOperandType, + IncorrectDataType, + ProvidedKeyDoesNotExist, +) from moto.dynamodb2.models import DynamoType from moto.dynamodb2.models.dynamo_type import DDBTypeConversion, DDBType from moto.dynamodb2.parsing.ast_nodes import ( @@ -193,7 +197,18 @@ class AddExecutor(NodeExecutor): value_to_add = self.get_action_value() if isinstance(value_to_add, DynamoType): if value_to_add.is_set(): - current_string_set = self.get_item_at_end_of_path(item) + try: + current_string_set = self.get_item_at_end_of_path(item) + except ProvidedKeyDoesNotExist: + current_string_set = DynamoType({value_to_add.type: []}) + SetExecutor.set( + item_part_to_modify_with_set=self.get_item_before_end_of_path( + item + ), + element_to_set=self.get_element_to_action(), + value_to_set=current_string_set, + expression_attribute_names=self.expression_attribute_names, + ) assert isinstance(current_string_set, DynamoType) if not current_string_set.type == value_to_add.type: raise IncorrectDataType() @@ -204,7 +219,11 @@ class AddExecutor(NodeExecutor): else: current_string_set.value.append(value) elif value_to_add.type == DDBType.NUMBER: - existing_value = self.get_item_at_end_of_path(item) + try: + existing_value = self.get_item_at_end_of_path(item) + except ProvidedKeyDoesNotExist: + existing_value = DynamoType({DDBType.NUMBER: "0"}) + assert isinstance(existing_value, DynamoType) if not existing_value.type == DDBType.NUMBER: raise IncorrectDataType() diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index b703f2935..aec7c7560 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -9,7 +9,12 @@ import six from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .exceptions import InvalidIndexNameError, ItemSizeTooLarge, MockValidationException +from .exceptions import ( + InvalidIndexNameError, + ItemSizeTooLarge, + MockValidationException, + TransactionCanceledException, +) from moto.dynamodb2.models import dynamodb_backends, dynamo_json_dump @@ -92,27 +97,14 @@ class DynamoHandler(BaseResponse): def list_tables(self): body = self.body limit = body.get("Limit", 100) - all_tables = list(self.dynamodb_backend.tables.keys()) - exclusive_start_table_name = body.get("ExclusiveStartTableName") - if exclusive_start_table_name: - try: - last_table_index = all_tables.index(exclusive_start_table_name) - except ValueError: - start = len(all_tables) - else: - start = last_table_index + 1 - else: - start = 0 - - if limit: - tables = all_tables[start : start + limit] - else: - tables = all_tables[start:] + tables, last_eval = self.dynamodb_backend.list_tables( + limit, exclusive_start_table_name + ) response = {"TableNames": tables} - if limit and len(all_tables) > start + limit: - response["LastEvaluatedTableName"] = tables[-1] + if last_eval: + response["LastEvaluatedTableName"] = last_eval return dynamo_json_dump(response) @@ -232,33 +224,29 @@ class DynamoHandler(BaseResponse): def update_table(self): name = 
self.body["TableName"] - table = self.dynamodb_backend.get_table(name) - if "GlobalSecondaryIndexUpdates" in self.body: - table = self.dynamodb_backend.update_table_global_indexes( - name, self.body["GlobalSecondaryIndexUpdates"] + global_index = self.body.get("GlobalSecondaryIndexUpdates", None) + throughput = self.body.get("ProvisionedThroughput", None) + stream_spec = self.body.get("StreamSpecification", None) + try: + table = self.dynamodb_backend.update_table( + name=name, + global_index=global_index, + throughput=throughput, + stream_spec=stream_spec, ) - if "ProvisionedThroughput" in self.body: - throughput = self.body["ProvisionedThroughput"] - table = self.dynamodb_backend.update_table_throughput(name, throughput) - if "StreamSpecification" in self.body: - try: - table = self.dynamodb_backend.update_table_streams( - name, self.body["StreamSpecification"] - ) - except ValueError: - er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException" - return self.error(er, "Cannot enable stream") - - return dynamo_json_dump(table.describe()) + return dynamo_json_dump(table.describe()) + except ValueError: + er = "com.amazonaws.dynamodb.v20111205#ResourceInUseException" + return self.error(er, "Cannot enable stream") def describe_table(self): name = self.body["TableName"] try: - table = self.dynamodb_backend.tables[name] + table = self.dynamodb_backend.describe_table(name) + return dynamo_json_dump(table) except KeyError: er = "com.amazonaws.dynamodb.v20111205#ResourceNotFoundException" return self.error(er, "Requested resource not found") - return dynamo_json_dump(table.describe(base_key="Table")) def put_item(self): name = self.body["TableName"] @@ -850,14 +838,14 @@ class DynamoHandler(BaseResponse): name = self.body["TableName"] ttl_spec = self.body["TimeToLiveSpecification"] - self.dynamodb_backend.update_ttl(name, ttl_spec) + self.dynamodb_backend.update_time_to_live(name, ttl_spec) return json.dumps({"TimeToLiveSpecification": ttl_spec}) def describe_time_to_live(self): name = self.body["TableName"] - ttl_spec = self.dynamodb_backend.describe_ttl(name) + ttl_spec = self.dynamodb_backend.describe_time_to_live(name) return json.dumps({"TimeToLiveDescription": ttl_spec}) @@ -929,10 +917,37 @@ class DynamoHandler(BaseResponse): transact_items = self.body["TransactItems"] try: self.dynamodb_backend.transact_write_items(transact_items) - except ValueError: - er = "com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException" - return self.error( - er, "A condition specified in the operation could not be evaluated." 
- ) + except TransactionCanceledException as e: + er = "com.amazonaws.dynamodb.v20111205#TransactionCanceledException" + return self.error(er, str(e)) response = {"ConsumedCapacity": [], "ItemCollectionMetrics": {}} return dynamo_json_dump(response) + + def describe_continuous_backups(self): + name = self.body["TableName"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.describe_continuous_backups(name) + + return json.dumps({"ContinuousBackupsDescription": response}) + + def update_continuous_backups(self): + name = self.body["TableName"] + point_in_time_spec = self.body["PointInTimeRecoverySpecification"] + + if self.dynamodb_backend.get_table(name) is None: + return self.error( + "com.amazonaws.dynamodb.v20111205#TableNotFoundException", + "Table not found: {}".format(name), + ) + + response = self.dynamodb_backend.update_continuous_backups( + name, point_in_time_spec + ) + + return json.dumps({"ContinuousBackupsDescription": response}) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index e94d2877c..bab4636af 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -560,8 +560,10 @@ class Instance(TaggedEC2Resource, BotoInstance): # worst case we'll get IP address exaustion... rarely pass - def add_block_device(self, size, device_path): - volume = self.ec2_backend.create_volume(size, self.region_name) + def add_block_device(self, size, device_path, snapshot_id=None, encrypted=False): + volume = self.ec2_backend.create_volume( + size, self.region_name, snapshot_id, encrypted + ) self.ec2_backend.attach_volume(volume.id, self.id, device_path) def setup_defaults(self): @@ -891,8 +893,12 @@ class InstanceBackend(object): new_instance.add_tags(instance_tags) if "block_device_mappings" in kwargs: for block_device in kwargs["block_device_mappings"]: + device_name = block_device["DeviceName"] + volume_size = block_device["Ebs"].get("VolumeSize") + snapshot_id = block_device["Ebs"].get("SnapshotId") + encrypted = block_device["Ebs"].get("Encrypted", False) new_instance.add_block_device( - block_device["Ebs"]["VolumeSize"], block_device["DeviceName"] + volume_size, device_name, snapshot_id, encrypted ) else: new_instance.setup_defaults() diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index de17f0609..adcbfa741 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -4,10 +4,16 @@ from boto.ec2.instancetype import InstanceType from moto.autoscaling import autoscaling_backends from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores -from moto.ec2.utils import filters_from_querystring, dict_from_querystring +from moto.ec2.exceptions import MissingParameterError +from moto.ec2.utils import ( + filters_from_querystring, + dict_from_querystring, +) from moto.elbv2 import elbv2_backends from moto.core import ACCOUNT_ID +from copy import deepcopy + class InstanceResponse(BaseResponse): def describe_instances(self): @@ -44,40 +50,31 @@ class InstanceResponse(BaseResponse): owner_id = self._get_param("OwnerId") user_data = self._get_param("UserData") security_group_names = self._get_multi_param("SecurityGroup") - security_group_ids = self._get_multi_param("SecurityGroupId") - nics = dict_from_querystring("NetworkInterface", self.querystring) - instance_type = self._get_param("InstanceType", if_none="m1.small") - placement = 
self._get_param("Placement.AvailabilityZone") - subnet_id = self._get_param("SubnetId") - private_ip = self._get_param("PrivateIpAddress") - associate_public_ip = self._get_param("AssociatePublicIpAddress") - key_name = self._get_param("KeyName") - ebs_optimized = self._get_param("EbsOptimized") or False - instance_initiated_shutdown_behavior = self._get_param( - "InstanceInitiatedShutdownBehavior" - ) - tags = self._parse_tag_specification("TagSpecification") - region_name = self.region + kwargs = { + "instance_type": self._get_param("InstanceType", if_none="m1.small"), + "placement": self._get_param("Placement.AvailabilityZone"), + "region_name": self.region, + "subnet_id": self._get_param("SubnetId"), + "owner_id": owner_id, + "key_name": self._get_param("KeyName"), + "security_group_ids": self._get_multi_param("SecurityGroupId"), + "nics": dict_from_querystring("NetworkInterface", self.querystring), + "private_ip": self._get_param("PrivateIpAddress"), + "associate_public_ip": self._get_param("AssociatePublicIpAddress"), + "tags": self._parse_tag_specification("TagSpecification"), + "ebs_optimized": self._get_param("EbsOptimized") or False, + "instance_initiated_shutdown_behavior": self._get_param( + "InstanceInitiatedShutdownBehavior" + ), + } + + mappings = self._parse_block_device_mapping() + if mappings: + kwargs["block_device_mappings"] = mappings if self.is_not_dryrun("RunInstance"): new_reservation = self.ec2_backend.add_instances( - image_id, - min_count, - user_data, - security_group_names, - instance_type=instance_type, - placement=placement, - region_name=region_name, - subnet_id=subnet_id, - owner_id=owner_id, - key_name=key_name, - security_group_ids=security_group_ids, - nics=nics, - private_ip=private_ip, - associate_public_ip=associate_public_ip, - tags=tags, - ebs_optimized=ebs_optimized, - instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, + image_id, min_count, user_data, security_group_names, **kwargs ) template = self.response_template(EC2_RUN_INSTANCES) @@ -272,6 +269,58 @@ class InstanceResponse(BaseResponse): ) return EC2_MODIFY_INSTANCE_ATTRIBUTE + def _parse_block_device_mapping(self): + device_mappings = self._get_list_prefix("BlockDeviceMapping") + mappings = [] + for device_mapping in device_mappings: + self._validate_block_device_mapping(device_mapping) + device_template = deepcopy(BLOCK_DEVICE_MAPPING_TEMPLATE) + device_template["VirtualName"] = device_mapping.get("virtual_name") + device_template["DeviceName"] = device_mapping.get("device_name") + device_template["Ebs"]["SnapshotId"] = device_mapping.get( + "ebs._snapshot_id" + ) + device_template["Ebs"]["VolumeSize"] = device_mapping.get( + "ebs._volume_size" + ) + device_template["Ebs"]["DeleteOnTermination"] = device_mapping.get( + "ebs._delete_on_termination", False + ) + device_template["Ebs"]["VolumeType"] = device_mapping.get( + "ebs._volume_type" + ) + device_template["Ebs"]["Iops"] = device_mapping.get("ebs._iops") + device_template["Ebs"]["Encrypted"] = device_mapping.get( + "ebs._encrypted", False + ) + mappings.append(device_template) + + return mappings + + @staticmethod + def _validate_block_device_mapping(device_mapping): + + if not any(mapping for mapping in device_mapping if mapping.startswith("ebs.")): + raise MissingParameterError("ebs") + if ( + "ebs._volume_size" not in device_mapping + and "ebs._snapshot_id" not in device_mapping + ): + raise MissingParameterError("size or snapshotId") + + +BLOCK_DEVICE_MAPPING_TEMPLATE = { + "VirtualName": None, + 
"DeviceName": None, + "Ebs": { + "SnapshotId": None, + "VolumeSize": None, + "DeleteOnTermination": None, + "VolumeType": None, + "Iops": None, + "Encrypted": None, + }, +} EC2_RUN_INSTANCES = ( """ diff --git a/moto/ec2_instance_connect/__init__.py b/moto/ec2_instance_connect/__init__.py deleted file mode 100644 index c20d59cfa..000000000 --- a/moto/ec2_instance_connect/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from ..core.models import base_decorator -from .models import ec2_instance_connect_backends - -mock_ec2_instance_connect = base_decorator(ec2_instance_connect_backends) diff --git a/moto/ec2_instance_connect/models.py b/moto/ec2_instance_connect/models.py deleted file mode 100644 index f3dbbe9f8..000000000 --- a/moto/ec2_instance_connect/models.py +++ /dev/null @@ -1,11 +0,0 @@ -import boto.ec2 -from moto.core import BaseBackend - - -class Ec2InstanceConnectBackend(BaseBackend): - pass - - -ec2_instance_connect_backends = {} -for region in boto.ec2.regions(): - ec2_instance_connect_backends[region.name] = Ec2InstanceConnectBackend() diff --git a/moto/ec2_instance_connect/responses.py b/moto/ec2_instance_connect/responses.py deleted file mode 100644 index 462f1fddc..000000000 --- a/moto/ec2_instance_connect/responses.py +++ /dev/null @@ -1,9 +0,0 @@ -import json -from moto.core.responses import BaseResponse - - -class Ec2InstanceConnectResponse(BaseResponse): - def send_ssh_public_key(self): - return json.dumps( - {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True} - ) diff --git a/moto/ec2instanceconnect/__init__.py b/moto/ec2instanceconnect/__init__.py new file mode 100644 index 000000000..c53958f7e --- /dev/null +++ b/moto/ec2instanceconnect/__init__.py @@ -0,0 +1,4 @@ +from ..core.models import base_decorator +from .models import ec2instanceconnect_backends + +mock_ec2instanceconnect = base_decorator(ec2instanceconnect_backends) diff --git a/moto/ec2instanceconnect/models.py b/moto/ec2instanceconnect/models.py new file mode 100644 index 000000000..43c01e7f2 --- /dev/null +++ b/moto/ec2instanceconnect/models.py @@ -0,0 +1,15 @@ +import boto.ec2 +import json +from moto.core import BaseBackend + + +class Ec2InstanceConnectBackend(BaseBackend): + def send_ssh_public_key(self): + return json.dumps( + {"RequestId": "example-2a47-4c91-9700-e37e85162cb6", "Success": True} + ) + + +ec2instanceconnect_backends = {} +for region in boto.ec2.regions(): + ec2instanceconnect_backends[region.name] = Ec2InstanceConnectBackend() diff --git a/moto/ec2instanceconnect/responses.py b/moto/ec2instanceconnect/responses.py new file mode 100644 index 000000000..9fce11aa2 --- /dev/null +++ b/moto/ec2instanceconnect/responses.py @@ -0,0 +1,11 @@ +from moto.core.responses import BaseResponse +from .models import ec2instanceconnect_backends + + +class Ec2InstanceConnectResponse(BaseResponse): + @property + def ec2instanceconnect_backend(self): + return ec2instanceconnect_backends[self.region] + + def send_ssh_public_key(self): + return self.ec2instanceconnect_backend.send_ssh_public_key() diff --git a/moto/ec2_instance_connect/urls.py b/moto/ec2instanceconnect/urls.py similarity index 100% rename from moto/ec2_instance_connect/urls.py rename to moto/ec2instanceconnect/urls.py diff --git a/moto/iot/models.py b/moto/iot/models.py index 2e9979bda..5b74b353c 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -857,8 +857,30 @@ class IoTBackend(BaseBackend): del self.thing_groups[thing_group.arn] def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = 
self.thing_groups.values() - return thing_groups + if recursive is None: + recursive = True + if name_prefix_filter is None: + name_prefix_filter = "" + if parent_group and parent_group not in [ + _.thing_group_name for _ in self.thing_groups.values() + ]: + raise ResourceNotFoundException() + thing_groups = [ + _ for _ in self.thing_groups.values() if _.parent_group_name == parent_group + ] + if recursive: + for g in thing_groups: + thing_groups.extend( + self.list_thing_groups( + parent_group=g.thing_group_name, + name_prefix_filter=None, + recursive=False, + ) + ) + # thing_groups = groups_to_process.values() + return [ + _ for _ in thing_groups if _.thing_group_name.startswith(name_prefix_filter) + ] def update_thing_group( self, thing_group_name, thing_group_properties, expected_version diff --git a/moto/iot/responses.py b/moto/iot/responses.py index c12d4b5c5..07a8c10c2 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -535,7 +535,7 @@ class IoTResponse(BaseResponse): # max_results = self._get_int_param("maxResults") parent_group = self._get_param("parentGroup") name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") + recursive = self._get_bool_param("recursive") thing_groups = self.iot_backend.list_thing_groups( parent_group=parent_group, name_prefix_filter=name_prefix_filter, diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py index 9f6628b0f..022b3a411 100644 --- a/moto/logs/exceptions.py +++ b/moto/logs/exceptions.py @@ -7,10 +7,10 @@ class LogsClientError(JsonRESTError): class ResourceNotFoundException(LogsClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 400 super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", "The specified resource does not exist" + "ResourceNotFoundException", msg or "The specified log group does not exist" ) @@ -28,3 +28,11 @@ class ResourceAlreadyExistsException(LogsClientError): super(ResourceAlreadyExistsException, self).__init__( "ResourceAlreadyExistsException", "The specified log group already exists" ) + + +class LimitExceededException(LogsClientError): + def __init__(self): + self.code = 400 + super(LimitExceededException, self).__init__( + "LimitExceededException", "Resource limit exceeded." 
+ ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 755605734..dcc0e85e1 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -6,6 +6,7 @@ from .exceptions import ( ResourceNotFoundException, ResourceAlreadyExistsException, InvalidParameterException, + LimitExceededException, ) @@ -57,6 +58,8 @@ class LogStream: 0 # I'm guessing this is token needed for sequenceToken by put_events ) self.events = [] + self.destination_arn = None + self.filter_name = None self.__class__._log_ids += 1 @@ -97,11 +100,32 @@ class LogStream: self.lastIngestionTime = int(unix_time_millis()) # TODO: make this match AWS if possible self.storedBytes += sum([len(log_event["message"]) for log_event in log_events]) - self.events += [ + events = [ LogEvent(self.lastIngestionTime, log_event) for log_event in log_events ] + self.events += events self.uploadSequenceToken += 1 + if self.destination_arn and self.destination_arn.split(":")[2] == "lambda": + from moto.awslambda import lambda_backends # due to circular dependency + + lambda_log_events = [ + { + "id": event.eventId, + "timestamp": event.timestamp, + "message": event.message, + } + for event in events + ] + + lambda_backends[self.region].send_log_event( + self.destination_arn, + self.filter_name, + log_group_name, + log_stream_name, + lambda_log_events, + ) + return "{:056d}".format(self.uploadSequenceToken) def get_log_events( @@ -227,6 +251,7 @@ class LogGroup: self.retention_in_days = kwargs.get( "RetentionInDays" ) # AWS defaults to Never Expire for log group retention + self.subscription_filters = [] def create_log_stream(self, log_stream_name): if log_stream_name in self.streams: @@ -386,6 +411,48 @@ class LogGroup: k: v for (k, v) in self.tags.items() if k not in tags_to_remove } + def describe_subscription_filters(self): + return self.subscription_filters + + def put_subscription_filter( + self, filter_name, filter_pattern, destination_arn, role_arn + ): + creation_time = int(unix_time_millis()) + + # only one subscription filter can be associated with a log group + if self.subscription_filters: + if self.subscription_filters[0]["filterName"] == filter_name: + creation_time = self.subscription_filters[0]["creationTime"] + else: + raise LimitExceededException + + for stream in self.streams.values(): + stream.destination_arn = destination_arn + stream.filter_name = filter_name + + self.subscription_filters = [ + { + "filterName": filter_name, + "logGroupName": self.name, + "filterPattern": filter_pattern, + "destinationArn": destination_arn, + "roleArn": role_arn, + "distribution": "ByLogStream", + "creationTime": creation_time, + } + ] + + def delete_subscription_filter(self, filter_name): + if ( + not self.subscription_filters + or self.subscription_filters[0]["filterName"] != filter_name + ): + raise ResourceNotFoundException( + "The specified subscription filter does not exist." 
diff --git a/moto/logs/responses.py b/moto/logs/responses.py
index 4631da2f9..9e6886a42 100644
--- a/moto/logs/responses.py
+++ b/moto/logs/responses.py
@@ -178,3 +178,33 @@ class LogsResponse(BaseResponse):
         tags = self._get_param("tags")
         self.logs_backend.untag_log_group(log_group_name, tags)
         return ""
+
+    def describe_subscription_filters(self):
+        log_group_name = self._get_param("logGroupName")
+
+        subscription_filters = self.logs_backend.describe_subscription_filters(
+            log_group_name
+        )
+
+        return json.dumps({"subscriptionFilters": subscription_filters})
+
+    def put_subscription_filter(self):
+        log_group_name = self._get_param("logGroupName")
+        filter_name = self._get_param("filterName")
+        filter_pattern = self._get_param("filterPattern")
+        destination_arn = self._get_param("destinationArn")
+        role_arn = self._get_param("roleArn")
+
+        self.logs_backend.put_subscription_filter(
+            log_group_name, filter_name, filter_pattern, destination_arn, role_arn
+        )
+
+        return ""
+
+    def delete_subscription_filter(self):
+        log_group_name = self._get_param("logGroupName")
+        filter_name = self._get_param("filterName")
+
+        self.logs_backend.delete_subscription_filter(log_group_name, filter_name)
+
+        return ""
diff --git a/moto/managedblockchain/__init__.py b/moto/managedblockchain/__init__.py
new file mode 100644
index 000000000..a95fa7351
--- /dev/null
+++ b/moto/managedblockchain/__init__.py
@@ -0,0 +1,9 @@
+from __future__ import unicode_literals
+from .models import managedblockchain_backends
+from ..core.models import base_decorator, deprecated_base_decorator
+
+managedblockchain_backend = managedblockchain_backends["us-east-1"]
+mock_managedblockchain = base_decorator(managedblockchain_backends)
+mock_managedblockchain_deprecated = deprecated_base_decorator(
+    managedblockchain_backends
+)
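A minimal smoke test of the new mock, assuming the decorator is re-exported from moto's top level the way the other services are (otherwise it can be imported from moto.managedblockchain directly):

    import boto3
    from moto import mock_managedblockchain


    @mock_managedblockchain
    def test_list_networks_starts_empty():
        client = boto3.client("managedblockchain", region_name="us-east-1")
        assert client.list_networks()["Networks"] == []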
diff --git a/moto/managedblockchain/exceptions.py b/moto/managedblockchain/exceptions.py
new file mode 100644
index 000000000..456eabc05
--- /dev/null
+++ b/moto/managedblockchain/exceptions.py
@@ -0,0 +1,48 @@
+from __future__ import unicode_literals
+from moto.core.exceptions import RESTError
+
+
+class ManagedBlockchainClientError(RESTError):
+    code = 400
+
+
+class BadRequestException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        super(BadRequestException, self).__init__(
+            "BadRequestException",
+            "An error occurred (BadRequestException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class InvalidRequestException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        super(InvalidRequestException, self).__init__(
+            "InvalidRequestException",
+            "An error occurred (InvalidRequestException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class ResourceNotFoundException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        self.code = 404
+        super(ResourceNotFoundException, self).__init__(
+            "ResourceNotFoundException",
+            "An error occurred (ResourceNotFoundException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
+
+
+class ResourceLimitExceededException(ManagedBlockchainClientError):
+    def __init__(self, pretty_called_method, operation_error):
+        self.code = 429
+        super(ResourceLimitExceededException, self).__init__(
+            "ResourceLimitExceededException",
+            "An error occurred (ResourceLimitExceededException) when calling the {0} operation: {1}".format(
+                pretty_called_method, operation_error
+            ),
+        )
diff --git a/moto/managedblockchain/models.py b/moto/managedblockchain/models.py
new file mode 100644
index 000000000..034e45d35
--- /dev/null
+++ b/moto/managedblockchain/models.py
@@ -0,0 +1,811 @@
+from __future__ import unicode_literals, division
+
+import datetime
+import re
+
+from boto3 import Session
+
+from moto.core import BaseBackend, BaseModel
+
+from .exceptions import (
+    BadRequestException,
+    ResourceNotFoundException,
+    InvalidRequestException,
+    ResourceLimitExceededException,
+)
+
+from .utils import (
+    get_network_id,
+    get_member_id,
+    get_proposal_id,
+    get_invitation_id,
+    member_name_exist_in_network,
+    number_of_members_in_network,
+    admin_password_ok,
+)
+
+FRAMEWORKS = [
+    "HYPERLEDGER_FABRIC",
+]
+
+FRAMEWORKVERSIONS = [
+    "1.2",
+]
+
+EDITIONS = {
+    "STARTER": {
+        "MaxMembers": 5,
+        "MaxNodesPerMember": 2,
+        "AllowedNodeInstanceTypes": ["bc.t3.small", "bc.t3.medium"],
+    },
+    "STANDARD": {
+        "MaxMembers": 14,
+        "MaxNodesPerMember": 3,
+        "AllowedNodeInstanceTypes": ["bc.t3", "bc.m5", "bc.c5"],
+    },
+}
+
+VOTEVALUES = ["YES", "NO"]
+
+
+class ManagedBlockchainNetwork(BaseModel):
+    def __init__(
+        self,
+        id,
+        name,
+        framework,
+        frameworkversion,
+        frameworkconfiguration,
+        voting_policy,
+        member_configuration,
+        region,
+        description=None,
+    ):
+        self.creationdate = datetime.datetime.utcnow()
+        self.id = id
+        self.name = name
+        self.description = description
+        self.framework = framework
+        self.frameworkversion = frameworkversion
+        self.frameworkconfiguration = frameworkconfiguration
+        self.voting_policy = voting_policy
+        self.member_configuration = member_configuration
+        self.region = region
+
+    @property
+    def network_name(self):
+        return self.name
+
+    @property
+    def network_framework(self):
+        return self.framework
+
+    @property
+    def network_framework_version(self):
+        return self.frameworkversion
+
+    @property
+    def network_creationdate(self):
+        return self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
+
+    @property
+    def network_description(self):
+        return self.description
+
+    @property
+    def network_edition(self):
+        return self.frameworkconfiguration["Fabric"]["Edition"]
+
+    @property
+    def vote_pol_proposal_duration(self):
+        return self.voting_policy["ApprovalThresholdPolicy"]["ProposalDurationInHours"]
+
+    @property
+    def vote_pol_threshold_percentage(self):
+        return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdPercentage"]
+
+    @property
+    def vote_pol_threshold_comparator(self):
+        return self.voting_policy["ApprovalThresholdPolicy"]["ThresholdComparator"]
+
+    def to_dict(self):
+        # Format for list_networks
+        d = {
+            "Id": self.id,
+            "Name": self.name,
+            "Framework": self.framework,
+            "FrameworkVersion": self.frameworkversion,
+            "Status": "AVAILABLE",
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+        }
+        if self.description is not None:
+            d["Description"] = self.description
+        return d
+
+    def get_format(self):
+        # Format for get_network
+        frameworkattributes = {
+            "Fabric": {
+                "OrderingServiceEndpoint": "orderer.{0}.managedblockchain.{1}.amazonaws.com:30001".format(
+                    self.id.lower(), self.region
+                ),
+                "Edition": self.frameworkconfiguration["Fabric"]["Edition"],
+            }
+        }
+
+        vpcendpointname = "com.amazonaws.{0}.managedblockchain.{1}".format(
+            self.region, self.id.lower()
+        )
+
+        d = {
+            "Id": self.id,
+            "Name": self.name,
+            "Framework": self.framework,
+            "FrameworkVersion": self.frameworkversion,
+            "FrameworkAttributes": frameworkattributes,
+            "VpcEndpointServiceName": vpcendpointname,
+            "VotingPolicy": self.voting_policy,
+            "Status": "AVAILABLE",
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+        }
+        if self.description is not None:
+            d["Description"] = self.description
+        return d
+
+
+class ManagedBlockchainProposal(BaseModel):
+    def __init__(
+        self,
+        id,
+        networkid,
+        memberid,
+        membername,
+        numofmembers,
+        actions,
+        network_expiration,
+        network_threshold,
+        network_threshold_comp,
+        description=None,
+    ):
+        # In general, passing all values instead of creating
+        # an apparatus to look them up
+        self.id = id
+        self.networkid = networkid
+        self.memberid = memberid
+        self.membername = membername
+        self.numofmembers = numofmembers
+        self.actions = actions
+        self.network_expiration = network_expiration
+        self.network_threshold = network_threshold
+        self.network_threshold_comp = network_threshold_comp
+        self.description = description
+
+        self.creationdate = datetime.datetime.utcnow()
+        self.expirationdate = self.creationdate + datetime.timedelta(
+            hours=network_expiration
+        )
+        self.yes_vote_count = 0
+        self.no_vote_count = 0
+        self.outstanding_vote_count = self.numofmembers
+        self.status = "IN_PROGRESS"
+        self.votes = {}
+
+    @property
+    def network_id(self):
+        return self.networkid
+
+    @property
+    def proposal_status(self):
+        return self.status
+
+    @property
+    def proposal_votes(self):
+        return self.votes
+
+    def proposal_actions(self, action_type):
+        default_return = []
+        if action_type.lower() == "invitations":
+            if "Invitations" in self.actions:
+                return self.actions["Invitations"]
+        elif action_type.lower() == "removals":
+            if "Removals" in self.actions:
+                return self.actions["Removals"]
+        return default_return
+
+    def to_dict(self):
+        # Format for list_proposals
+        d = {
+            "ProposalId": self.id,
+            "ProposedByMemberId": self.memberid,
+            "ProposedByMemberName": self.membername,
+            "Status": self.status,
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+            "ExpirationDate": self.expirationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+        }
+        return d
+
+    def get_format(self):
+        # Format for get_proposal
+        d = {
+            "ProposalId": self.id,
+            "NetworkId": self.networkid,
+            "Actions": self.actions,
+            "ProposedByMemberId": self.memberid,
+            "ProposedByMemberName": self.membername,
+            "Status": self.status,
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+            "ExpirationDate": self.expirationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+            "YesVoteCount": self.yes_vote_count,
+            "NoVoteCount": self.no_vote_count,
+            "OutstandingVoteCount": self.outstanding_vote_count,
+        }
+        if self.description is not None:
+            d["Description"] = self.description
+        return d
+
+    def set_vote(self, votermemberid, votermembername, vote):
+        if datetime.datetime.utcnow() > self.expirationdate:
+            self.status = "EXPIRED"
+            return False
+
+        if vote.upper() == "YES":
+            self.yes_vote_count += 1
+        else:
+            self.no_vote_count += 1
+        self.outstanding_vote_count -= 1
+
+        perct_yes = (self.yes_vote_count / self.numofmembers) * 100
+        perct_no = (self.no_vote_count / self.numofmembers) * 100
+        self.votes[votermemberid] = {
+            "MemberId": votermemberid,
+            "MemberName": votermembername,
+            "Vote": vote.upper(),
+        }
+
+        if self.network_threshold_comp == "GREATER_THAN_OR_EQUAL_TO":
+            if perct_yes >= self.network_threshold:
+                self.status = "APPROVED"
+            elif perct_no >= self.network_threshold:
+                self.status = "REJECTED"
+        else:
+            if perct_yes > self.network_threshold:
+                self.status = "APPROVED"
+            elif perct_no > self.network_threshold:
+                self.status = "REJECTED"
+
+        return True
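Since set_vote() above compares plain percentages of the total member count, the ThresholdComparator decides the boundary cases. A small illustration of the arithmetic (Python 3 division assumed):

    # 2-member network, ThresholdPercentage = 50, one YES vote cast so far
    yes_votes, total_members, threshold = 1, 2, 50
    perct_yes = (yes_votes / total_members) * 100  # 50.0

    assert perct_yes >= threshold        # GREATER_THAN_OR_EQUAL_TO approves here
    assert not (perct_yes > threshold)   # GREATER_THAN leaves it IN_PROGRESS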
+
+
+class ManagedBlockchainInvitation(BaseModel):
+    def __init__(
+        self,
+        id,
+        networkid,
+        networkname,
+        networkframework,
+        networkframeworkversion,
+        networkcreationdate,
+        region,
+        networkdescription=None,
+    ):
+        self.id = id
+        self.networkid = networkid
+        self.networkname = networkname
+        self.networkdescription = networkdescription
+        self.networkframework = networkframework
+        self.networkframeworkversion = networkframeworkversion
+        self.networkstatus = "AVAILABLE"
+        self.networkcreationdate = networkcreationdate
+        self.status = "PENDING"
+        self.region = region
+
+        self.creationdate = datetime.datetime.utcnow()
+        self.expirationdate = self.creationdate + datetime.timedelta(days=7)
+
+    @property
+    def invitation_status(self):
+        return self.status
+
+    @property
+    def invitation_networkid(self):
+        return self.networkid
+
+    def to_dict(self):
+        d = {
+            "InvitationId": self.id,
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+            "ExpirationDate": self.expirationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+            "Status": self.status,
+            "NetworkSummary": {
+                "Id": self.networkid,
+                "Name": self.networkname,
+                "Framework": self.networkframework,
+                "FrameworkVersion": self.networkframeworkversion,
+                "Status": self.networkstatus,
+                "CreationDate": self.networkcreationdate,
+            },
+        }
+        if self.networkdescription is not None:
+            d["NetworkSummary"]["Description"] = self.networkdescription
+        return d
+
+    def accept_invitation(self):
+        self.status = "ACCEPTED"
+
+    def reject_invitation(self):
+        self.status = "REJECTED"
+
+    def set_network_status(self, network_status):
+        self.networkstatus = network_status
+
+
+class ManagedBlockchainMember(BaseModel):
+    def __init__(
+        self, id, networkid, member_configuration, region,
+    ):
+        self.creationdate = datetime.datetime.utcnow()
+        self.id = id
+        self.networkid = networkid
+        self.member_configuration = member_configuration
+        self.status = "AVAILABLE"
+        self.region = region
+        self.description = None
+
+    @property
+    def network_id(self):
+        return self.networkid
+
+    @property
+    def name(self):
+        return self.member_configuration["Name"]
+
+    @property
+    def member_status(self):
+        return self.status
+
+    def to_dict(self):
+        # Format for list_members
+        d = {
+            "Id": self.id,
+            "Name": self.member_configuration["Name"],
+            "Status": self.status,
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+            "IsOwned": True,
+        }
+        if "Description" in self.member_configuration:
+            self.description = self.member_configuration["Description"]
+        return d
+
+    def get_format(self):
+        # Format for get_member
+        frameworkattributes = {
+            "Fabric": {
+                "AdminUsername": self.member_configuration["FrameworkConfiguration"][
+                    "Fabric"
+                ]["AdminUsername"],
+                "CaEndpoint": "ca.{0}.{1}.managedblockchain.{2}.amazonaws.com:30002".format(
+                    self.id.lower(), self.networkid.lower(), self.region
+                ),
+            }
+        }
+
+        d = {
+            "NetworkId": self.networkid,
+            "Id": self.id,
+            "Name": self.name,
+            "FrameworkAttributes": frameworkattributes,
+            "LogPublishingConfiguration": self.member_configuration[
+                "LogPublishingConfiguration"
+            ],
+            "Status": self.status,
+            "CreationDate": self.creationdate.strftime("%Y-%m-%dT%H:%M:%S.%f%z"),
+        }
+        if "Description" in self.member_configuration:
+            d["Description"] = self.description
+        return d
+
+    def delete(self):
+        self.status = "DELETED"
+
+    def update(self, logpublishingconfiguration):
+        self.member_configuration[
+            "LogPublishingConfiguration"
+        ] = logpublishingconfiguration
+
+
+class ManagedBlockchainBackend(BaseBackend):
+    def __init__(self, region_name):
+        self.networks = {}
+        self.members = {}
+        self.proposals = {}
+        self.invitations = {}
+        self.region_name = region_name
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def create_network(
+        self,
+        name,
+        framework,
+        frameworkversion,
+        frameworkconfiguration,
+        voting_policy,
+        member_configuration,
+        description=None,
+    ):
+        # Check framework
+        if framework not in FRAMEWORKS:
+            raise BadRequestException("CreateNetwork", "Invalid request body")
+
+        # Check framework version
+        if frameworkversion not in FRAMEWORKVERSIONS:
+            raise BadRequestException(
+                "CreateNetwork",
+                "Invalid version {0} requested for framework HYPERLEDGER_FABRIC".format(
+                    frameworkversion
+                ),
+            )
+
+        # Check edition
+        if frameworkconfiguration["Fabric"]["Edition"] not in EDITIONS:
+            raise BadRequestException("CreateNetwork", "Invalid request body")
+
+        ## Generate network ID
+        network_id = get_network_id()
+
+        ## Generate memberid ID and initial member
+        member_id = get_member_id()
+        self.members[member_id] = ManagedBlockchainMember(
+            id=member_id,
+            networkid=network_id,
+            member_configuration=member_configuration,
+            region=self.region_name,
+        )
+
+        self.networks[network_id] = ManagedBlockchainNetwork(
+            id=network_id,
+            name=name,
+            framework=framework,
+            frameworkversion=frameworkversion,
+            frameworkconfiguration=frameworkconfiguration,
+            voting_policy=voting_policy,
+            member_configuration=member_configuration,
+            region=self.region_name,
+            description=description,
+        )
+
+        # Return the network and member ID
+        d = {"NetworkId": network_id, "MemberId": member_id}
+        return d
+
+    def list_networks(self):
+        return self.networks.values()
+
+    def get_network(self, network_id):
+        if network_id not in self.networks:
+            raise ResourceNotFoundException(
+                "GetNetwork", "Network {0} not found.".format(network_id)
+            )
+        return self.networks.get(network_id)
+
+    def create_proposal(
+        self, networkid, memberid, actions, description=None,
+    ):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "CreateProposal", "Network {0} not found.".format(networkid)
+            )
+
+        # Check if member exists
+        if memberid not in self.members:
+            raise ResourceNotFoundException(
+                "CreateProposal", "Member {0} not found.".format(memberid)
+            )
+
+        # CLI docs say that Invitations and Removals cannot both be passed - but it does
+        # not throw an error and can be performed
+        if "Invitations" in actions:
+            for propinvitation in actions["Invitations"]:
+                if re.match("[0-9]{12}", propinvitation["Principal"]) is None:
+                    raise InvalidRequestException(
+                        "CreateProposal",
+                        "Account ID format specified in proposal is not valid.",
+                    )
+
+        if "Removals" in actions:
+            for propmember in actions["Removals"]:
+                if propmember["MemberId"] not in self.members:
+                    raise InvalidRequestException(
+                        "CreateProposal",
+                        "Member ID format specified in proposal is not valid.",
+                    )
+
+        ## Generate proposal ID
+        proposal_id = get_proposal_id()
+
+        self.proposals[proposal_id] = ManagedBlockchainProposal(
+            id=proposal_id,
+            networkid=networkid,
+            memberid=memberid,
+            membername=self.members.get(memberid).name,
+            numofmembers=number_of_members_in_network(self.members, networkid),
+            actions=actions,
+            network_expiration=self.networks.get(networkid).vote_pol_proposal_duration,
+            network_threshold=self.networks.get(
+                networkid
+            ).vote_pol_threshold_percentage,
+            network_threshold_comp=self.networks.get(
+                networkid
+            ).vote_pol_threshold_comparator,
+            description=description,
+        )
+
+        # Return the proposal ID
+        d = {"ProposalId": proposal_id}
+        return d
+
+    def list_proposals(self, networkid):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "ListProposals", "Network {0} not found.".format(networkid)
+            )
+
+        proposalsfornetwork = []
+        for proposal_id in self.proposals:
+            if self.proposals.get(proposal_id).network_id == networkid:
+                proposalsfornetwork.append(self.proposals[proposal_id])
+        return proposalsfornetwork
+
+    def get_proposal(self, networkid, proposalid):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "GetProposal", "Network {0} not found.".format(networkid)
+            )
+
+        if proposalid not in self.proposals:
+            raise ResourceNotFoundException(
+                "GetProposal", "Proposal {0} not found.".format(proposalid)
+            )
+        return self.proposals.get(proposalid)
+
+    def vote_on_proposal(self, networkid, proposalid, votermemberid, vote):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "VoteOnProposal", "Network {0} not found.".format(networkid)
+            )
+
+        if proposalid not in self.proposals:
+            raise ResourceNotFoundException(
+                "VoteOnProposal", "Proposal {0} not found.".format(proposalid)
+            )
+
+        if votermemberid not in self.members:
+            raise ResourceNotFoundException(
+                "VoteOnProposal", "Member {0} not found.".format(votermemberid)
+            )
+
+        if vote.upper() not in VOTEVALUES:
+            raise BadRequestException("VoteOnProposal", "Invalid request body")
+
+        # Check to see if this member already voted
+        # TODO Verify exception
+        if votermemberid in self.proposals.get(proposalid).proposal_votes:
+            raise BadRequestException("VoteOnProposal", "Invalid request body")
+
+        # Will return false if vote was not cast (e.g., status wrong)
+        if self.proposals.get(proposalid).set_vote(
+            votermemberid, self.members.get(votermemberid).name, vote.upper()
+        ):
+            if self.proposals.get(proposalid).proposal_status == "APPROVED":
+                ## Generate invitations
+                for propinvitation in self.proposals.get(proposalid).proposal_actions(
+                    "Invitations"
+                ):
+                    invitation_id = get_invitation_id()
+                    self.invitations[invitation_id] = ManagedBlockchainInvitation(
+                        id=invitation_id,
+                        networkid=networkid,
+                        networkname=self.networks.get(networkid).network_name,
+                        networkframework=self.networks.get(networkid).network_framework,
+                        networkframeworkversion=self.networks.get(
+                            networkid
+                        ).network_framework_version,
+                        networkcreationdate=self.networks.get(
+                            networkid
+                        ).network_creationdate,
+                        region=self.region_name,
+                        networkdescription=self.networks.get(
+                            networkid
+                        ).network_description,
+                    )
+
+                ## Delete members
+                for propmember in self.proposals.get(proposalid).proposal_actions(
+                    "Removals"
+                ):
+                    self.delete_member(networkid, propmember["MemberId"])
+
+    def list_proposal_votes(self, networkid, proposalid):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "ListProposalVotes", "Network {0} not found.".format(networkid)
+            )
+
+        if proposalid not in self.proposals:
+            raise ResourceNotFoundException(
+                "ListProposalVotes", "Proposal {0} not found.".format(proposalid)
+            )
+
+        # Output the vote summaries
+        proposalvotesfornetwork = []
+        for proposal_id in self.proposals:
+            if self.proposals.get(proposal_id).network_id == networkid:
+                for pvmemberid in self.proposals.get(proposal_id).proposal_votes:
+                    proposalvotesfornetwork.append(
+                        self.proposals.get(proposal_id).proposal_votes[pvmemberid]
+                    )
+        return proposalvotesfornetwork
+
+    def list_invitations(self):
+        return self.invitations.values()
+
+    def reject_invitation(self, invitationid):
+        if invitationid not in self.invitations:
+            raise ResourceNotFoundException(
+                "RejectInvitation", "InvitationId {0} not found.".format(invitationid)
+            )
+        self.invitations.get(invitationid).reject_invitation()
+
+    def create_member(
+        self, invitationid, networkid, member_configuration,
+    ):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "CreateMember", "Network {0} not found.".format(networkid)
+            )
+
+        if invitationid not in self.invitations:
+            raise InvalidRequestException(
+                "CreateMember", "Invitation {0} not valid".format(invitationid)
+            )
+
+        if self.invitations.get(invitationid).invitation_status != "PENDING":
+            raise InvalidRequestException(
+                "CreateMember", "Invitation {0} not valid".format(invitationid)
+            )
+
+        if (
+            member_name_exist_in_network(
+                self.members, networkid, member_configuration["Name"]
+            )
+            is True
+        ):
+            raise InvalidRequestException(
+                "CreateMember",
+                "Member name {0} already exists in network {1}.".format(
+                    member_configuration["Name"], networkid
+                ),
+            )
+
+        networkedition = self.networks.get(networkid).network_edition
+        if (
+            number_of_members_in_network(self.members, networkid)
+            >= EDITIONS[networkedition]["MaxMembers"]
+        ):
+            raise ResourceLimitExceededException(
+                "CreateMember",
+                "You cannot create a member in network {0}.{1} is the maximum number of members allowed in a {2} Edition network.".format(
+                    networkid, EDITIONS[networkedition]["MaxMembers"], networkedition
+                ),
+            )
+
+        memberadminpassword = member_configuration["FrameworkConfiguration"]["Fabric"][
+            "AdminPassword"
+        ]
+        if admin_password_ok(memberadminpassword) is False:
+            raise BadRequestException("CreateMember", "Invalid request body")
+
+        member_id = get_member_id()
+        self.members[member_id] = ManagedBlockchainMember(
+            id=member_id,
+            networkid=networkid,
+            member_configuration=member_configuration,
+            region=self.region_name,
+        )
+
+        # Accept the invitation
+        self.invitations.get(invitationid).accept_invitation()
+
+        # Return the member ID
+        d = {"MemberId": member_id}
+        return d
+
+    def list_members(self, networkid):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "ListMembers", "Network {0} not found.".format(networkid)
+            )
+
+        membersfornetwork = []
+        for member_id in self.members:
+            if self.members.get(member_id).network_id == networkid:
+                membersfornetwork.append(self.members[member_id])
+        return membersfornetwork
+
+    def get_member(self, networkid, memberid):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "GetMember", "Network {0} not found.".format(networkid)
+            )
+
+        if memberid not in self.members:
+            raise ResourceNotFoundException(
+                "GetMember", "Member {0} not found.".format(memberid)
+            )
+
+        ## Cannot get a member that has been deleted (it does show up in the list)
+        if self.members.get(memberid).member_status == "DELETED":
+            raise ResourceNotFoundException(
+                "GetMember", "Member {0} not found.".format(memberid)
+            )
+
+        return self.members.get(memberid)
+
+    def delete_member(self, networkid, memberid):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "DeleteMember", "Network {0} not found.".format(networkid)
+            )
+
+        if memberid not in self.members:
+            raise ResourceNotFoundException(
+                "DeleteMember", "Member {0} not found.".format(memberid)
+            )
+
+        self.members.get(memberid).delete()
+
+        # Is this the last member in the network? (all set to DELETED)
+        if number_of_members_in_network(
+            self.members, networkid, member_status="DELETED"
+        ) == len(self.members):
+            # Set network status to DELETED for all invitations
+            for invitation_id in self.invitations:
+                if (
+                    self.invitations.get(invitation_id).invitation_networkid
+                    == networkid
+                ):
+                    self.invitations.get(invitation_id).set_network_status("DELETED")
+
+            # Remove network
+            del self.networks[networkid]
+
+    def update_member(self, networkid, memberid, logpublishingconfiguration):
+        # Check if network exists
+        if networkid not in self.networks:
+            raise ResourceNotFoundException(
+                "UpdateMember", "Network {0} not found.".format(networkid)
+            )
+
+        if memberid not in self.members:
+            raise ResourceNotFoundException(
+                "UpdateMember", "Member {0} not found.".format(memberid)
+            )
+
+        self.members.get(memberid).update(logpublishingconfiguration)
+
+
+managedblockchain_backends = {}
+for region in Session().get_available_regions("managedblockchain"):
+    managedblockchain_backends[region] = ManagedBlockchainBackend(region)
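Putting the backend pieces together, a sketch of the happy path this model supports (argument values are illustrative; the password only has to satisfy admin_password_ok, and the routing for these calls is added in responses.py and urls.py below):

    import boto3
    from moto import mock_managedblockchain


    @mock_managedblockchain
    def test_proposal_yes_vote_creates_invitation():
        client = boto3.client("managedblockchain", region_name="us-east-1")
        ids = client.create_network(
            Name="testnetwork",
            Framework="HYPERLEDGER_FABRIC",
            FrameworkVersion="1.2",
            FrameworkConfiguration={"Fabric": {"Edition": "STARTER"}},
            VotingPolicy={
                "ApprovalThresholdPolicy": {
                    "ThresholdPercentage": 50,
                    "ProposalDurationInHours": 24,
                    "ThresholdComparator": "GREATER_THAN",
                }
            },
            MemberConfiguration={
                "Name": "member1",
                "FrameworkConfiguration": {
                    "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"}
                },
            },
        )
        proposal_id = client.create_proposal(
            NetworkId=ids["NetworkId"],
            MemberId=ids["MemberId"],
            Actions={"Invitations": [{"Principal": "123456789012"}]},
        )["ProposalId"]

        # one YES vote in a one-member network beats the 50% threshold,
        # so the proposal is approved and the invitation materialises
        client.vote_on_proposal(
            NetworkId=ids["NetworkId"],
            ProposalId=proposal_id,
            VoterMemberId=ids["MemberId"],
            Vote="YES",
        )
        proposal = client.get_proposal(NetworkId=ids["NetworkId"], ProposalId=proposal_id)
        assert proposal["Proposal"]["Status"] == "APPROVED"
        assert len(client.list_invitations()["Invitations"]) == 1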
diff --git a/moto/managedblockchain/responses.py b/moto/managedblockchain/responses.py
new file mode 100644
index 000000000..34206b3c4
--- /dev/null
+++ b/moto/managedblockchain/responses.py
@@ -0,0 +1,326 @@
+from __future__ import unicode_literals
+
+import json
+from six.moves.urllib.parse import urlparse, parse_qs
+
+from moto.core.responses import BaseResponse
+from .models import managedblockchain_backends
+from .utils import (
+    region_from_managedblockchain_url,
+    networkid_from_managedblockchain_url,
+    proposalid_from_managedblockchain_url,
+    invitationid_from_managedblockchain_url,
+    memberid_from_managedblockchain_url,
+)
+
+
+class ManagedBlockchainResponse(BaseResponse):
+    def __init__(self, backend):
+        super(ManagedBlockchainResponse, self).__init__()
+        self.backend = backend
+
+    @classmethod
+    def network_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._network_response(request, full_url, headers)
+
+    def _network_response(self, request, full_url, headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        parsed_url = urlparse(full_url)
+        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
+        if method == "GET":
+            return self._all_networks_response(request, full_url, headers)
+        elif method == "POST":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._network_response_post(json_body, querystring, headers)
+
+    def _all_networks_response(self, request, full_url, headers):
+        mbcnetworks = self.backend.list_networks()
+        response = json.dumps(
+            {"Networks": [mbcnetwork.to_dict() for mbcnetwork in mbcnetworks]}
+        )
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _network_response_post(self, json_body, querystring, headers):
+        name = json_body["Name"]
+        framework = json_body["Framework"]
+        frameworkversion = json_body["FrameworkVersion"]
+        frameworkconfiguration = json_body["FrameworkConfiguration"]
+        voting_policy = json_body["VotingPolicy"]
+        member_configuration = json_body["MemberConfiguration"]
+
+        # Optional
+        description = json_body.get("Description", None)
+
+        response = self.backend.create_network(
+            name,
+            framework,
+            frameworkversion,
+            frameworkconfiguration,
+            voting_policy,
+            member_configuration,
+            description,
+        )
+        return 200, headers, json.dumps(response)
+
+    @classmethod
+    def networkid_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._networkid_response(request, full_url, headers)
+
+    def _networkid_response(self, request, full_url, headers):
+        method = request.method
+
+        if method == "GET":
+            network_id = networkid_from_managedblockchain_url(full_url)
+            return self._networkid_response_get(network_id, headers)
+
+    def _networkid_response_get(self, network_id, headers):
+        mbcnetwork = self.backend.get_network(network_id)
+        response = json.dumps({"Network": mbcnetwork.get_format()})
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    @classmethod
+    def proposal_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._proposal_response(request, full_url, headers)
+
+    def _proposal_response(self, request, full_url, headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        parsed_url = urlparse(full_url)
+        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
+        network_id = networkid_from_managedblockchain_url(full_url)
+        if method == "GET":
+            return self._all_proposals_response(network_id, headers)
+        elif method == "POST":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._proposal_response_post(
+                network_id, json_body, querystring, headers
+            )
+
+    def _all_proposals_response(self, network_id, headers):
+        proposals = self.backend.list_proposals(network_id)
+        response = json.dumps(
+            {"Proposals": [proposal.to_dict() for proposal in proposals]}
+        )
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _proposal_response_post(self, network_id, json_body, querystring, headers):
+        memberid = json_body["MemberId"]
+        actions = json_body["Actions"]
+
+        # Optional
+        description = json_body.get("Description", None)
+
+        response = self.backend.create_proposal(
+            network_id, memberid, actions, description,
+        )
+        return 200, headers, json.dumps(response)
+
+    @classmethod
+    def proposalid_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._proposalid_response(request, full_url, headers)
+
+    def _proposalid_response(self, request, full_url, headers):
+        method = request.method
+        network_id = networkid_from_managedblockchain_url(full_url)
+        if method == "GET":
+            proposal_id = proposalid_from_managedblockchain_url(full_url)
+            return self._proposalid_response_get(network_id, proposal_id, headers)
+
+    def _proposalid_response_get(self, network_id, proposal_id, headers):
+        proposal = self.backend.get_proposal(network_id, proposal_id)
+        response = json.dumps({"Proposal": proposal.get_format()})
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    @classmethod
+    def proposal_votes_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._proposal_votes_response(request, full_url, headers)
+
+    def _proposal_votes_response(self, request, full_url, headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        parsed_url = urlparse(full_url)
+        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
+        network_id = networkid_from_managedblockchain_url(full_url)
+        proposal_id = proposalid_from_managedblockchain_url(full_url)
+        if method == "GET":
+            return self._all_proposal_votes_response(network_id, proposal_id, headers)
+        elif method == "POST":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._proposal_votes_response_post(
+                network_id, proposal_id, json_body, querystring, headers
+            )
+
+    def _all_proposal_votes_response(self, network_id, proposal_id, headers):
+        proposalvotes = self.backend.list_proposal_votes(network_id, proposal_id)
+        response = json.dumps({"ProposalVotes": proposalvotes})
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _proposal_votes_response_post(
+        self, network_id, proposal_id, json_body, querystring, headers
+    ):
+        votermemberid = json_body["VoterMemberId"]
+        vote = json_body["Vote"]
+
+        self.backend.vote_on_proposal(
+            network_id, proposal_id, votermemberid, vote,
+        )
+        return 200, headers, ""
+
+    @classmethod
+    def invitation_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._invitation_response(request, full_url, headers)
+
+    def _invitation_response(self, request, full_url, headers):
+        method = request.method
+        if method == "GET":
+            return self._all_invitation_response(request, full_url, headers)
+
+    def _all_invitation_response(self, request, full_url, headers):
+        invitations = self.backend.list_invitations()
+        response = json.dumps(
+            {"Invitations": [invitation.to_dict() for invitation in invitations]}
+        )
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    @classmethod
+    def invitationid_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._invitationid_response(request, full_url, headers)
+
+    def _invitationid_response(self, request, full_url, headers):
+        method = request.method
+        if method == "DELETE":
+            invitation_id = invitationid_from_managedblockchain_url(full_url)
+            return self._invitationid_response_delete(invitation_id, headers)
+
+    def _invitationid_response_delete(self, invitation_id, headers):
+        self.backend.reject_invitation(invitation_id)
+        headers["content-type"] = "application/json"
+        return 200, headers, ""
+
+    @classmethod
+    def member_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._member_response(request, full_url, headers)
+
+    def _member_response(self, request, full_url, headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        parsed_url = urlparse(full_url)
+        querystring = parse_qs(parsed_url.query, keep_blank_values=True)
+        network_id = networkid_from_managedblockchain_url(full_url)
+        if method == "GET":
+            return self._all_members_response(network_id, headers)
+        elif method == "POST":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._member_response_post(
+                network_id, json_body, querystring, headers
+            )
+
+    def _all_members_response(self, network_id, headers):
+        members = self.backend.list_members(network_id)
+        response = json.dumps({"Members": [member.to_dict() for member in members]})
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _member_response_post(self, network_id, json_body, querystring, headers):
+        invitationid = json_body["InvitationId"]
+        member_configuration = json_body["MemberConfiguration"]
+
+        response = self.backend.create_member(
+            invitationid, network_id, member_configuration,
+        )
+        return 200, headers, json.dumps(response)
+
+    @classmethod
+    def memberid_response(clazz, request, full_url, headers):
+        region_name = region_from_managedblockchain_url(full_url)
+        response_instance = ManagedBlockchainResponse(
+            managedblockchain_backends[region_name]
+        )
+        return response_instance._memberid_response(request, full_url, headers)
+
+    def _memberid_response(self, request, full_url, headers):
+        method = request.method
+        if hasattr(request, "body"):
+            body = request.body
+        else:
+            body = request.data
+        network_id = networkid_from_managedblockchain_url(full_url)
+        member_id = memberid_from_managedblockchain_url(full_url)
+        if method == "GET":
+            return self._memberid_response_get(network_id, member_id, headers)
+        elif method == "PATCH":
+            json_body = json.loads(body.decode("utf-8"))
+            return self._memberid_response_patch(
+                network_id, member_id, json_body, headers
+            )
+        elif method == "DELETE":
+            return self._memberid_response_delete(network_id, member_id, headers)
+
+    def _memberid_response_get(self, network_id, member_id, headers):
+        member = self.backend.get_member(network_id, member_id)
+        response = json.dumps({"Member": member.get_format()})
+        headers["content-type"] = "application/json"
+        return 200, headers, response
+
+    def _memberid_response_patch(self, network_id, member_id, json_body, headers):
+        logpublishingconfiguration = json_body["LogPublishingConfiguration"]
+        self.backend.update_member(
+            network_id, member_id, logpublishingconfiguration,
+        )
+        return 200, headers, ""
+
+    def _memberid_response_delete(self, network_id, member_id, headers):
+        self.backend.delete_member(network_id, member_id)
+        headers["content-type"] = "application/json"
+        return 200, headers, ""
diff --git a/moto/managedblockchain/urls.py b/moto/managedblockchain/urls.py
new file mode 100644
index 000000000..c7d191aab
--- /dev/null
+++ b/moto/managedblockchain/urls.py
@@ -0,0 +1,16 @@
+from __future__ import unicode_literals
+from .responses import ManagedBlockchainResponse
+
+url_bases = ["https?://managedblockchain.(.+).amazonaws.com"]
+
+url_paths = {
+    "{0}/networks$": ManagedBlockchainResponse.network_response,
+    "{0}/networks/(?P<networkid>[^/.]+)$": ManagedBlockchainResponse.networkid_response,
+    "{0}/networks/(?P<networkid>[^/.]+)/proposals$": ManagedBlockchainResponse.proposal_response,
+    "{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)$": ManagedBlockchainResponse.proposalid_response,
+    "{0}/networks/(?P<networkid>[^/.]+)/proposals/(?P<proposalid>[^/.]+)/votes$": ManagedBlockchainResponse.proposal_votes_response,
+    "{0}/invitations$": ManagedBlockchainResponse.invitation_response,
+    "{0}/invitations/(?P<invitationid>[^/.]+)$": ManagedBlockchainResponse.invitationid_response,
+    "{0}/networks/(?P<networkid>[^/.]+)/members$": ManagedBlockchainResponse.member_response,
+    "{0}/networks/(?P<networkid>[^/.]+)/members/(?P<memberid>[^/.]+)$": ManagedBlockchainResponse.memberid_response,
+}
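These routes can be exercised end to end; for instance, the DELETE route maps onto reject_invitation, which surfaces the 404 defined in exceptions.py (the invitation id below is a made-up but well-formed value):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_managedblockchain


    @mock_managedblockchain
    def test_reject_unknown_invitation():
        client = boto3.client("managedblockchain", region_name="us-east-1")
        try:
            client.reject_invitation(InvitationId="in-ABCDEFGHIJKLMNOPQRSTUVWXYZ")
            raise AssertionError("expected an InvitationId-not-found error")
        except ClientError as err:
            # the backend raises the 404 ResourceNotFoundException defined above
            assert "not found" in err.response["Error"]["Message"]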
diff --git a/moto/managedblockchain/utils.py b/moto/managedblockchain/utils.py
new file mode 100644
index 000000000..ea8f50513
--- /dev/null
+++ b/moto/managedblockchain/utils.py
@@ -0,0 +1,106 @@
+import random
+import re
+import string
+
+from six.moves.urllib.parse import urlparse
+
+
+def region_from_managedblockchain_url(url):
+    domain = urlparse(url).netloc
+    region = "us-east-1"
+    if "." in domain:
+        region = domain.split(".")[1]
+    return region
+
+
+def networkid_from_managedblockchain_url(full_url):
+    id_search = re.search("\/n-[A-Z0-9]{26}", full_url, re.IGNORECASE)
+    return_id = None
+    if id_search:
+        return_id = id_search.group(0).replace("/", "")
+    return return_id
+
+
+def get_network_id():
+    return "n-" + "".join(
+        random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+    )
+
+
+def memberid_from_managedblockchain_url(full_url):
+    id_search = re.search("\/m-[A-Z0-9]{26}", full_url, re.IGNORECASE)
+    return_id = None
+    if id_search:
+        return_id = id_search.group(0).replace("/", "")
+    return return_id
+
+
+def get_member_id():
+    return "m-" + "".join(
+        random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+    )
+
+
+def proposalid_from_managedblockchain_url(full_url):
+    id_search = re.search("\/p-[A-Z0-9]{26}", full_url, re.IGNORECASE)
+    return_id = None
+    if id_search:
+        return_id = id_search.group(0).replace("/", "")
+    return return_id
+
+
+def get_proposal_id():
+    return "p-" + "".join(
+        random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+    )
+
+
+def invitationid_from_managedblockchain_url(full_url):
+    id_search = re.search("\/in-[A-Z0-9]{26}", full_url, re.IGNORECASE)
+    return_id = None
+    if id_search:
+        return_id = id_search.group(0).replace("/", "")
+    return return_id
+
+
+def get_invitation_id():
+    return "in-" + "".join(
+        random.choice(string.ascii_uppercase + string.digits) for _ in range(26)
+    )
+
+
+def member_name_exist_in_network(members, networkid, membername):
+    member_name_exists = False
+    for member_id in members:
+        if members.get(member_id).network_id == networkid:
+            if members.get(member_id).name == membername:
+                member_name_exists = True
+                break
+    return member_name_exists
+
+
+def number_of_members_in_network(members, networkid, member_status=None):
+    return len(
+        [
+            membid
+            for membid in members
+            if members.get(membid).network_id == networkid
+            and (
+                member_status is None
+                or members.get(membid).member_status == member_status
+            )
+        ]
+    )
+
+
+def admin_password_ok(password):
+    if not re.search("[a-z]", password):
+        return False
+    elif not re.search("[A-Z]", password):
+        return False
+    elif not re.search("[0-9]", password):
+        return False
+    elif re.search("['\"@\\/]", password):
+        return False
+    else:
+        return True
diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py
index 96d918cc9..84bd3b103 100644
--- a/moto/opsworks/models.py
+++ b/moto/opsworks/models.py
@@ -125,6 +125,9 @@ class OpsworkInstance(BaseModel):
     def status(self):
         if self.instance is None:
             return "stopped"
+        # OpsWorks reports the "running" state as "online"
+        elif self.instance._state.name == "running":
+            return "online"
         return self.instance._state.name

     def to_dict(self):
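With the OpsWorks tweak, a started instance now reports the console's vocabulary. A sketch (stack and role values are placeholders; moto does not validate them):

    import boto3
    from moto import mock_ec2, mock_opsworks


    @mock_opsworks
    @mock_ec2
    def test_started_instance_reports_online():
        client = boto3.client("opsworks", region_name="us-east-1")
        stack_id = client.create_stack(
            Name="test-stack",
            Region="us-east-1",
            ServiceRoleArn="arn:aws:iam::123456789012:role/service",
            DefaultInstanceProfileArn="arn:aws:iam::123456789012:instance-profile/default",
        )["StackId"]
        layer_id = client.create_layer(
            StackId=stack_id, Type="custom", Name="test-layer", Shortname="tl"
        )["LayerId"]
        instance_id = client.create_instance(
            StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro"
        )["InstanceId"]

        client.start_instance(InstanceId=instance_id)
        instance = client.describe_instances(InstanceIds=[instance_id])["Instances"][0]
        assert instance["Status"] == "online"  # previously surfaced as "running"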
diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py
index 0a17e8aab..b5f83d3bc 100644
--- a/moto/redshift/exceptions.py
+++ b/moto/redshift/exceptions.py
@@ -136,3 +136,10 @@ class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError):
                 cluster_identifier
             ),
         )
+
+
+class ClusterAlreadyExistsFaultError(RedshiftClientError):
+    def __init__(self):
+        super(ClusterAlreadyExistsFaultError, self).__init__(
+            "ClusterAlreadyExists", "Cluster already exists"
+        )
diff --git a/moto/redshift/models.py b/moto/redshift/models.py
index 17840fb86..07baf18c0 100644
--- a/moto/redshift/models.py
+++ b/moto/redshift/models.py
@@ -10,6 +10,7 @@ from moto.core import BaseBackend, BaseModel
 from moto.core.utils import iso_8601_datetime_with_milliseconds
 from moto.ec2 import ec2_backends
 from .exceptions import (
+    ClusterAlreadyExistsFaultError,
     ClusterNotFoundError,
     ClusterParameterGroupNotFoundError,
     ClusterSecurityGroupNotFoundError,
@@ -580,6 +581,8 @@ class RedshiftBackend(BaseBackend):

     def create_cluster(self, **cluster_kwargs):
         cluster_identifier = cluster_kwargs["cluster_identifier"]
+        if cluster_identifier in self.clusters:
+            raise ClusterAlreadyExistsFaultError()
         cluster = Cluster(self, **cluster_kwargs)
         self.clusters[cluster_identifier] = cluster
         return cluster
diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py
index c38a4f467..3ed385f1c 100644
--- a/moto/s3/exceptions.py
+++ b/moto/s3/exceptions.py
@@ -377,3 +377,12 @@ class NoSystemTags(S3ClientError):
         super(NoSystemTags, self).__init__(
             "InvalidTag", "System tags cannot be added/updated by requester"
         )
+
+
+class NoSuchUpload(S3ClientError):
+    code = 404
+
+    def __init__(self):
+        super(NoSuchUpload, self).__init__(
+            "NoSuchUpload", "The specified multipart upload does not exist."
+        )
diff --git a/moto/s3/models.py b/moto/s3/models.py
index 866c5d007..3020fd45e 100644
--- a/moto/s3/models.py
+++ b/moto/s3/models.py
@@ -40,6 +40,7 @@ from .exceptions import (
     NoSuchPublicAccessBlockConfiguration,
     InvalidPublicAccessBlockConfiguration,
     WrongPublicAccessBlockAccountIdError,
+    NoSuchUpload,
 )
 from .utils import clean_key_name, _VersionedKeyStore

@@ -1478,6 +1479,9 @@ class S3Backend(BaseBackend):

     def cancel_multipart(self, bucket_name, multipart_id):
         bucket = self.get_bucket(bucket_name)
+        multipart_data = bucket.multiparts.get(multipart_id, None)
+        if not multipart_data:
+            raise NoSuchUpload()
         del bucket.multiparts[multipart_id]

     def list_multipart(self, bucket_name, multipart_id):
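The S3 change turns a bare KeyError into the documented error code. A short sketch of the observable behaviour (bucket and key names are arbitrary):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_s3


    @mock_s3
    def test_abort_unknown_multipart_upload():
        s3 = boto3.client("s3", region_name="us-east-1")
        s3.create_bucket(Bucket="mybucket")
        try:
            s3.abort_multipart_upload(
                Bucket="mybucket", Key="the-key", UploadId="nonexistent-id"
            )
            raise AssertionError("expected NoSuchUpload")
        except ClientError as err:
            assert err.response["Error"]["Code"] == "NoSuchUpload"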
diff --git a/moto/ses/exceptions.py b/moto/ses/exceptions.py
index a905039e2..7a4ef1b03 100644
--- a/moto/ses/exceptions.py
+++ b/moto/ses/exceptions.py
@@ -7,3 +7,37 @@ class MessageRejectedError(RESTError):

     def __init__(self, message):
         super(MessageRejectedError, self).__init__("MessageRejected", message)
+
+
+class ConfigurationSetDoesNotExist(RESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(ConfigurationSetDoesNotExist, self).__init__(
+            "ConfigurationSetDoesNotExist", message
+        )
+
+
+class EventDestinationAlreadyExists(RESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(EventDestinationAlreadyExists, self).__init__(
+            "EventDestinationAlreadyExists", message
+        )
+
+
+class TemplateNameAlreadyExists(RESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(TemplateNameAlreadyExists, self).__init__(
+            "TemplateNameAlreadyExists", message
+        )
+
+
+class TemplateDoesNotExist(RESTError):
+    code = 400
+
+    def __init__(self, message):
+        super(TemplateDoesNotExist, self).__init__("TemplateDoesNotExist", message)
diff --git a/moto/ses/models.py b/moto/ses/models.py
index 91241f706..f918d9021 100644
--- a/moto/ses/models.py
+++ b/moto/ses/models.py
@@ -1,11 +1,18 @@
 from __future__ import unicode_literals

+import datetime
 import email
 from email.utils import parseaddr

 from moto.core import BaseBackend, BaseModel
 from moto.sns.models import sns_backends
-from .exceptions import MessageRejectedError
+from .exceptions import (
+    MessageRejectedError,
+    ConfigurationSetDoesNotExist,
+    EventDestinationAlreadyExists,
+    TemplateNameAlreadyExists,
+    TemplateDoesNotExist,
+)
 from .utils import get_random_message_id
 from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY

@@ -81,7 +88,12 @@ class SESBackend(BaseBackend):
         self.domains = []
         self.sent_messages = []
         self.sent_message_count = 0
+        self.rejected_messages_count = 0
         self.sns_topics = {}
+        self.config_set = {}
+        self.config_set_event_destination = {}
+        self.event_destinations = {}
+        self.templates = {}

     def _is_verified_address(self, source):
         _, address = parseaddr(source)
@@ -118,6 +130,7 @@ class SESBackend(BaseBackend):
         if recipient_count > RECIPIENT_LIMIT:
             raise MessageRejectedError("Too many recipients.")
         if not self._is_verified_address(source):
+            self.rejected_messages_count += 1
             raise MessageRejectedError("Email address not verified %s" % source)

         self.__process_sns_feedback__(source, destinations, region)
@@ -135,6 +148,7 @@ class SESBackend(BaseBackend):
         if recipient_count > RECIPIENT_LIMIT:
             raise MessageRejectedError("Too many recipients.")
         if not self._is_verified_address(source):
+            self.rejected_messages_count += 1
             raise MessageRejectedError("Email address not verified %s" % source)

         self.__process_sns_feedback__(source, destinations, region)
@@ -189,7 +203,7 @@ class SESBackend(BaseBackend):
     def send_raw_email(self, source, destinations, raw_data, region):
         if source is not None:
             _, source_email_address = parseaddr(source)
-            if source_email_address not in self.addresses:
+            if not self._is_verified_address(source_email_address):
                 raise MessageRejectedError(
                     "Did not have authority to send from email %s"
                     % source_email_address
@@ -202,7 +216,7 @@ class SESBackend(BaseBackend):
                 raise MessageRejectedError("Source not specified")

             _, source_email_address = parseaddr(message["from"])
-            if source_email_address not in self.addresses:
+            if not self._is_verified_address(source_email_address):
                 raise MessageRejectedError(
                     "Did not have authority to send from email %s"
                     % source_email_address
@@ -237,5 +251,48 @@ class SESBackend(BaseBackend):

         return {}

+    def create_configuration_set(self, configuration_set_name):
+        self.config_set[configuration_set_name] = 1
+        return {}
+
+    def create_configuration_set_event_destination(
+        self, configuration_set_name, event_destination
+    ):
+
+        if self.config_set.get(configuration_set_name) is None:
+            raise ConfigurationSetDoesNotExist("Invalid Configuration Set Name.")
+
+        if self.event_destinations.get(event_destination["Name"]):
+            raise EventDestinationAlreadyExists("Duplicate Event destination Name.")
+
+        self.config_set_event_destination[configuration_set_name] = event_destination
+        self.event_destinations[event_destination["Name"]] = 1
+
+        return {}
+
+    def get_send_statistics(self):
+
+        statistics = {}
+        statistics["DeliveryAttempts"] = self.sent_message_count
+        statistics["Rejects"] = self.rejected_messages_count
+        statistics["Complaints"] = 0
+        statistics["Bounces"] = 0
+        statistics["Timestamp"] = datetime.datetime.utcnow()
+        return statistics
+
+    def add_template(self, template_info):
+        template_name = template_info["template_name"]
+        if self.templates.get(template_name, None):
+            raise TemplateNameAlreadyExists("Duplicate Template Name.")
+        self.templates[template_name] = template_info
+
+    def get_template(self, template_name):
+        if not self.templates.get(template_name, None):
+            raise TemplateDoesNotExist("Invalid Template Name.")
+        return self.templates[template_name]
+
+    def list_templates(self):
+        return list(self.templates.values())
+

 ses_backend = SESBackend()
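A sketch of the new backend behaviour through boto3 (identity and template values are invented; it assumes the response templates below render the shapes botocore expects):

    import boto3
    from moto import mock_ses


    @mock_ses
    def test_send_statistics_and_templates():
        ses = boto3.client("ses", region_name="us-east-1")
        ses.verify_email_identity(EmailAddress="good@example.com")
        ses.send_email(
            Source="good@example.com",
            Destination={"ToAddresses": ["to@example.com"]},
            Message={
                "Subject": {"Data": "hi"},
                "Body": {"Text": {"Data": "body"}},
            },
        )
        stats = ses.get_send_statistics()["SendDataPoints"][0]
        assert stats["DeliveryAttempts"] == 1
        assert stats["Rejects"] == 0

        ses.create_template(
            Template={
                "TemplateName": "greeting",
                "SubjectPart": "Hello {{name}}",
                "TextPart": "plain",
                "HtmlPart": "<b>rich</b>",
            }
        )
        names = [t["Name"] for t in ses.list_templates()["TemplatesMetadata"]]
        assert names == ["greeting"]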
diff --git a/moto/ses/responses.py b/moto/ses/responses.py
index 1034aeb0d..f0780e98a 100644
--- a/moto/ses/responses.py
+++ b/moto/ses/responses.py
@@ -5,6 +5,7 @@ import six

 from moto.core.responses import BaseResponse
 from .models import ses_backend
+from datetime import datetime


 class EmailResponse(BaseResponse):
@@ -133,6 +134,71 @@ class EmailResponse(BaseResponse):
         template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE)
         return template.render()

+    def get_send_statistics(self):
+        statistics = ses_backend.get_send_statistics()
+        template = self.response_template(GET_SEND_STATISTICS)
+        return template.render(all_statistics=[statistics])
+
+    def create_configuration_set(self):
+        configuration_set_name = self.querystring.get("ConfigurationSet.Name")[0]
+        ses_backend.create_configuration_set(
+            configuration_set_name=configuration_set_name
+        )
+        template = self.response_template(CREATE_CONFIGURATION_SET)
+        return template.render()
+
+    def create_configuration_set_event_destination(self):
+
+        configuration_set_name = self._get_param("ConfigurationSetName")
+        is_configuration_event_enabled = self.querystring.get(
+            "EventDestination.Enabled"
+        )[0]
+        configuration_event_name = self.querystring.get("EventDestination.Name")[0]
+        event_topic_arn = self.querystring.get(
+            "EventDestination.SNSDestination.TopicARN"
+        )[0]
+        event_matching_types = self._get_multi_param(
+            "EventDestination.MatchingEventTypes.member"
+        )
+
+        event_destination = {
+            "Name": configuration_event_name,
+            "Enabled": is_configuration_event_enabled,
+            "EventMatchingTypes": event_matching_types,
+            "SNSDestination": event_topic_arn,
+        }
+
+        ses_backend.create_configuration_set_event_destination(
+            configuration_set_name=configuration_set_name,
+            event_destination=event_destination,
+        )
+
+        template = self.response_template(CREATE_CONFIGURATION_SET_EVENT_DESTINATION)
+        return template.render()
+
+    def create_template(self):
+        template_data = self._get_dict_param("Template")
+        template_info = {}
+        template_info["text_part"] = template_data["._text_part"]
+        template_info["html_part"] = template_data["._html_part"]
+        template_info["template_name"] = template_data["._name"]
+        template_info["subject_part"] = template_data["._subject_part"]
+        template_info["Timestamp"] = datetime.utcnow()
+        ses_backend.add_template(template_info=template_info)
+        template = self.response_template(CREATE_TEMPLATE)
+        return template.render()
+
+    def get_template(self):
+        template_name = self._get_param("TemplateName")
+        template_data = ses_backend.get_template(template_name)
+        template = self.response_template(GET_TEMPLATE)
+        return template.render(template_data=template_data)
+
+    def list_templates(self):
+        email_templates = ses_backend.list_templates()
+        template = self.response_template(LIST_TEMPLATES)
+        return template.render(templates=email_templates)
+

 VERIFY_EMAIL_IDENTITY = """<VerifyEmailIdentityResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
@@ -248,3 +314,74 @@ SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE = """<SetIdentityNotificationTopicResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
     <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
   </ResponseMetadata>
 </SetIdentityNotificationTopicResponse>"""
+
+GET_SEND_STATISTICS = """<GetSendStatisticsResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <GetSendStatisticsResult>
+    <SendDataPoints>
+      {% for statistics in all_statistics %}
+      <item>
+        <DeliveryAttempts>{{ statistics["DeliveryAttempts"] }}</DeliveryAttempts>
+        <Rejects>{{ statistics["Rejects"] }}</Rejects>
+        <Bounces>{{ statistics["Bounces"] }}</Bounces>
+        <Complaints>{{ statistics["Complaints"] }}</Complaints>
+        <Timestamp>{{ statistics["Timestamp"] }}</Timestamp>
+      </item>
+      {% endfor %}
+    </SendDataPoints>
+  </GetSendStatisticsResult>
+  <ResponseMetadata>
+    <RequestId>e0abcdfa-c866-11e0-b6d0-273d09173z49</RequestId>
+  </ResponseMetadata>
+</GetSendStatisticsResponse>"""
+
+CREATE_CONFIGURATION_SET = """<CreateConfigurationSetResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <CreateConfigurationSetResult/>
+  <ResponseMetadata>
+    <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
+  </ResponseMetadata>
+</CreateConfigurationSetResponse>"""
+
+CREATE_CONFIGURATION_SET_EVENT_DESTINATION = """<CreateConfigurationSetEventDestinationResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <CreateConfigurationSetEventDestinationResult/>
+  <ResponseMetadata>
+    <RequestId>67e0ef1a-9bf2-11e1-9279-0100e8cf109a</RequestId>
+  </ResponseMetadata>
+</CreateConfigurationSetEventDestinationResponse>"""
+
+CREATE_TEMPLATE = """<CreateTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <CreateTemplateResult/>
+  <ResponseMetadata>
+    <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+  </ResponseMetadata>
+</CreateTemplateResponse>"""
+
+GET_TEMPLATE = """<GetTemplateResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <GetTemplateResult>
+    <Template/>
+  </GetTemplateResult>
+  <ResponseMetadata>
+    <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+  </ResponseMetadata>
+</GetTemplateResponse>"""
+
+LIST_TEMPLATES = """<ListTemplatesResponse xmlns="http://ses.amazonaws.com/doc/2010-12-01/">
+  <ListTemplatesResult>
+    <TemplatesMetadata>
+      {% for template in templates %}
+      <member>
+        <Name>{{ template["template_name"] }}</Name>
+        <CreatedTimestamp>{{ template["Timestamp"] }}</CreatedTimestamp>
+      </member>
+      {% endfor %}
+    </TemplatesMetadata>
+  </ListTemplatesResult>
+  <ResponseMetadata>
+    <RequestId>47e0ef1a-9bf2-11e1-9279-0100e8cf12ba</RequestId>
+  </ResponseMetadata>
+</ListTemplatesResponse>"""
diff --git a/moto/ssm/models.py b/moto/ssm/models.py
index 3ce3b3a22..67216972e 100644
--- a/moto/ssm/models.py
+++ b/moto/ssm/models.py
@@ -514,6 +514,16 @@ class SimpleSystemManagerBackend(BaseBackend):

     def get_parameters(self, names, with_decryption):
         result = []
+
+        if len(names) > 10:
+            raise ValidationException(
+                "1 validation error detected: "
+                "Value '[{}]' at 'names' failed to satisfy constraint: "
+                "Member must have length less than or equal to 10.".format(
+                    ", ".join(names)
+                )
+            )
+
         for name in names:
             if name in self._parameters:
                 result.append(self.get_parameter(name, with_decryption))
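The new SSM guard can be seen with eleven names (parameter names are arbitrary):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_ssm


    @mock_ssm
    def test_get_parameters_rejects_more_than_ten_names():
        ssm = boto3.client("ssm", region_name="us-east-1")
        try:
            ssm.get_parameters(Names=["param-%d" % i for i in range(11)])
            raise AssertionError("expected ValidationException")
        except ClientError as err:
            assert err.response["Error"]["Code"] == "ValidationException"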
\n") + file.write("{}% implemented\n\n".format(percentage_implemented)) for op in operations: if op in implemented: file.write("- [X] {}\n".format(op)) else: file.write("- [ ] {}\n".format(op)) + file.write("
\n") if __name__ == '__main__': diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 7495372d2..295cd1c54 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals +import json import boto3 from freezegun import freeze_time @@ -1230,6 +1231,65 @@ def test_put_integration_response_requires_responseTemplate(): ) +@mock_apigateway +def test_put_integration_response_with_response_template(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + resources = client.get_resources(restApiId=api_id) + root_id = [resource for resource in resources["items"] if resource["path"] == "/"][ + 0 + ]["id"] + + client.put_method( + restApiId=api_id, resourceId=root_id, httpMethod="GET", authorizationType="NONE" + ) + client.put_method_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + client.put_integration( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + type="HTTP", + uri="http://httpbin.org/robots.txt", + integrationHttpMethod="POST", + ) + + with assert_raises(ClientError) as ex: + client.put_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + ex.exception.response["Error"]["Code"].should.equal("BadRequestException") + ex.exception.response["Error"]["Message"].should.equal("Invalid request input") + + client.put_integration_response( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + statusCode="200", + selectionPattern="foobar", + responseTemplates={"application/json": json.dumps({"data": "test"})}, + ) + + response = client.get_integration_response( + restApiId=api_id, resourceId=root_id, httpMethod="GET", statusCode="200" + ) + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "statusCode": "200", + "selectionPattern": "foobar", + "ResponseMetadata": {"HTTPStatusCode": 200}, + "responseTemplates": {"application/json": json.dumps({"data": "test"})}, + } + ) + + @mock_apigateway def test_put_integration_validation(): client = boto3.client("apigateway", region_name="us-west-2") diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 3a10f20ff..93a8c5a48 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -1071,6 +1071,7 @@ def test_autoscaling_describe_policies_boto3(): response["ScalingPolicies"][0]["PolicyName"].should.equal("test_policy_down") +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): @@ -1096,6 +1097,19 @@ def test_detach_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = 
response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1111,6 +1125,9 @@ def test_detach_one_instance_decrement(): response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) response["AutoScalingGroups"][0]["Instances"].should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["AutoScalingGroups"][0]["Instances"]] + ) # test to ensure tag has been removed response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) @@ -1122,7 +1139,14 @@ def test_detach_one_instance_decrement(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_detach_one_instance(): @@ -1148,6 +1172,19 @@ def test_detach_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_detach = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1173,7 +1210,14 @@ def test_detach_one_instance(): tags = response["Reservations"][0]["Instances"][0]["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_one_instance_decrement(): @@ -1199,6 +1243,19 @@ def test_standby_one_instance_decrement(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1226,7 +1283,14 @@ def test_standby_one_instance_decrement(): tags = instance["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + 
instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_one_instance(): @@ -1252,6 +1316,19 @@ def test_standby_one_instance(): ], VPCZoneIdentifier=mocked_networking["subnet1"], ) + + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby = response["AutoScalingGroups"][0]["Instances"][0]["InstanceId"] instance_to_keep = response["AutoScalingGroups"][0]["Instances"][1]["InstanceId"] @@ -1279,6 +1356,12 @@ def test_standby_one_instance(): tags = instance["Tags"] tags.should.have.length_of(2) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + @mock_elb @mock_autoscaling @@ -1338,8 +1421,12 @@ def test_standby_elb_update(): response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_terminate_instance_decrement(): @@ -1366,6 +1453,18 @@ def test_standby_terminate_instance_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1409,7 +1508,14 @@ def test_standby_terminate_instance_decrement(): "terminated" ) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_terminate_instance_no_decrement(): @@ -1436,6 +1542,18 @@ def test_standby_terminate_instance_no_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", 
LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_terminate = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1479,7 +1597,14 @@ def test_standby_terminate_instance_no_decrement(): "terminated" ) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_terminate.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_detach_instance_decrement(): @@ -1506,6 +1631,18 @@ def test_standby_detach_instance_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1547,7 +1684,14 @@ def test_standby_detach_instance_decrement(): response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_detach_instance_no_decrement(): @@ -1574,6 +1718,18 @@ def test_standby_detach_instance_no_decrement(): VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_detach = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1615,7 +1771,14 @@ def test_standby_detach_instance_no_decrement(): response = ec2_client.describe_instances(InstanceIds=[instance_to_standby_detach]) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(2) + instance_to_standby_detach.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_standby_exit_standby(): @@ -1642,6 +1805,18 @@ def test_standby_exit_standby(): 
VPCZoneIdentifier=mocked_networking["subnet1"], ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) instance_to_standby_exit_standby = response["AutoScalingGroups"][0]["Instances"][0][ "InstanceId" @@ -1683,7 +1858,14 @@ def test_standby_exit_standby(): ) response["Reservations"][0]["Instances"][0]["State"]["Name"].should.equal("running") + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + instance_to_standby_exit_standby.should.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) + +@mock_elb @mock_autoscaling @mock_ec2 def test_attach_one_instance(): @@ -1711,6 +1893,18 @@ def test_attach_one_instance(): NewInstancesProtectedFromScaleIn=True, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + ec2 = boto3.resource("ec2", "us-east-1") instances_to_add = [ x.id for x in ec2.create_instances(ImageId="", MinCount=1, MaxCount=1) @@ -1727,6 +1921,9 @@ def test_attach_one_instance(): for instance in instances: instance["ProtectedFromScaleIn"].should.equal(True) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(3) + @mock_autoscaling @mock_ec2 @@ -1948,6 +2145,7 @@ def test_terminate_instance_via_ec2_in_autoscaling_group(): replaced_instance_id.should_not.equal(original_instance_id) +@mock_elb @mock_autoscaling @mock_ec2 def test_terminate_instance_in_auto_scaling_group_decrement(): @@ -1966,6 +2164,18 @@ def test_terminate_instance_in_auto_scaling_group_decrement(): NewInstancesProtectedFromScaleIn=False, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) original_instance_id = next( instance["InstanceId"] @@ -1979,7 +2189,11 @@ def test_terminate_instance_in_auto_scaling_group_decrement(): response["AutoScalingGroups"][0]["Instances"].should.equal([]) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(0) + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(0) + +@mock_elb @mock_autoscaling @mock_ec2 def 
test_terminate_instance_in_auto_scaling_group_no_decrement(): @@ -1998,6 +2212,18 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement(): NewInstancesProtectedFromScaleIn=False, ) + elb_client = boto3.client("elb", region_name="us-east-1") + elb_client.create_load_balancer( + LoadBalancerName="my-lb", + Listeners=[{"Protocol": "tcp", "LoadBalancerPort": 80, "InstancePort": 8080}], + AvailabilityZones=["us-east-1a", "us-east-1b"], + ) + + response = client.attach_load_balancers( + AutoScalingGroupName="test_asg", LoadBalancerNames=["my-lb"] + ) + response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=["test_asg"]) original_instance_id = next( instance["InstanceId"] @@ -2014,3 +2240,9 @@ def test_terminate_instance_in_auto_scaling_group_no_decrement(): ) replaced_instance_id.should_not.equal(original_instance_id) response["AutoScalingGroups"][0]["DesiredCapacity"].should.equal(1) + + response = elb_client.describe_load_balancers(LoadBalancerNames=["my-lb"]) + list(response["LoadBalancerDescriptions"][0]["Instances"]).should.have.length_of(1) + original_instance_id.shouldnt.be.within( + [x["InstanceId"] for x in response["LoadBalancerDescriptions"][0]["Instances"]] + ) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 470c5f8ff..50fd4fd6c 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals, print_function +from datetime import datetime from decimal import Decimal import boto @@ -2049,6 +2050,141 @@ def test_set_ttl(): resp["TimeToLiveDescription"]["TimeToLiveStatus"].should.equal("DISABLED") +@mock_dynamodb2 +def test_describe_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], + BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.describe_continuous_backups(TableName=table_name) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_describe_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with assert_raises(Exception) as e: + client.describe_continuous_backups(TableName="not-existing-table") + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + +@mock_dynamodb2 +def test_update_continuous_backups(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + table_name = client.create_table( + TableName="test", + AttributeDefinitions=[ + {"AttributeName": "client", "AttributeType": "S"}, + {"AttributeName": "app", "AttributeType": "S"}, + ], + KeySchema=[ + {"AttributeName": "client", "KeyType": "HASH"}, + {"AttributeName": "app", "KeyType": "RANGE"}, + ], 
+ BillingMode="PAY_PER_REQUEST", + )["TableDescription"]["TableName"] + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + earliest_datetime = point_in_time["EarliestRestorableDateTime"] + earliest_datetime.should.be.a(datetime) + latest_datetime = point_in_time["LatestRestorableDateTime"] + latest_datetime.should.be.a(datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + # a second update should not change anything + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + response["ContinuousBackupsDescription"]["ContinuousBackupsStatus"].should.equal( + "ENABLED" + ) + point_in_time = response["ContinuousBackupsDescription"][ + "PointInTimeRecoveryDescription" + ] + point_in_time["EarliestRestorableDateTime"].should.equal(earliest_datetime) + point_in_time["LatestRestorableDateTime"].should.equal(latest_datetime) + point_in_time["PointInTimeRecoveryStatus"].should.equal("ENABLED") + + # when + response = client.update_continuous_backups( + TableName=table_name, + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": False}, + ) + + # then + response["ContinuousBackupsDescription"].should.equal( + { + "ContinuousBackupsStatus": "ENABLED", + "PointInTimeRecoveryDescription": {"PointInTimeRecoveryStatus": "DISABLED"}, + } + ) + + +@mock_dynamodb2 +def test_update_continuous_backups_errors(): + # given + client = boto3.client("dynamodb", region_name="us-east-1") + + # when + with assert_raises(Exception) as e: + client.update_continuous_backups( + TableName="not-existing-table", + PointInTimeRecoverySpecification={"PointInTimeRecoveryEnabled": True}, + ) + + # then + ex = e.exception + ex.operation_name.should.equal("UpdateContinuousBackups") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("TableNotFoundException") + ex.response["Error"]["Message"].should.equal("Table not found: not-existing-table") + + # https://github.com/spulec/moto/issues/1043 @mock_dynamodb2 def test_query_missing_expr_names(): @@ -4298,13 +4434,8 @@ def test_transact_write_items_put_conditional_expressions(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." 
- ) # Assert all are present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -4393,13 +4524,8 @@ def test_transact_write_items_conditioncheck_fails(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." - ) # Assert the original email address is still present items = dynamodb.scan(TableName="test-table")["Items"] @@ -4495,13 +4621,8 @@ def test_transact_write_items_delete_with_failed_condition_expression(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." - ) # Assert the original item is still present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -4573,13 +4694,8 @@ def test_transact_write_items_update_with_failed_condition_expression(): ] ) # Assert the exception is correct - ex.exception.response["Error"]["Code"].should.equal( - "ConditionalCheckFailedException" - ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) - ex.exception.response["Error"]["Message"].should.equal( - "A condition specified in the operation could not be evaluated." 
- ) # Assert the original item is still present items = dynamodb.scan(TableName="test-table")["Items"] items.should.have.length_of(1) @@ -5029,3 +5145,126 @@ def test_update_item_atomic_counter_return_values(): "v" in response["Attributes"] ), "v has been updated, and should be returned here" response["Attributes"]["v"]["N"].should.equal("8") + + +@mock_dynamodb2 +def test_update_item_atomic_counter_from_zero(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + + key = {"t_id": {"S": "item1"}} + + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add n_i :inc1, n_f :inc2", + ExpressionAttributeValues={":inc1": {"N": "1.2"}, ":inc2": {"N": "-0.5"}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["n_i"]["N"] == "1.2" + assert updated_item["n_f"]["N"] == "-0.5" + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"SS": ["hello"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["SS"] == ["hello"] + + +@mock_dynamodb2 +def test_update_item_add_to_non_existent_number_set(): + table = "table_t" + ddb_mock = boto3.client("dynamodb", region_name="eu-west-1") + ddb_mock.create_table( + TableName=table, + KeySchema=[{"AttributeName": "t_id", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "t_id", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + key = {"t_id": {"S": "item1"}} + ddb_mock.put_item( + TableName=table, Item=key, + ) + + ddb_mock.update_item( + TableName=table, + Key=key, + UpdateExpression="add s_i :s1", + ExpressionAttributeValues={":s1": {"NS": ["3"]}}, + ) + updated_item = ddb_mock.get_item(TableName=table, Key=key)["Item"] + assert updated_item["s_i"]["NS"] == ["3"] + + +@mock_dynamodb2 +def test_transact_write_items_fails_with_transaction_canceled_exception(): + table_schema = { + "KeySchema": [{"AttributeName": "id", "KeyType": "HASH"}], + "AttributeDefinitions": [{"AttributeName": "id", "AttributeType": "S"},], + } + dynamodb = boto3.client("dynamodb", region_name="us-east-1") + dynamodb.create_table( + TableName="test-table", BillingMode="PAY_PER_REQUEST", **table_schema + ) + # Insert one item + dynamodb.put_item(TableName="test-table", Item={"id": {"S": "foo"}}) + # Update two items, the one that exists and another that doesn't + with assert_raises(ClientError) as ex: + dynamodb.transact_write_items( + TransactItems=[ + { + "Update": { + "Key": {"id": {"S": "foo"}}, + "TableName": "test-table", + "UpdateExpression": "SET #k = :v", + "ConditionExpression": "attribute_exists(id)", + "ExpressionAttributeNames": {"#k": "key"}, + "ExpressionAttributeValues": {":v": {"S": "value"}}, + } + }, + { + "Update": { + 
"Key": {"id": {"S": "doesnotexist"}}, + "TableName": "test-table", + "UpdateExpression": "SET #e = :v", + "ConditionExpression": "attribute_exists(id)", + "ExpressionAttributeNames": {"#e": "key"}, + "ExpressionAttributeValues": {":v": {"S": "value"}}, + } + }, + ] + ) + ex.exception.response["Error"]["Code"].should.equal("TransactionCanceledException") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Transaction cancelled, please refer cancellation reasons for specific reasons [None, ConditionalCheckFailed]" + ) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index 6fba713ec..33f65d5ec 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1307,16 +1307,16 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {"item4"}}, ) current_item["str_set"] = current_item["str_set"].union({"item4"}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set - # Should throw: 'The provided key element does not match the schema' - assert_failure_due_to_key_not_in_schema( - table.update_item, + table.update_item( Key=item_key, UpdateExpression="ADD non_existing_str_set :v", ExpressionAttributeValues={":v": {"item4"}}, ) + current_item["non_existing_str_set"] = {"item4"} + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a num value to a num set table.update_item( @@ -1325,7 +1325,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": {6}}, ) current_item["num_set"] = current_item["num_set"].union({6}) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a value to a number value table.update_item( @@ -1334,7 +1334,7 @@ def test_update_item_add_with_expression(): ExpressionAttributeValues={":v": 20}, ) current_item["num_val"] = current_item["num_val"] + 20 - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number value to a string set, should raise Client Error table.update_item.when.called_with( @@ -1342,7 +1342,7 @@ def test_update_item_add_with_expression(): UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": 20}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to add a number set to the string set, should raise a ClientError table.update_item.when.called_with( @@ -1350,7 +1350,7 @@ def test_update_item_add_with_expression(): UpdateExpression="ADD str_set :v", ExpressionAttributeValues={":v": {20}}, ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Attempt to update with a bad expression table.update_item.when.called_with( @@ -1388,17 +1388,18 @@ def test_update_item_add_with_nested_sets(): current_item["nested"]["str_set"] = current_item["nested"]["str_set"].union( {"item4"} ) - 
dict(table.get_item(Key=item_key)["Item"]).should.equal(current_item) + assert dict(table.get_item(Key=item_key)["Item"]) == current_item # Update item to add a string value to a non-existing set # Should raise - assert_failure_due_to_key_not_in_schema( - table.update_item, + table.update_item( Key=item_key, UpdateExpression="ADD #ns.#ne :v", ExpressionAttributeNames={"#ns": "nested", "#ne": "non_existing_str_set"}, ExpressionAttributeValues={":v": {"new_item"}}, ) + current_item["nested"]["non_existing_str_set"] = {"new_item"} + assert dict(table.get_item(Key=item_key)["Item"]) == current_item @mock_dynamodb2 diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 0509e1a45..d53bd14aa 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1126,6 +1126,111 @@ def test_run_instance_with_keypair(): instance.key_name.should.equal("keypair_name") +@mock_ec2 +def test_run_instance_with_block_device_mappings(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": 50}}], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + volumes["Volumes"][0]["Size"].should.equal(50) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_ebs(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [{"DeviceName": "/dev/sda2"}], + } + with assert_raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.exception.response["Error"]["Code"].should.equal("MissingParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The request must contain the parameter ebs" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_missing_size(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"VolumeType": "standard"}} + ], + } + with assert_raises(ClientError) as ex: + ec2_client.run_instances(**kwargs) + + ex.exception.response["Error"]["Code"].should.equal("MissingParameter") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "The request must contain the parameter size or snapshotId" + ) + + +@mock_ec2 +def test_run_instance_with_block_device_mappings_from_snapshot(): + ec2_client = boto3.client("ec2", region_name="us-east-1") + ec2_resource = boto3.resource("ec2", region_name="us-east-1") + volume_details = { + "AvailabilityZone": "1a", + "Size": 30, + } + + volume = ec2_resource.create_volume(**volume_details) + snapshot = volume.create_snapshot() + kwargs = { + "MinCount": 1, + "MaxCount": 1, + "ImageId": "ami-d3adb33f", + "KeyName": "the_key", + "InstanceType": "t1.micro", + "BlockDeviceMappings": [ + {"DeviceName": "/dev/sda2", "Ebs": {"SnapshotId": 
snapshot.snapshot_id}} + ], + } + + ec2_client.run_instances(**kwargs) + + instances = ec2_client.describe_instances() + volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][ + "Ebs" + ] + + volumes = ec2_client.describe_volumes(VolumeIds=[volume["VolumeId"]]) + + volumes["Volumes"][0]["Size"].should.equal(30) + volumes["Volumes"][0]["SnapshotId"].should.equal(snapshot.snapshot_id) + + @mock_ec2_deprecated def test_describe_instance_status_no_instances(): conn = boto.connect_ec2("the_key", "the_secret") diff --git a/tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py b/tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py similarity index 92% rename from tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py rename to tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py index eb685d80a..3f676af96 100644 --- a/tests/test_ec2_instance_connect/test_ec2_instance_connect_boto3.py +++ b/tests/test_ec2instanceconnect/test_ec2instanceconnect_boto3.py @@ -1,6 +1,6 @@ import boto3 -from moto import mock_ec2_instance_connect +from moto import mock_ec2instanceconnect pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDV5+voluw2zmzqpqCAqtsyoP01TQ8Ydx1eS1yD6wUsHcPqMIqpo57YxiC8XPwrdeKQ6GG6MC3bHsgXoPypGP0LyixbiuLTU31DnnqorcHt4bWs6rQa7dK2pCCflz2fhYRt5ZjqSNsAKivIbqkH66JozN0SySIka3kEV79GdB0BicioKeEJlCwM9vvxafyzjWf/z8E0lh4ni3vkLpIVJ0t5l+Qd9QMJrT6Is0SCQPVagTYZoi8+fWDoGsBa8vyRwDjEzBl28ZplKh9tSyDkRIYszWTpmK8qHiqjLYZBfAxXjGJbEYL1iig4ZxvbYzKEiKSBi1ZMW9iWjHfZDZuxXAmB @@ -8,7 +8,7 @@ example """ -@mock_ec2_instance_connect +@mock_ec2instanceconnect def test_send_ssh_public_key(): client = boto3.client("ec2-instance-connect", region_name="us-east-1") fake_request_id = "example-2a47-4c91-9700-e37e85162cb6" diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 58a820fee..c3ee4c96d 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -9,6 +9,38 @@ from botocore.exceptions import ClientError from nose.tools import assert_raises +def generate_thing_group_tree(iot_client, tree_dict, _parent=None): + """ + Generates a thing group tree given the input tree structure. + :param iot_client: the iot client for boto3 + :param tree_dict: dictionary with the key being the group_name, and the value being a sub tree. 
+        tree_dict = {
+            "group_name_1a": {
+                "group_name_2a": {
+                    "group_name_3a": {} or None
+                },
+            },
+            "group_name_1b": {}
+        }
+    :return: a dictionary of created groups, keyed by group name
+    """
+    if tree_dict is None:
+        tree_dict = {}
+    created_dict = {}
+    for group_name in tree_dict.keys():
+        params = {"thingGroupName": group_name}
+        if _parent:
+            params["parentGroupName"] = _parent
+        created_group = iot_client.create_thing_group(**params)
+        created_dict[group_name] = created_group
+        subtree_dict = generate_thing_group_tree(
+            iot_client=iot_client, tree_dict=tree_dict[group_name], _parent=group_name
+        )
+        created_dict.update(subtree_dict)
+    return created_dict
+
+
 @mock_iot
 def test_attach_policy():
     client = boto3.client("iot", region_name="ap-northeast-1")
@@ -756,25 +788,143 @@ def test_delete_principal_thing():
     client.delete_certificate(certificateId=cert_id)
 
+class TestListThingGroup:
+    group_name_1a = "my-group-name-1a"
+    group_name_1b = "my-group-name-1b"
+    group_name_2a = "my-group-name-2a"
+    group_name_2b = "my-group-name-2b"
+    group_name_3a = "my-group-name-3a"
+    group_name_3b = "my-group-name-3b"
+    group_name_3c = "my-group-name-3c"
+    group_name_3d = "my-group-name-3d"
+    tree_dict = {
+        group_name_1a: {
+            group_name_2a: {group_name_3a: {}, group_name_3b: {}},
+            group_name_2b: {group_name_3c: {}, group_name_3d: {}},
+        },
+        group_name_1b: {},
+    }
+
+    @mock_iot
+    def test_should_list_all_groups(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups()
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(8)
+
+    @mock_iot
+    def test_should_list_all_groups_non_recursively(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(recursive=False)
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+
+    @mock_iot
+    def test_should_list_all_groups_filtered_by_parent(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(parentGroup=self.group_name_1a)
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(6)
+        resp = client.list_thing_groups(parentGroup=self.group_name_2a)
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(parentGroup=self.group_name_1b)
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+        with assert_raises(ClientError) as e:
+            client.list_thing_groups(parentGroup="nonexistent-group-name")
+        e.exception.response["Error"]["Code"].should.equal(
+            "ResourceNotFoundException"
+        )
+
+    @mock_iot
+    def test_should_list_all_groups_filtered_by_parent_non_recursively(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(parentGroup=self.group_name_1a, recursive=False)
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(parentGroup=self.group_name_2a, recursive=False)
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+
+    @mock_iot
+    def 
test_should_list_all_groups_filtered_by_name_prefix(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(namePrefixFilter="my-group-name-1")
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(namePrefixFilter="my-group-name-3")
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(4)
+        resp = client.list_thing_groups(namePrefixFilter="prefix-which-does-not-match")
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+
+    @mock_iot
+    def test_should_list_all_groups_filtered_by_name_prefix_non_recursively(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-1", recursive=False
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-3", recursive=False
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+
+    @mock_iot
+    def test_should_list_all_groups_filtered_by_name_prefix_and_parent(self):
+        # setup
+        client = boto3.client("iot", region_name="ap-northeast-1")
+        group_catalog = generate_thing_group_tree(client, self.tree_dict)
+        # test
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-2", parentGroup=self.group_name_1a
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(2)
+        resp = client.list_thing_groups(
+            namePrefixFilter="my-group-name-3", parentGroup=self.group_name_1a
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(4)
+        resp = client.list_thing_groups(
+            namePrefixFilter="prefix-which-does-not-match",
+            parentGroup=self.group_name_1a,
+        )
+        resp.should.have.key("thingGroups")
+        resp["thingGroups"].should.have.length_of(0)
+
+
 @mock_iot
 def test_delete_thing_group():
     client = boto3.client("iot", region_name="ap-northeast-1")
     group_name_1a = "my-group-name-1a"
     group_name_2a = "my-group-name-2a"
-    # --1a
-    # |--2a
-
-    # create thing groups tree
-    # 1
-    thing_group1a = client.create_thing_group(thingGroupName=group_name_1a)
-    thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a)
-    thing_group1a.should.have.key("thingGroupArn")
-    # 2
-    thing_group2a = client.create_thing_group(
-        thingGroupName=group_name_2a, parentGroupName=group_name_1a
-    )
-    thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a)
-    thing_group2a.should.have.key("thingGroupArn")
+    tree_dict = {
+        group_name_1a: {group_name_2a: {},},
+    }
+    group_catalog = generate_thing_group_tree(client, tree_dict)
 
     # delete group with child
     try:
@@ -809,56 +959,14 @@ def test_describe_thing_group_metadata_hierarchy():
     group_name_3c = "my-group-name-3c"
     group_name_3d = "my-group-name-3d"
 
-    # --1a
-    # |--2a
-    # |    |--3a
-    # |    |--3b
-    # |
-    # |--2b
-    # |    |--3c
-    # |    |--3d
-    # --1b
-
-    # create thing groups tree
-    # 1
-    thing_group1a = client.create_thing_group(thingGroupName=group_name_1a)
-    thing_group1a.should.have.key("thingGroupName").which.should.equal(group_name_1a)
-    thing_group1a.should.have.key("thingGroupArn")
-    thing_group1b = client.create_thing_group(thingGroupName=group_name_1b)
-
thing_group1b.should.have.key("thingGroupName").which.should.equal(group_name_1b) - thing_group1b.should.have.key("thingGroupArn") - # 2 - thing_group2a = client.create_thing_group( - thingGroupName=group_name_2a, parentGroupName=group_name_1a - ) - thing_group2a.should.have.key("thingGroupName").which.should.equal(group_name_2a) - thing_group2a.should.have.key("thingGroupArn") - thing_group2b = client.create_thing_group( - thingGroupName=group_name_2b, parentGroupName=group_name_1a - ) - thing_group2b.should.have.key("thingGroupName").which.should.equal(group_name_2b) - thing_group2b.should.have.key("thingGroupArn") - # 3 - thing_group3a = client.create_thing_group( - thingGroupName=group_name_3a, parentGroupName=group_name_2a - ) - thing_group3a.should.have.key("thingGroupName").which.should.equal(group_name_3a) - thing_group3a.should.have.key("thingGroupArn") - thing_group3b = client.create_thing_group( - thingGroupName=group_name_3b, parentGroupName=group_name_2a - ) - thing_group3b.should.have.key("thingGroupName").which.should.equal(group_name_3b) - thing_group3b.should.have.key("thingGroupArn") - thing_group3c = client.create_thing_group( - thingGroupName=group_name_3c, parentGroupName=group_name_2b - ) - thing_group3c.should.have.key("thingGroupName").which.should.equal(group_name_3c) - thing_group3c.should.have.key("thingGroupArn") - thing_group3d = client.create_thing_group( - thingGroupName=group_name_3d, parentGroupName=group_name_2b - ) - thing_group3d.should.have.key("thingGroupName").which.should.equal(group_name_3d) - thing_group3d.should.have.key("thingGroupArn") + tree_dict = { + group_name_1a: { + group_name_2a: {group_name_3a: {}, group_name_3b: {}}, + group_name_2b: {group_name_3c: {}, group_name_3d: {}}, + }, + group_name_1b: {}, + } + group_catalog = generate_thing_group_tree(client, tree_dict) # describe groups # groups level 1 @@ -910,7 +1018,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2a.should.have.key("version") # 2b thing_group_description2b = client.describe_thing_group( @@ -936,7 +1044,7 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description2b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description2b.should.have.key("version") # groups level 3 # 3a @@ -963,13 +1071,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2a) thing_group_description3a["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2a["thingGroupArn"]) + ].should.match(group_catalog[group_name_2a]["thingGroupArn"]) thing_group_description3a.should.have.key("version") # 3b thing_group_description3b = client.describe_thing_group( @@ -995,13 +1103,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) 
thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2a) thing_group_description3b["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2a["thingGroupArn"]) + ].should.match(group_catalog[group_name_2a]["thingGroupArn"]) thing_group_description3b.should.have.key("version") # 3c thing_group_description3c = client.describe_thing_group( @@ -1027,13 +1135,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2b) thing_group_description3c["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2b["thingGroupArn"]) + ].should.match(group_catalog[group_name_2b]["thingGroupArn"]) thing_group_description3c.should.have.key("version") # 3d thing_group_description3d = client.describe_thing_group( @@ -1059,13 +1167,13 @@ def test_describe_thing_group_metadata_hierarchy(): ].should.match(group_name_1a) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][0][ "groupArn" - ].should.match(thing_group1a["thingGroupArn"]) + ].should.match(group_catalog[group_name_1a]["thingGroupArn"]) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupName" ].should.match(group_name_2b) thing_group_description3d["thingGroupMetadata"]["rootToParentThingGroups"][1][ "groupArn" - ].should.match(thing_group2b["thingGroupArn"]) + ].should.match(group_catalog[group_name_2b]["thingGroupArn"]) thing_group_description3d.should.have.key("version") diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 2429d7e93..675948150 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,10 +1,17 @@ +import base64 +import json +import time +import zlib +from io import BytesIO +from zipfile import ZipFile, ZIP_DEFLATED + import boto3 import os import sure # noqa import six from botocore.exceptions import ClientError -from moto import mock_logs, settings +from moto import mock_logs, settings, mock_lambda, mock_iam from nose.tools import assert_raises from nose import SkipTest @@ -425,3 +432,408 @@ def test_untag_log_group(): assert response["tags"] == remaining_tags response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_describe_subscription_filters(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + response = client.describe_subscription_filters(logGroupName=log_group_name) + + # then + response["subscriptionFilters"].should.have.length_of(0) + + +@mock_logs +def test_describe_subscription_filters_errors(): + # given + client = boto3.client("logs", "us-east-1") + + # when + with assert_raises(ClientError) as e: + client.describe_subscription_filters(logGroupName="not-existing-log-group",) + + # then + ex = e.exception + ex.operation_name.should.equal("DescribeSubscriptionFilters") + 
ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + +@mock_lambda +@mock_logs +def test_put_subscription_filter_update(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + creation_time = filter["creationTime"] + creation_time.should.be.a(int) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "" + + # when + # to update an existing subscription filter the 'filerName' must be identical + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="[]", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.equal(creation_time) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "[]" + + # when + # only one subscription filter can be associated with a log group + with assert_raises(ClientError) as e: + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test-2", + filterPattern="", + destinationArn=function_arn, + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("LimitExceededException") + ex.response["Error"]["Message"].should.equal("Resource limit exceeded.") + + +@mock_lambda +@mock_logs +def test_put_subscription_filter_with_lambda(): + # given + region_name = "us-east-1" + client_lambda = boto3.client("lambda", region_name) + client_logs = boto3.client("logs", region_name) + log_group_name = "/test" + log_stream_name = "stream" + client_logs.create_log_group(logGroupName=log_group_name) + client_logs.create_log_stream( + logGroupName=log_group_name, logStreamName=log_stream_name + ) + function_arn = client_lambda.create_function( + FunctionName="test", + Runtime="python3.8", + Role=_get_role_name(region_name), + Handler="lambda_function.lambda_handler", + 
Code={"ZipFile": _get_test_zip_file()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + )["FunctionArn"] + + # when + client_logs.put_subscription_filter( + logGroupName=log_group_name, + filterName="test", + filterPattern="", + destinationArn=function_arn, + ) + + # then + response = client_logs.describe_subscription_filters(logGroupName=log_group_name) + response["subscriptionFilters"].should.have.length_of(1) + filter = response["subscriptionFilters"][0] + filter["creationTime"].should.be.a(int) + filter["destinationArn"] = "arn:aws:lambda:us-east-1:123456789012:function:test" + filter["distribution"] = "ByLogStream" + filter["logGroupName"] = "/test" + filter["filterName"] = "test" + filter["filterPattern"] = "" + + # when + client_logs.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + {"timestamp": 0, "message": "test"}, + {"timestamp": 0, "message": "test 2"}, + ], + ) + + # then + msg_showed_up, received_message = _wait_for_log_msg( + client_logs, "/aws/lambda/test", "awslogs" + ) + assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format( + received_message + ) + + data = json.loads(received_message)["awslogs"]["data"] + response = json.loads( + zlib.decompress(base64.b64decode(data), 16 + zlib.MAX_WBITS).decode("utf-8") + ) + response["messageType"].should.equal("DATA_MESSAGE") + response["owner"].should.equal("123456789012") + response["logGroup"].should.equal("/test") + response["logStream"].should.equal("stream") + response["subscriptionFilters"].should.equal(["test"]) + log_events = sorted(response["logEvents"], key=lambda log_event: log_event["id"]) + log_events.should.have.length_of(2) + log_events[0]["id"].should.be.a(int) + log_events[0]["message"].should.equal("test") + log_events[0]["timestamp"].should.equal(0) + log_events[1]["id"].should.be.a(int) + log_events[1]["message"].should.equal("test 2") + log_events[1]["timestamp"].should.equal(0) + + +@mock_logs +def test_put_subscription_filter_errors(): + # given + client = boto3.client("logs", "us-east-1") + log_group_name = "/test" + client.create_log_group(logGroupName=log_group_name) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="not-existing-log-group", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:test", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("ResourceNotFoundException") + ex.response["Error"]["Message"].should.equal( + "The specified log group does not exist" + ) + + # when + with assert_raises(ClientError) as e: + client.put_subscription_filter( + logGroupName="/test", + filterName="test", + filterPattern="", + destinationArn="arn:aws:lambda:us-east-1:123456789012:function:not-existing", + ) + + # then + ex = e.exception + ex.operation_name.should.equal("PutSubscriptionFilter") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterException") + ex.response["Error"]["Message"].should.equal( + "Could not execute the lambda function. " + "Make sure you have given CloudWatch Logs permission to execute your function." 
+    )
+
+
+@mock_lambda
+@mock_logs
+def test_delete_subscription_filter():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        Code={"ZipFile": _get_test_zip_file()},
+        Description="test lambda function",
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # when
+    client_logs.delete_subscription_filter(
+        logGroupName="/test", filterName="test",
+    )
+
+    # then
+    response = client_logs.describe_subscription_filters(logGroupName=log_group_name)
+    response["subscriptionFilters"].should.have.length_of(0)
+
+
+@mock_lambda
+@mock_logs
+def test_delete_subscription_filter_errors():
+    # given
+    region_name = "us-east-1"
+    client_lambda = boto3.client("lambda", region_name)
+    client_logs = boto3.client("logs", region_name)
+    log_group_name = "/test"
+    client_logs.create_log_group(logGroupName=log_group_name)
+    function_arn = client_lambda.create_function(
+        FunctionName="test",
+        Runtime="python3.8",
+        Role=_get_role_name(region_name),
+        Handler="lambda_function.lambda_handler",
+        Code={"ZipFile": _get_test_zip_file()},
+        Description="test lambda function",
+        Timeout=3,
+        MemorySize=128,
+        Publish=True,
+    )["FunctionArn"]
+    client_logs.put_subscription_filter(
+        logGroupName=log_group_name,
+        filterName="test",
+        filterPattern="",
+        destinationArn=function_arn,
+    )
+
+    # when
+    with assert_raises(ClientError) as e:
+        client_logs.delete_subscription_filter(
+            logGroupName="not-existing-log-group", filterName="test",
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeleteSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "The specified log group does not exist"
+    )
+
+    # when
+    with assert_raises(ClientError) as e:
+        client_logs.delete_subscription_filter(
+            logGroupName="/test", filterName="wrong-filter-name",
+        )
+
+    # then
+    ex = e.exception
+    ex.operation_name.should.equal("DeleteSubscriptionFilter")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ResourceNotFoundException")
+    ex.response["Error"]["Message"].should.equal(
+        "The specified subscription filter does not exist."
+ ) + + +def _get_role_name(region_name): + with mock_iam(): + iam = boto3.client("iam", region_name=region_name) + try: + return iam.get_role(RoleName="test-role")["Role"]["Arn"] + except ClientError: + return iam.create_role( + RoleName="test-role", AssumeRolePolicyDocument="test policy", Path="/", + )["Role"]["Arn"] + + +def _get_test_zip_file(): + func_str = """ +def lambda_handler(event, context): + return event +""" + + zip_output = BytesIO() + zip_file = ZipFile(zip_output, "w", ZIP_DEFLATED) + zip_file.writestr("lambda_function.py", func_str) + zip_file.close() + zip_output.seek(0) + return zip_output.read() + + +def _wait_for_log_msg(client, log_group_name, expected_msg_part): + received_messages = [] + start = time.time() + while (time.time() - start) < 10: + result = client.describe_log_streams(logGroupName=log_group_name) + log_streams = result.get("logStreams") + if not log_streams: + time.sleep(1) + continue + + for log_stream in log_streams: + result = client.get_log_events( + logGroupName=log_group_name, logStreamName=log_stream["logStreamName"], + ) + received_messages.extend( + [event["message"] for event in result.get("events")] + ) + for message in received_messages: + if expected_msg_part in message: + return True, message + time.sleep(1) + return False, received_messages diff --git a/tests/test_managedblockchain/__init__.py b/tests/test_managedblockchain/__init__.py new file mode 100644 index 000000000..baffc4882 --- /dev/null +++ b/tests/test_managedblockchain/__init__.py @@ -0,0 +1 @@ +from __future__ import unicode_literals diff --git a/tests/test_managedblockchain/helpers.py b/tests/test_managedblockchain/helpers.py new file mode 100644 index 000000000..38c13b512 --- /dev/null +++ b/tests/test_managedblockchain/helpers.py @@ -0,0 +1,67 @@ +from __future__ import unicode_literals + + +default_frameworkconfiguration = {"Fabric": {"Edition": "STARTER"}} + +default_votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } +} + +default_memberconfiguration = { + "Name": "testmember1", + "Description": "Test Member 1", + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": "admin", "AdminPassword": "Admin12345"} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": False}}} + }, +} + +default_policy_actions = {"Invitations": [{"Principal": "123456789012"}]} + +multiple_policy_actions = { + "Invitations": [{"Principal": "123456789012"}, {"Principal": "123456789013"}] +} + + +def member_id_exist_in_list(members, memberid): + memberidxists = False + for member in members: + if member["Id"] == memberid: + memberidxists = True + break + return memberidxists + + +def create_member_configuration( + name, adminuser, adminpass, cloudwatchenabled, description=None +): + d = { + "Name": name, + "FrameworkConfiguration": { + "Fabric": {"AdminUsername": adminuser, "AdminPassword": adminpass} + }, + "LogPublishingConfiguration": { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": cloudwatchenabled}}} + }, + } + + if description is not None: + d["Description"] = description + + return d + + +def select_invitation_id_for_network(invitations, networkid, status=None): + # Get invitations based on network and maybe status + invitationsfornetwork = [] + for invitation in invitations: + if invitation["NetworkSummary"]["Id"] == networkid: + if status is None or invitation["Status"] == status: + 
invitationsfornetwork.append(invitation["InvitationId"]) + return invitationsfornetwork diff --git a/tests/test_managedblockchain/test_managedblockchain_invitations.py b/tests/test_managedblockchain/test_managedblockchain_invitations.py new file mode 100644 index 000000000..81b20a9ba --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_invitations.py @@ -0,0 +1,142 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_2_invitations(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.multiple_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"].should.have.length_of(2) + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + response["Invitations"][1]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][1]["Status"].should.equal("PENDING") + + +@mock_managedblockchain +def test_reject_invitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Reject - thanks but no thanks + response = conn.reject_invitation(InvitationId=invitation_id) + + # Check the invitation status + response = 
conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_reject_invitation_badinvitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + response = conn.reject_invitation.when.called_with( + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "InvitationId in-ABCDEFGHIJKLMNOP0123456789 not found.") diff --git a/tests/test_managedblockchain/test_managedblockchain_members.py b/tests/test_managedblockchain/test_managedblockchain_members.py new file mode 100644 index 000000000..76d29dd55 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_members.py @@ -0,0 +1,669 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_another_member(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ) + member_id2 = response["MemberId"] + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("ACCEPTED") + + # Find 
member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + helpers.member_id_exist_in_list(members, member_id2).should.equal(True) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["Name"].should.equal("testmember2") + + # Update member + logconfignewenabled = not helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ]["Fabric"]["CaLogs"]["Cloudwatch"]["Enabled"] + logconfignew = { + "Fabric": {"CaLogs": {"Cloudwatch": {"Enabled": logconfignewenabled}}} + } + conn.update_member( + NetworkId=network_id, + MemberId=member_id2, + LogPublishingConfiguration=logconfignew, + ) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["LogPublishingConfiguration"]["Fabric"]["CaLogs"]["Cloudwatch"][ + "Enabled" + ].should.equal(logconfignewenabled) + + +@mock_managedblockchain +def test_create_another_member_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["Status"].should.equal("PENDING") + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Check the invitation status + response = conn.list_invitations() + response["Invitations"][0]["InvitationId"].should.equal(invitation_id) + response["Invitations"][0]["Status"].should.equal("ACCEPTED") + + # Find member in full list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + helpers.member_id_exist_in_list(members, member_id2).should.equal(True) + + # Get member 2 details + response = conn.get_member(NetworkId=network_id, MemberId=member_id2) + response["Member"]["Description"].should.equal("Test Member 2") + + # Try to create member with already used invitation + response = conn.create_member.when.called_with( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2 Duplicate" + ), + ).should.throw(Exception, "Invitation {0} not 
valid".format(invitation_id)) + + # Delete member 2 + conn.delete_member(NetworkId=network_id, MemberId=member_id2) + + # Member is still in the list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + + # But cannot get + response = conn.get_member.when.called_with( + NetworkId=network_id, MemberId=member_id2, + ).should.throw(Exception, "Member {0} not found".format(member_id2)) + + # Delete member 1 + conn.delete_member(NetworkId=network_id, MemberId=member_id) + + # Network should be gone + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(0) + + # Verify the invitation network status is DELETED + # Get the invitation + response = conn.list_invitations() + response["Invitations"].should.have.length_of(1) + response["Invitations"][0]["NetworkSummary"]["Id"].should.equal(network_id) + response["Invitations"][0]["NetworkSummary"]["Status"].should.equal("DELETED") + + +@mock_managedblockchain +def test_create_and_delete_member(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal (create additional member) + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + both_policy_actions = { + "Invitations": [{"Principal": "123456789012"}], + "Removals": [{"MemberId": member_id2}], + } + + # Create proposal (invite and remove member) + response = conn.create_proposal( + NetworkId=network_id, MemberId=member_id, Actions=both_policy_actions, + ) + proposal_id2 = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id2) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id2, + VoterMemberId=member_id, + Vote="YES", + ) + + # Check the invitation status + response = conn.list_invitations() + invitations = helpers.select_invitation_id_for_network( + response["Invitations"], network_id, "PENDING" + ) + invitations.should.have.length_of(1) + + # Member is still in the list + response = conn.list_members(NetworkId=network_id) + members = response["Members"] + members.should.have.length_of(2) + foundmember2 = False + for member in members: + if member["Id"] == member_id2 and member["Status"] == "DELETED": + foundmember2 = True + foundmember2.should.equal(True) + + 
+@mock_managedblockchain
+def test_create_too_many_members():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    # Create network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+    member_id = response["MemberId"]
+
+    # Create 4 more members - create invitations for 5
+    for counter in range(2, 7):
+        # Create proposal
+        response = conn.create_proposal(
+            NetworkId=network_id,
+            MemberId=member_id,
+            Actions=helpers.default_policy_actions,
+        )
+        proposal_id = response["ProposalId"]
+
+        # Vote yes
+        response = conn.vote_on_proposal(
+            NetworkId=network_id,
+            ProposalId=proposal_id,
+            VoterMemberId=member_id,
+            Vote="YES",
+        )
+
+    for counter in range(2, 6):
+        # Get the invitation
+        response = conn.list_invitations()
+        invitation_id = helpers.select_invitation_id_for_network(
+            response["Invitations"], network_id, "PENDING"
+        )[0]
+
+        # Create the member
+        response = conn.create_member(
+            InvitationId=invitation_id,
+            NetworkId=network_id,
+            MemberConfiguration=helpers.create_member_configuration(
+                "testmember" + str(counter),
+                "admin",
+                "Admin12345",
+                False,
+                "Test Member " + str(counter),
+            ),
+        )
+        member_id = response["MemberId"]
+
+        # Find member in full list
+        response = conn.list_members(NetworkId=network_id)
+        members = response["Members"]
+        members.should.have.length_of(counter)
+        helpers.member_id_exist_in_list(members, member_id).should.equal(True)
+
+        # Get member details
+        response = conn.get_member(NetworkId=network_id, MemberId=member_id)
+        response["Member"]["Description"].should.equal("Test Member " + str(counter))
+
+    # Try to create the sixth
+    response = conn.list_invitations()
+    invitation_id = helpers.select_invitation_id_for_network(
+        response["Invitations"], network_id, "PENDING"
+    )[0]
+
+    # Creating a sixth member should exceed the STARTER edition member limit
+    response = conn.create_member.when.called_with(
+        InvitationId=invitation_id,
+        NetworkId=network_id,
+        MemberConfiguration=helpers.create_member_configuration(
+            "testmember6", "admin", "Admin12345", False, "Test Member 6"
+        ),
+    ).should.throw(
+        Exception,
+        "5 is the maximum number of members allowed in a STARTER Edition network",
+    )
+
+
+@mock_managedblockchain
+def test_create_another_member_alreadyhave():
+    conn = boto3.client("managedblockchain", region_name="us-east-1")
+
+    # Create network
+    response = conn.create_network(
+        Name="testnetwork1",
+        Description="Test Network 1",
+        Framework="HYPERLEDGER_FABRIC",
+        FrameworkVersion="1.2",
+        FrameworkConfiguration=helpers.default_frameworkconfiguration,
+        VotingPolicy=helpers.default_votingpolicy,
+        MemberConfiguration=helpers.default_memberconfiguration,
+    )
+    network_id = response["NetworkId"]
+    member_id = response["MemberId"]
+
+    # Create proposal
+    response = conn.create_proposal(
+        NetworkId=network_id,
+        MemberId=member_id,
+        Actions=helpers.default_policy_actions,
+    )
+    proposal_id = response["ProposalId"]
+
+    # Vote yes
+    response = conn.vote_on_proposal(
+        NetworkId=network_id,
+        ProposalId=proposal_id,
+        VoterMemberId=member_id,
+        Vote="YES",
+    )
+
+    # Get the invitation
+    response = conn.list_invitations()
+    invitation_id = response["Invitations"][0]["InvitationId"]
+
+    # Should fail trying to create with same name
+    response = 
conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember1", "admin", "Admin12345", False + ), + ).should.throw( + Exception, + "Member name {0} already exists in network {1}".format( + "testmember1", network_id + ), + ) + + +@mock_managedblockchain +def test_create_another_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + InvitationId="id-ABCDEFGHIJKLMNOP0123456789", + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_another_member_badinvitation(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId="in-ABCDEFGHIJKLMNOP0123456789", + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ), + ).should.throw(Exception, "Invitation in-ABCDEFGHIJKLMNOP0123456789 not valid") + + +@mock_managedblockchain +def test_create_another_member_adminpassword(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + badadminpassmemberconf = helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False + ) + + # Too short + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badap" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw( + Exception, + "Invalid length for parameter MemberConfiguration.FrameworkConfiguration.Fabric.AdminPassword", + ) + + # No uppercase or numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badadminpwd" + response = 
conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # No lowercase or numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "BADADMINPWD" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # No numbers + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badAdminpwd" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + # Invalid character + badadminpassmemberconf["FrameworkConfiguration"]["Fabric"][ + "AdminPassword" + ] = "badAdmin@pwd1" + response = conn.create_member.when.called_with( + NetworkId=network_id, + InvitationId=invitation_id, + MemberConfiguration=badadminpassmemberconf, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_list_members_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_members.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_member.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.delete_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_delete_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.delete_member.when.called_with( + NetworkId=network_id, MemberId="m-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not 
found") + + +@mock_managedblockchain +def test_update_member_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.update_member.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_update_member_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.update_member.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + LogPublishingConfiguration=helpers.default_memberconfiguration[ + "LogPublishingConfiguration" + ], + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_networks.py b/tests/test_managedblockchain/test_managedblockchain_networks.py new file mode 100644 index 000000000..4e1579017 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_networks.py @@ -0,0 +1,123 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from . import helpers + + +@mock_managedblockchain +def test_create_network(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Name"].should.equal("testnetwork1") + + # Get network details + response = conn.get_network(NetworkId=network_id) + response["Network"]["Name"].should.equal("testnetwork1") + + +@mock_managedblockchain +def test_create_network_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_networks() + mbcnetworks = response["Networks"] + mbcnetworks.should.have.length_of(1) + mbcnetworks[0]["Description"].should.equal("Test Network 1") + + # Get network details + response = 
conn.get_network(NetworkId=network_id) + response["Network"]["Description"].should.equal("Test Network 1") + + +@mock_managedblockchain +def test_create_network_noframework(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_VINYL", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_create_network_badframeworkver(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.X", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ).should.throw( + Exception, "Invalid version 1.X requested for framework HYPERLEDGER_FABRIC" + ) + + +@mock_managedblockchain +def test_create_network_badedition(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + frameworkconfiguration = {"Fabric": {"Edition": "SUPER"}} + + response = conn.create_network.when.called_with( + Name="testnetwork1", + Description="Test Network 1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_get_network_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_network.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposals.py b/tests/test_managedblockchain/test_managedblockchain_proposals.py new file mode 100644 index 000000000..407d26246 --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_proposals.py @@ -0,0 +1,199 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain +from . 
import helpers + + +@mock_managedblockchain +def test_create_proposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + proposal_id.should.match("p-[A-Z0-9]{26}") + + # Find in full list + response = conn.list_proposals(NetworkId=network_id) + proposals = response["Proposals"] + proposals.should.have.length_of(1) + proposals[0]["ProposalId"].should.equal(proposal_id) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + + +@mock_managedblockchain +def test_create_proposal_withopts(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + network_id.should.match("n-[A-Z0-9]{26}") + member_id.should.match("m-[A-Z0-9]{26}") + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + Description="Adding a new member", + ) + proposal_id = response["ProposalId"] + proposal_id.should.match("p-[A-Z0-9]{26}") + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Description"].should.equal("Adding a new member") + + +@mock_managedblockchain +def test_create_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.create_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + Actions=helpers.default_policy_actions, + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_proposal_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, + MemberId="m-ABCDEFGHIJKLMNOP0123456789", + Actions=helpers.default_policy_actions, + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_create_proposal_badinvitationacctid(): + conn = boto3.client("managedblockchain", 
region_name="us-east-1") + + # Must be 12 digits + actions = {"Invitations": [{"Principal": "1234567890"}]} + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, MemberId=member_id, Actions=actions, + ).should.throw(Exception, "Account ID format specified in proposal is not valid") + + +@mock_managedblockchain +def test_create_proposal_badremovalmemid(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Must be 12 digits + actions = {"Removals": [{"MemberId": "m-ABCDEFGHIJKLMNOP0123456789"}]} + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal.when.called_with( + NetworkId=network_id, MemberId=member_id, Actions=actions, + ).should.throw(Exception, "Member ID format specified in proposal is not valid") + + +@mock_managedblockchain +def test_list_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_proposals.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.get_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_get_proposal_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.get_proposal.when.called_with( + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py new file mode 100644 index 000000000..a026b496f --- /dev/null +++ b/tests/test_managedblockchain/test_managedblockchain_proposalvotes.py @@ -0,0 +1,529 @@ +from __future__ import unicode_literals + +import os + +import boto3 +import sure # noqa +from freezegun import freeze_time +from nose import SkipTest + +from moto.managedblockchain.exceptions import BadRequestException +from moto import mock_managedblockchain, settings +from . 
import helpers + + +@mock_managedblockchain +def test_vote_on_proposal_one_member_total_yes(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # List proposal votes + response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id) + response["ProposalVotes"][0]["MemberId"].should.equal(member_id) + + # Get proposal details - should be APPROVED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("APPROVED") + response["Proposal"]["YesVoteCount"].should.equal(1) + response["Proposal"]["NoVoteCount"].should.equal(0) + response["Proposal"]["OutstandingVoteCount"].should.equal(0) + + +@mock_managedblockchain +def test_vote_on_proposal_one_member_total_no(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + # Create proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote no + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="NO", + ) + + # List proposal votes + response = conn.list_proposal_votes(NetworkId=network_id, ProposalId=proposal_id) + response["ProposalVotes"][0]["MemberId"].should.equal(member_id) + + # Get proposal details - should be REJECTED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("REJECTED") + response["Proposal"]["YesVoteCount"].should.equal(0) + response["Proposal"]["NoVoteCount"].should.equal(1) + response["Proposal"]["OutstandingVoteCount"].should.equal(0) + + +@mock_managedblockchain +def test_vote_on_proposal_yes_greater_than(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": 
"GREATER_THAN", + } + } + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + +@mock_managedblockchain +def test_vote_on_proposal_no_greater_than(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 24, + "ThresholdComparator": "GREATER_THAN", + } + } + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote no with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="NO", + ) + + # Vote no with member 2 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + 
VoterMemberId=member_id2, + Vote="NO", + ) + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("REJECTED") + + +@mock_managedblockchain +def test_vote_on_proposal_expiredproposal(): + if os.environ.get("TEST_SERVER_MODE", "false").lower() == "true": + raise SkipTest("Cant manipulate time in server mode") + + votingpolicy = { + "ApprovalThresholdPolicy": { + "ThresholdPercentage": 50, + "ProposalDurationInHours": 1, + "ThresholdComparator": "GREATER_THAN_OR_EQUAL_TO", + } + } + + conn = boto3.client("managedblockchain", region_name="us-east-1") + + with freeze_time("2015-01-01 12:00:00"): + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + with freeze_time("2015-02-01 12:00:00"): + # Vote yes - should set status to expired + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get proposal details - should be EXPIRED + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["Status"].should.equal("EXPIRED") + + +@mock_managedblockchain +def test_vote_on_proposal_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.vote_on_proposal.when.called_with( + NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badmember(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + 
Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId="m-ABCDEFGHIJKLMNOP0123456789", + Vote="YES", + ).should.throw(Exception, "Member m-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_vote_on_proposal_badvote(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="FOO", + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_vote_on_proposal_alreadyvoted(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network - need a good network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Vote yes + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Get the invitation + response = conn.list_invitations() + invitation_id = response["Invitations"][0]["InvitationId"] + + # Create the member + response = conn.create_member( + InvitationId=invitation_id, + NetworkId=network_id, + MemberConfiguration=helpers.create_member_configuration( + "testmember2", "admin", "Admin12345", False, "Test Member 2" + ), + ) + member_id2 = response["MemberId"] + + # Create another proposal + response = conn.create_proposal( + NetworkId=network_id, + MemberId=member_id, + Actions=helpers.default_policy_actions, + ) + + proposal_id = response["ProposalId"] + + # Get proposal details + response = conn.get_proposal(NetworkId=network_id, ProposalId=proposal_id) + response["Proposal"]["NetworkId"].should.equal(network_id) + response["Proposal"]["Status"].should.equal("IN_PROGRESS") + + # Vote yes with member 1 + response = conn.vote_on_proposal( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ) + + # Vote yes with member 1 again + response = conn.vote_on_proposal.when.called_with( + NetworkId=network_id, + ProposalId=proposal_id, + VoterMemberId=member_id, + Vote="YES", + ).should.throw(Exception, "Invalid request body") + + +@mock_managedblockchain +def test_list_proposal_votes_badnetwork(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + response = conn.list_proposal_votes.when.called_with( + 
NetworkId="n-ABCDEFGHIJKLMNOP0123456789", + ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Network n-ABCDEFGHIJKLMNOP0123456789 not found") + + +@mock_managedblockchain +def test_list_proposal_votes_badproposal(): + conn = boto3.client("managedblockchain", region_name="us-east-1") + + # Create network + response = conn.create_network( + Name="testnetwork1", + Framework="HYPERLEDGER_FABRIC", + FrameworkVersion="1.2", + FrameworkConfiguration=helpers.default_frameworkconfiguration, + VotingPolicy=helpers.default_votingpolicy, + MemberConfiguration=helpers.default_memberconfiguration, + ) + network_id = response["NetworkId"] + member_id = response["MemberId"] + + response = conn.list_proposal_votes.when.called_with( + NetworkId=network_id, ProposalId="p-ABCDEFGHIJKLMNOP0123456789", + ).should.throw(Exception, "Proposal p-ABCDEFGHIJKLMNOP0123456789 not found") diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 5f0dc2040..93935d20f 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -195,6 +195,10 @@ def test_ec2_integration(): reservations = ec2.describe_instances()["Reservations"] assert reservations.should.be.empty + # Before starting the instance, its status should be "stopped" + opsworks_instance = opsworks.describe_instances(StackId=stack_id)["Instances"][0] + opsworks_instance["Status"].should.equal("stopped") + # After starting the instance, it should be discoverable via ec2 opsworks.start_instance(InstanceId=instance_id) reservations = ec2.describe_instances()["Reservations"] @@ -204,3 +208,5 @@ def test_ec2_integration(): instance["InstanceId"].should.equal(opsworks_instance["Ec2InstanceId"]) instance["PrivateIpAddress"].should.equal(opsworks_instance["PrivateIp"]) + # After starting the instance, its status should be "online" + opsworks_instance["Status"].should.equal("online") diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 6bb3b1396..cf96ee15f 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -915,6 +915,11 @@ def test_create_cluster_from_snapshot(): ClusterIdentifier=original_cluster_identifier, ) + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier=original_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + ).should.throw(ClientError, "ClusterAlreadyExists") + response = client.restore_from_cluster_snapshot( ClusterIdentifier=new_cluster_identifier, SnapshotIdentifier=original_snapshot_identifier, @@ -1333,3 +1338,20 @@ def test_modify_snapshot_copy_retention_period(): response = client.describe_clusters(ClusterIdentifier="test") cluster_snapshot_copy_status = response["Clusters"][0]["ClusterSnapshotCopyStatus"] cluster_snapshot_copy_status["RetentionPeriod"].should.equal(5) + + +@mock_redshift +def test_create_duplicate_cluster_fails(): + kwargs = { + "ClusterIdentifier": "test", + "ClusterType": "single-node", + "DBName": "test", + "MasterUsername": "user", + "MasterUserPassword": "password", + "NodeType": "ds2.xlarge", + } + client = boto3.client("redshift", region_name="us-east-1") + client.create_cluster(**kwargs) + client.create_cluster.when.called_with(**kwargs).should.throw( + ClientError, "ClusterAlreadyExists" + ) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index f60e0293e..bcb9da87f 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -2149,6 +2149,19 @@ def 
test_boto3_copy_object_with_versioning(): data.should.equal(b"test2") +@mock_s3 +def test_s3_abort_multipart_data_with_invalid_upload_and_key(): + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) + + client.create_bucket(Bucket="blah") + + with assert_raises(Exception) as err: + client.abort_multipart_upload( + Bucket="blah", Key="foobar", UploadId="dummy_upload_id" + ) + err.exception.response["Error"]["Code"].should.equal("NoSuchUpload") + + @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py index 851327b9d..ce0062974 100644 --- a/tests/test_ses/test_ses.py +++ b/tests/test_ses/test_ses.py @@ -127,3 +127,53 @@ def test_send_raw_email(): send_quota["GetSendQuotaResponse"]["GetSendQuotaResult"]["SentLast24Hours"] ) sent_count.should.equal(1) + + +@mock_ses_deprecated +def test_get_send_statistics(): + conn = boto.connect_ses("the_key", "the_secret") + + conn.send_email.when.called_with( + "test@example.com", + "test subject", + "test body", + "test_to@example.com", + format="html", + ).should.throw(BotoServerError) + + # tests to verify rejects in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] + ) + reject_count.should.equal(1) + delivery_count.should.equal(0) + + conn.verify_email_identity("test@example.com") + conn.send_email( + "test@example.com", "test subject", "test body", "test_to@example.com" + ) + + # tests to delivery attempts in get_send_statistics + result = conn.get_send_statistics() + + reject_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["Rejects"] + ) + delivery_count = int( + result["GetSendStatisticsResponse"]["GetSendStatisticsResult"][ + "SendDataPoints" + ][0]["DeliveryAttempts"] + ) + reject_count.should.equal(1) + delivery_count.should.equal(1) diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index de8aa0813..707afe8fb 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -4,6 +4,8 @@ import boto3 from botocore.exceptions import ClientError from six.moves.email_mime_multipart import MIMEMultipart from six.moves.email_mime_text import MIMEText +from nose.tools import assert_raises + import sure # noqa @@ -139,19 +141,7 @@ def test_send_html_email(): def test_send_raw_email(): conn = boto3.client("ses", region_name="us-east-1") - message = MIMEMultipart() - message["Subject"] = "Test" - message["From"] = "test@example.com" - message["To"] = "to@example.com, foo@example.com" - - # Message body - part = MIMEText("test file attached") - message.attach(part) - - # Attachment - part = MIMEText("contents of test file here") - part.add_header("Content-Disposition", "attachment; filename=test.txt") - message.attach(part) + message = get_raw_email() kwargs = dict(Source=message["From"], RawMessage={"Data": message.as_string()}) @@ -165,6 +155,39 @@ def test_send_raw_email(): sent_count.should.equal(2) +@mock_ses +def test_send_raw_email_validate_domain(): + conn = boto3.client("ses", region_name="us-east-1") + + message = get_raw_email() + + kwargs = dict(Source=message["From"], RawMessage={"Data": 
+
+    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
+
+    conn.verify_domain_identity(Domain="example.com")
+    conn.send_raw_email(**kwargs)
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota["SentLast24Hours"])
+    sent_count.should.equal(2)
+
+
+def get_raw_email():
+    message = MIMEMultipart()
+    message["Subject"] = "Test"
+    message["From"] = "test@example.com"
+    message["To"] = "to@example.com, foo@example.com"
+    # Message body
+    part = MIMEText("test file attached")
+    message.attach(part)
+    # Attachment
+    part = MIMEText("contents of test file here")
+    part.add_header("Content-Disposition", "attachment; filename=test.txt")
+    message.attach(part)
+    return message
+
+
 @mock_ses
 def test_send_raw_email_without_source():
     conn = boto3.client("ses", region_name="us-east-1")
@@ -227,3 +250,94 @@ def test_send_email_notification_with_encoded_sender():
         Message={"Subject": {"Data": "hi",}, "Body": {"Text": {"Data": "there",}}},
     )
     response["ResponseMetadata"]["HTTPStatusCode"].should.equal(200)
+
+
+@mock_ses
+def test_create_configuration_set():
+    conn = boto3.client("ses", region_name="us-east-1")
+    conn.create_configuration_set(ConfigurationSet=dict({"Name": "test"}))
+
+    conn.create_configuration_set_event_destination(
+        ConfigurationSetName="test",
+        EventDestination={
+            "Name": "snsEvent",
+            "Enabled": True,
+            "MatchingEventTypes": ["send",],
+            "SNSDestination": {
+                "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
+            },
+        },
+    )
+
+    with assert_raises(ClientError) as ex:
+        conn.create_configuration_set_event_destination(
+            ConfigurationSetName="failtest",
+            EventDestination={
+                "Name": "snsEvent",
+                "Enabled": True,
+                "MatchingEventTypes": ["send",],
+                "SNSDestination": {
+                    "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
+                },
+            },
+        )
+
+    ex.exception.response["Error"]["Code"].should.equal("ConfigurationSetDoesNotExist")
+
+    with assert_raises(ClientError) as ex:
+        conn.create_configuration_set_event_destination(
+            ConfigurationSetName="test",
+            EventDestination={
+                "Name": "snsEvent",
+                "Enabled": True,
+                "MatchingEventTypes": ["send",],
+                "SNSDestination": {
+                    "TopicARN": "arn:aws:sns:us-east-1:123456789012:myTopic"
+                },
+            },
+        )
+
+    ex.exception.response["Error"]["Code"].should.equal("EventDestinationAlreadyExists")
+
+
+@mock_ses
+def test_create_ses_template():
+    conn = boto3.client("ses", region_name="us-east-1")
+
+    conn.create_template(
+        Template={
+            "TemplateName": "MyTemplate",
+            "SubjectPart": "Greetings, {{name}}!",
+            "TextPart": "Dear {{name}},"
+            "\r\nYour favorite animal is {{favoriteanimal}}.",
+            "HtmlPart": "<h1>Hello {{name}},</h1>"
+            "<p>Your favorite animal is {{favoriteanimal}}.</p>",
+        }
+    )
+    with assert_raises(ClientError) as ex:
+        conn.create_template(
+            Template={
+                "TemplateName": "MyTemplate",
+                "SubjectPart": "Greetings, {{name}}!",
+                "TextPart": "Dear {{name}},"
+                "\r\nYour favorite animal is {{favoriteanimal}}.",
+                "HtmlPart": "<h1>Hello {{name}},</h1>"
+                "<p>Your favorite animal is {{favoriteanimal}}.</p>",
+            }
+        )
+
+    ex.exception.response["Error"]["Code"].should.equal("TemplateNameAlreadyExists")
+
+    # get a template which is already added
+    result = conn.get_template(TemplateName="MyTemplate")
+    result["Template"]["TemplateName"].should.equal("MyTemplate")
+    result["Template"]["SubjectPart"].should.equal("Greetings, {{name}}!")
+
+    # get a template which is not present
+    with assert_raises(ClientError) as ex:
+        conn.get_template(TemplateName="MyFakeTemplate")
+
+    ex.exception.response["Error"]["Code"].should.equal("TemplateDoesNotExist")
+
+    result = conn.list_templates()
+    result["TemplatesMetadata"][0]["Name"].should.equal("MyTemplate")
diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py
index 170cd8a3e..837f81bf5 100644
--- a/tests/test_ssm/test_ssm_boto3.py
+++ b/tests/test_ssm/test_ssm_boto3.py
@@ -1,5 +1,7 @@
 from __future__ import unicode_literals
 
+import string
+
 import boto3
 import botocore.exceptions
 import sure  # noqa
@@ -300,6 +302,30 @@ def test_get_parameter():
     )
 
 
+@mock_ssm
+def test_get_parameters_errors():
+    client = boto3.client("ssm", region_name="us-east-1")
+
+    ssm_parameters = {name: "value" for name in string.ascii_lowercase[:11]}
+
+    for name, value in ssm_parameters.items():
+        client.put_parameter(Name=name, Value=value, Type="String")
+
+    with assert_raises(ClientError) as e:
+        client.get_parameters(Names=list(ssm_parameters.keys()))
+    ex = e.exception
+    ex.operation_name.should.equal("GetParameters")
+    ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
+    ex.response["Error"]["Code"].should.contain("ValidationException")
+    ex.response["Error"]["Message"].should.equal(
+        "1 validation error detected: "
+        "Value '[{}]' at 'names' failed to satisfy constraint: "
+        "Member must have length less than or equal to 10.".format(
+            ", ".join(ssm_parameters.keys())
+        )
+    )
+
+
 @mock_ssm
 def test_get_nonexistant_parameter():
     client = boto3.client("ssm", region_name="us-east-1")