diff --git a/.gitignore b/.gitignore index efb489651..0a24fe476 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ venv/ .python-version .vscode/ tests/file.tmp +.eggs/ diff --git a/AUTHORS.md b/AUTHORS.md index 0a152505a..fbca08368 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -54,3 +54,5 @@ Moto is written by Steve Pulec with contributions from: * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) * [Jon Beilke](https://github.com/jrbeilke) +* [Craig Anderson](https://github.com/craiga) +* [Robert Lewis](https://github.com/ralewis85) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 26ea1972a..e03eaabe1 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,5676 +1,4441 @@ - -## acm - 41% implemented -- [X] add_tags_to_certificate -- [X] delete_certificate -- [ ] describe_certificate -- [ ] export_certificate -- [X] get_certificate -- [ ] import_certificate -- [ ] list_certificates -- [ ] list_tags_for_certificate -- [X] remove_tags_from_certificate -- [X] request_certificate -- [ ] resend_validation_email -- [ ] update_certificate_options - -## acm-pca - 0% implemented -- [ ] create_certificate_authority -- [ ] create_certificate_authority_audit_report -- [ ] delete_certificate_authority -- [ ] describe_certificate_authority -- [ ] describe_certificate_authority_audit_report -- [ ] get_certificate -- [ ] get_certificate_authority_certificate -- [ ] get_certificate_authority_csr -- [ ] import_certificate_authority_certificate -- [ ] issue_certificate -- [ ] list_certificate_authorities -- [ ] list_tags -- [ ] restore_certificate_authority -- [ ] revoke_certificate -- [ ] tag_certificate_authority -- [ ] untag_certificate_authority -- [ ] update_certificate_authority - -## alexaforbusiness - 0% implemented -- [ ] approve_skill -- [ ] associate_contact_with_address_book -- [ ] associate_device_with_room -- [ ] associate_skill_group_with_room -- [ ] associate_skill_with_skill_group -- [ ] associate_skill_with_users -- [ ] create_address_book -- [ ] create_business_report_schedule -- [ ] create_conference_provider -- [ ] create_contact -- [ ] create_profile -- [ ] create_room -- [ ] create_skill_group -- [ ] create_user -- [ ] delete_address_book -- [ ] delete_business_report_schedule -- [ ] delete_conference_provider -- [ ] delete_contact -- [ ] delete_device -- [ ] delete_profile -- [ ] delete_room -- [ ] delete_room_skill_parameter -- [ ] delete_skill_authorization -- [ ] delete_skill_group -- [ ] delete_user -- [ ] disassociate_contact_from_address_book -- [ ] disassociate_device_from_room -- [ ] disassociate_skill_from_skill_group -- [ ] disassociate_skill_from_users -- [ ] disassociate_skill_group_from_room -- [ ] forget_smart_home_appliances -- [ ] get_address_book -- [ ] get_conference_preference -- [ ] get_conference_provider -- [ ] get_contact -- [ ] get_device -- [ ] get_profile -- [ ] get_room -- [ ] get_room_skill_parameter -- [ ] get_skill_group -- [ ] list_business_report_schedules -- [ ] list_conference_providers -- [ ] list_device_events -- [ ] list_skills -- [ ] list_skills_store_categories -- [ ] list_skills_store_skills_by_category -- [ ] list_smart_home_appliances -- [ ] list_tags -- [ ] put_conference_preference -- [ ] put_room_skill_parameter -- [ ] put_skill_authorization -- [ ] register_avs_device -- [ ] reject_skill -- [ ] resolve_room -- [ ] revoke_invitation -- [ ] search_address_books -- [ ] search_contacts -- [ ] search_devices -- [ ] search_profiles -- 
[ ] search_rooms -- [ ] search_skill_groups -- [ ] search_users -- [ ] send_invitation -- [ ] start_device_sync -- [ ] start_smart_home_appliance_discovery -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_address_book -- [ ] update_business_report_schedule -- [ ] update_conference_provider -- [ ] update_contact -- [ ] update_device -- [ ] update_profile -- [ ] update_room -- [ ] update_skill_group - -## amplify - 0% implemented -- [ ] create_app -- [ ] create_branch -- [ ] create_domain_association -- [ ] delete_app -- [ ] delete_branch -- [ ] delete_domain_association -- [ ] delete_job -- [ ] get_app -- [ ] get_branch -- [ ] get_domain_association -- [ ] get_job -- [ ] list_apps -- [ ] list_branches -- [ ] list_domain_associations -- [ ] list_jobs -- [ ] start_job -- [ ] stop_job -- [ ] update_app -- [ ] update_branch -- [ ] update_domain_association - -## apigateway - 24% implemented -- [ ] create_api_key -- [ ] create_authorizer -- [ ] create_base_path_mapping -- [X] create_deployment -- [ ] create_documentation_part -- [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model -- [ ] create_request_validator -- [X] create_resource -- [X] create_rest_api -- [X] create_stage -- [X] create_usage_plan -- [X] create_usage_plan_key -- [ ] create_vpc_link -- [ ] delete_api_key -- [ ] delete_authorizer -- [ ] delete_base_path_mapping -- [ ] delete_client_certificate -- [X] delete_deployment -- [ ] delete_documentation_part -- [ ] delete_documentation_version -- [ ] delete_domain_name -- [ ] delete_gateway_response -- [X] delete_integration -- [X] delete_integration_response -- [ ] delete_method -- [X] delete_method_response -- [ ] delete_model -- [ ] delete_request_validator -- [X] delete_resource -- [X] delete_rest_api -- [ ] delete_stage -- [X] delete_usage_plan -- [X] delete_usage_plan_key -- [ ] delete_vpc_link -- [ ] flush_stage_authorizers_cache -- [ ] flush_stage_cache -- [ ] generate_client_certificate -- [ ] get_account -- [ ] get_api_key -- [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers -- [ ] get_base_path_mapping -- [ ] get_base_path_mappings -- [ ] get_client_certificate -- [ ] get_client_certificates -- [X] get_deployment -- [X] get_deployments -- [ ] get_documentation_part -- [ ] get_documentation_parts -- [ ] get_documentation_version -- [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names -- [ ] get_export -- [ ] get_gateway_response -- [ ] get_gateway_responses -- [X] get_integration -- [X] get_integration_response -- [X] get_method -- [X] get_method_response -- [ ] get_model -- [ ] get_model_template -- [ ] get_models -- [ ] get_request_validator -- [ ] get_request_validators -- [X] get_resource -- [ ] get_resources -- [X] get_rest_api -- [ ] get_rest_apis -- [ ] get_sdk -- [ ] get_sdk_type -- [ ] get_sdk_types -- [X] get_stage -- [X] get_stages -- [ ] get_tags -- [ ] get_usage -- [X] get_usage_plan -- [X] get_usage_plan_key -- [X] get_usage_plan_keys -- [X] get_usage_plans -- [ ] get_vpc_link -- [ ] get_vpc_links -- [ ] import_api_keys -- [ ] import_documentation_parts -- [ ] import_rest_api -- [ ] put_gateway_response -- [ ] put_integration -- [ ] put_integration_response -- [ ] put_method -- [ ] put_method_response -- [ ] put_rest_api -- [ ] tag_resource -- [ ] test_invoke_authorizer -- [ ] test_invoke_method -- [ ] untag_resource -- [ ] update_account -- [ ] update_api_key -- [ ] update_authorizer -- [ ] update_base_path_mapping -- [ ] update_client_certificate -- [ ] update_deployment -- [ ] 
update_documentation_part -- [ ] update_documentation_version -- [ ] update_domain_name -- [ ] update_gateway_response -- [ ] update_integration -- [ ] update_integration_response -- [ ] update_method -- [ ] update_method_response -- [ ] update_model -- [ ] update_request_validator -- [ ] update_resource -- [ ] update_rest_api -- [X] update_stage -- [ ] update_usage -- [ ] update_usage_plan -- [ ] update_vpc_link - -## apigatewaymanagementapi - 0% implemented -- [ ] post_to_connection - -## apigatewayv2 - 0% implemented -- [ ] create_api -- [ ] create_api_mapping -- [ ] create_authorizer -- [ ] create_deployment -- [ ] create_domain_name -- [ ] create_integration -- [ ] create_integration_response -- [ ] create_model -- [ ] create_route -- [ ] create_route_response -- [ ] create_stage -- [ ] delete_api -- [ ] delete_api_mapping -- [ ] delete_authorizer -- [ ] delete_deployment -- [ ] delete_domain_name -- [ ] delete_integration -- [ ] delete_integration_response -- [ ] delete_model -- [ ] delete_route -- [ ] delete_route_response -- [ ] delete_stage -- [ ] get_api -- [ ] get_api_mapping -- [ ] get_api_mappings -- [ ] get_apis -- [ ] get_authorizer -- [ ] get_authorizers -- [ ] get_deployment -- [ ] get_deployments -- [ ] get_domain_name -- [ ] get_domain_names -- [ ] get_integration -- [ ] get_integration_response -- [ ] get_integration_responses -- [ ] get_integrations -- [ ] get_model -- [ ] get_model_template -- [ ] get_models -- [ ] get_route -- [ ] get_route_response -- [ ] get_route_responses -- [ ] get_routes -- [ ] get_stage -- [ ] get_stages -- [ ] update_api -- [ ] update_api_mapping -- [ ] update_authorizer -- [ ] update_deployment -- [ ] update_domain_name -- [ ] update_integration -- [ ] update_integration_response -- [ ] update_model -- [ ] update_route -- [ ] update_route_response -- [ ] update_stage - -## application-autoscaling - 0% implemented -- [ ] delete_scaling_policy -- [ ] delete_scheduled_action -- [ ] deregister_scalable_target -- [ ] describe_scalable_targets -- [ ] describe_scaling_activities -- [ ] describe_scaling_policies -- [ ] describe_scheduled_actions -- [ ] put_scaling_policy -- [ ] put_scheduled_action -- [ ] register_scalable_target - -## appmesh - 0% implemented -- [ ] create_mesh -- [ ] create_route -- [ ] create_virtual_node -- [ ] create_virtual_router -- [ ] delete_mesh -- [ ] delete_route -- [ ] delete_virtual_node -- [ ] delete_virtual_router -- [ ] describe_mesh -- [ ] describe_route -- [ ] describe_virtual_node -- [ ] describe_virtual_router -- [ ] list_meshes -- [ ] list_routes -- [ ] list_virtual_nodes -- [ ] list_virtual_routers -- [ ] update_route -- [ ] update_virtual_node -- [ ] update_virtual_router - -## appstream - 0% implemented -- [ ] associate_fleet -- [ ] batch_associate_user_stack -- [ ] batch_disassociate_user_stack -- [ ] copy_image -- [ ] create_directory_config -- [ ] create_fleet -- [ ] create_image_builder -- [ ] create_image_builder_streaming_url -- [ ] create_stack -- [ ] create_streaming_url -- [ ] create_user -- [ ] delete_directory_config -- [ ] delete_fleet -- [ ] delete_image -- [ ] delete_image_builder -- [ ] delete_image_permissions -- [ ] delete_stack -- [ ] delete_user -- [ ] describe_directory_configs -- [ ] describe_fleets -- [ ] describe_image_builders -- [ ] describe_image_permissions -- [ ] describe_images -- [ ] describe_sessions -- [ ] describe_stacks -- [ ] describe_user_stack_associations -- [ ] describe_users -- [ ] disable_user -- [ ] disassociate_fleet -- [ ] enable_user -- [ ] expire_session -- [ ] 
list_associated_fleets -- [ ] list_associated_stacks -- [ ] list_tags_for_resource -- [ ] start_fleet -- [ ] start_image_builder -- [ ] stop_fleet -- [ ] stop_image_builder -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_directory_config -- [ ] update_fleet -- [ ] update_image_permissions -- [ ] update_stack - -## appsync - 0% implemented -- [ ] create_api_key -- [ ] create_data_source -- [ ] create_function -- [ ] create_graphql_api -- [ ] create_resolver -- [ ] create_type -- [ ] delete_api_key -- [ ] delete_data_source -- [ ] delete_function -- [ ] delete_graphql_api -- [ ] delete_resolver -- [ ] delete_type -- [ ] get_data_source -- [ ] get_function -- [ ] get_graphql_api -- [ ] get_introspection_schema -- [ ] get_resolver -- [ ] get_schema_creation_status -- [ ] get_type -- [ ] list_api_keys -- [ ] list_data_sources -- [ ] list_functions -- [ ] list_graphql_apis -- [ ] list_resolvers -- [ ] list_resolvers_by_function -- [ ] list_types -- [ ] start_schema_creation -- [ ] update_api_key -- [ ] update_data_source -- [ ] update_function -- [ ] update_graphql_api -- [ ] update_resolver -- [ ] update_type - -## athena - 0% implemented -- [ ] batch_get_named_query -- [ ] batch_get_query_execution -- [ ] create_named_query -- [ ] delete_named_query -- [ ] get_named_query -- [ ] get_query_execution -- [ ] get_query_results -- [ ] list_named_queries -- [ ] list_query_executions -- [ ] start_query_execution -- [ ] stop_query_execution - -## autoscaling - 44% implemented -- [X] attach_instances -- [X] attach_load_balancer_target_groups -- [X] attach_load_balancers -- [ ] batch_delete_scheduled_action -- [ ] batch_put_scheduled_update_group_action -- [ ] complete_lifecycle_action -- [X] create_auto_scaling_group -- [X] create_launch_configuration -- [X] create_or_update_tags -- [X] delete_auto_scaling_group -- [X] delete_launch_configuration -- [ ] delete_lifecycle_hook -- [ ] delete_notification_configuration -- [X] delete_policy -- [ ] delete_scheduled_action -- [ ] delete_tags -- [ ] describe_account_limits -- [ ] describe_adjustment_types -- [X] describe_auto_scaling_groups -- [X] describe_auto_scaling_instances -- [ ] describe_auto_scaling_notification_types -- [X] describe_launch_configurations -- [ ] describe_lifecycle_hook_types -- [ ] describe_lifecycle_hooks -- [X] describe_load_balancer_target_groups -- [X] describe_load_balancers -- [ ] describe_metric_collection_types -- [ ] describe_notification_configurations -- [X] describe_policies -- [ ] describe_scaling_activities -- [ ] describe_scaling_process_types -- [ ] describe_scheduled_actions -- [ ] describe_tags -- [ ] describe_termination_policy_types -- [X] detach_instances -- [X] detach_load_balancer_target_groups -- [X] detach_load_balancers -- [ ] disable_metrics_collection -- [ ] enable_metrics_collection -- [ ] enter_standby -- [X] execute_policy -- [ ] exit_standby -- [ ] put_lifecycle_hook -- [ ] put_notification_configuration -- [ ] put_scaling_policy -- [ ] put_scheduled_update_group_action -- [ ] record_lifecycle_action_heartbeat -- [ ] resume_processes -- [X] set_desired_capacity -- [X] set_instance_health -- [X] set_instance_protection -- [X] suspend_processes -- [ ] terminate_instance_in_auto_scaling_group -- [X] update_auto_scaling_group - -## autoscaling-plans - 0% implemented -- [ ] create_scaling_plan -- [ ] delete_scaling_plan -- [ ] describe_scaling_plan_resources -- [ ] describe_scaling_plans -- [ ] get_scaling_plan_resource_forecast_data -- [ ] update_scaling_plan - -## batch - 93% implemented -- [ ] 
cancel_job -- [X] create_compute_environment -- [X] create_job_queue -- [X] delete_compute_environment -- [X] delete_job_queue -- [X] deregister_job_definition -- [X] describe_compute_environments -- [X] describe_job_definitions -- [X] describe_job_queues -- [X] describe_jobs -- [X] list_jobs -- [X] register_job_definition -- [X] submit_job -- [X] terminate_job -- [X] update_compute_environment -- [X] update_job_queue - -## budgets - 0% implemented -- [ ] create_budget -- [ ] create_notification -- [ ] create_subscriber -- [ ] delete_budget -- [ ] delete_notification -- [ ] delete_subscriber -- [ ] describe_budget -- [ ] describe_budget_performance_history -- [ ] describe_budgets -- [ ] describe_notifications_for_budget -- [ ] describe_subscribers_for_notification -- [ ] update_budget -- [ ] update_notification -- [ ] update_subscriber - -## ce - 0% implemented -- [ ] get_cost_and_usage -- [ ] get_cost_forecast -- [ ] get_dimension_values -- [ ] get_reservation_coverage -- [ ] get_reservation_purchase_recommendation -- [ ] get_reservation_utilization -- [ ] get_tags - -## chime - 0% implemented -- [ ] batch_suspend_user -- [ ] batch_unsuspend_user -- [ ] batch_update_user -- [ ] create_account -- [ ] delete_account -- [ ] get_account -- [ ] get_account_settings -- [ ] get_user -- [ ] invite_users -- [ ] list_accounts -- [ ] list_users -- [ ] logout_user -- [ ] reset_personal_pin -- [ ] update_account -- [ ] update_account_settings -- [ ] update_user - -## cloud9 - 0% implemented -- [ ] create_environment_ec2 -- [ ] create_environment_membership -- [ ] delete_environment -- [ ] delete_environment_membership -- [ ] describe_environment_memberships -- [ ] describe_environment_status -- [ ] describe_environments -- [ ] list_environments -- [ ] update_environment -- [ ] update_environment_membership - -## clouddirectory - 0% implemented -- [ ] add_facet_to_object -- [ ] apply_schema -- [ ] attach_object -- [ ] attach_policy -- [ ] attach_to_index -- [ ] attach_typed_link -- [ ] batch_read -- [ ] batch_write -- [ ] create_directory -- [ ] create_facet -- [ ] create_index -- [ ] create_object -- [ ] create_schema -- [ ] create_typed_link_facet -- [ ] delete_directory -- [ ] delete_facet -- [ ] delete_object -- [ ] delete_schema -- [ ] delete_typed_link_facet -- [ ] detach_from_index -- [ ] detach_object -- [ ] detach_policy -- [ ] detach_typed_link -- [ ] disable_directory -- [ ] enable_directory -- [ ] get_applied_schema_version -- [ ] get_directory -- [ ] get_facet -- [ ] get_link_attributes -- [ ] get_object_attributes -- [ ] get_object_information -- [ ] get_schema_as_json -- [ ] get_typed_link_facet_information -- [ ] list_applied_schema_arns -- [ ] list_attached_indices -- [ ] list_development_schema_arns -- [ ] list_directories -- [ ] list_facet_attributes -- [ ] list_facet_names -- [ ] list_incoming_typed_links -- [ ] list_index -- [ ] list_managed_schema_arns -- [ ] list_object_attributes -- [ ] list_object_children -- [ ] list_object_parent_paths -- [ ] list_object_parents -- [ ] list_object_policies -- [ ] list_outgoing_typed_links -- [ ] list_policy_attachments -- [ ] list_published_schema_arns -- [ ] list_tags_for_resource -- [ ] list_typed_link_facet_attributes -- [ ] list_typed_link_facet_names -- [ ] lookup_policy -- [ ] publish_schema -- [ ] put_schema_from_json -- [ ] remove_facet_from_object -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_facet -- [ ] update_link_attributes -- [ ] update_object_attributes -- [ ] update_schema -- [ ] update_typed_link_facet -- [ ] 
upgrade_applied_schema -- [ ] upgrade_published_schema - -## cloudformation - 40% implemented -- [ ] cancel_update_stack -- [ ] continue_update_rollback -- [X] create_change_set -- [X] create_stack -- [X] create_stack_instances -- [X] create_stack_set -- [X] delete_change_set -- [X] delete_stack -- [X] delete_stack_instances -- [X] delete_stack_set -- [ ] describe_account_limits -- [X] describe_change_set -- [ ] describe_stack_drift_detection_status -- [ ] describe_stack_events -- [ ] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resource_drifts -- [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation -- [X] describe_stacks -- [ ] detect_stack_drift -- [ ] detect_stack_resource_drift -- [ ] estimate_template_cost -- [X] execute_change_set -- [ ] get_stack_policy -- [ ] get_template -- [ ] get_template_summary -- [X] list_change_sets -- [X] list_exports -- [ ] list_imports -- [ ] list_stack_instances -- [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets -- [X] list_stacks -- [ ] set_stack_policy -- [ ] signal_resource -- [ ] stop_stack_set_operation -- [X] update_stack -- [ ] update_stack_instances -- [X] update_stack_set -- [ ] update_termination_protection -- [X] validate_template - -## cloudfront - 0% implemented -- [ ] create_cloud_front_origin_access_identity -- [ ] create_distribution -- [ ] create_distribution_with_tags -- [ ] create_field_level_encryption_config -- [ ] create_field_level_encryption_profile -- [ ] create_invalidation -- [ ] create_public_key -- [ ] create_streaming_distribution -- [ ] create_streaming_distribution_with_tags -- [ ] delete_cloud_front_origin_access_identity -- [ ] delete_distribution -- [ ] delete_field_level_encryption_config -- [ ] delete_field_level_encryption_profile -- [ ] delete_public_key -- [ ] delete_streaming_distribution -- [ ] get_cloud_front_origin_access_identity -- [ ] get_cloud_front_origin_access_identity_config -- [ ] get_distribution -- [ ] get_distribution_config -- [ ] get_field_level_encryption -- [ ] get_field_level_encryption_config -- [ ] get_field_level_encryption_profile -- [ ] get_field_level_encryption_profile_config -- [ ] get_invalidation -- [ ] get_public_key -- [ ] get_public_key_config -- [ ] get_streaming_distribution -- [ ] get_streaming_distribution_config -- [ ] list_cloud_front_origin_access_identities -- [ ] list_distributions -- [ ] list_distributions_by_web_acl_id -- [ ] list_field_level_encryption_configs -- [ ] list_field_level_encryption_profiles -- [ ] list_invalidations -- [ ] list_public_keys -- [ ] list_streaming_distributions -- [ ] list_tags_for_resource -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_cloud_front_origin_access_identity -- [ ] update_distribution -- [ ] update_field_level_encryption_config -- [ ] update_field_level_encryption_profile -- [ ] update_public_key -- [ ] update_streaming_distribution - -## cloudhsm - 0% implemented -- [ ] add_tags_to_resource -- [ ] create_hapg -- [ ] create_hsm -- [ ] create_luna_client -- [ ] delete_hapg -- [ ] delete_hsm -- [ ] delete_luna_client -- [ ] describe_hapg -- [ ] describe_hsm -- [ ] describe_luna_client -- [ ] get_config -- [ ] list_available_zones -- [ ] list_hapgs -- [ ] list_hsms -- [ ] list_luna_clients -- [ ] list_tags_for_resource -- [ ] modify_hapg -- [ ] modify_hsm -- [ ] modify_luna_client -- [ ] remove_tags_from_resource - -## cloudhsmv2 - 0% implemented -- [ ] copy_backup_to_region -- [ ] 
create_cluster -- [ ] create_hsm -- [ ] delete_backup -- [ ] delete_cluster -- [ ] delete_hsm -- [ ] describe_backups -- [ ] describe_clusters -- [ ] initialize_cluster -- [ ] list_tags -- [ ] restore_backup -- [ ] tag_resource -- [ ] untag_resource - -## cloudsearch - 0% implemented -- [ ] build_suggesters -- [ ] create_domain -- [ ] define_analysis_scheme -- [ ] define_expression -- [ ] define_index_field -- [ ] define_suggester -- [ ] delete_analysis_scheme -- [ ] delete_domain -- [ ] delete_expression -- [ ] delete_index_field -- [ ] delete_suggester -- [ ] describe_analysis_schemes -- [ ] describe_availability_options -- [ ] describe_domains -- [ ] describe_expressions -- [ ] describe_index_fields -- [ ] describe_scaling_parameters -- [ ] describe_service_access_policies -- [ ] describe_suggesters -- [ ] index_documents -- [ ] list_domain_names -- [ ] update_availability_options -- [ ] update_scaling_parameters -- [ ] update_service_access_policies - -## cloudsearchdomain - 0% implemented -- [ ] search -- [ ] suggest -- [ ] upload_documents - -## cloudtrail - 0% implemented -- [ ] add_tags -- [ ] create_trail -- [ ] delete_trail -- [ ] describe_trails -- [ ] get_event_selectors -- [ ] get_trail_status -- [ ] list_public_keys -- [ ] list_tags -- [ ] lookup_events -- [ ] put_event_selectors -- [ ] remove_tags -- [ ] start_logging -- [ ] stop_logging -- [ ] update_trail - -## cloudwatch - 52% implemented -- [X] delete_alarms -- [X] delete_dashboards -- [ ] describe_alarm_history -- [ ] describe_alarms -- [ ] describe_alarms_for_metric -- [ ] disable_alarm_actions -- [ ] enable_alarm_actions -- [X] get_dashboard -- [ ] get_metric_data -- [X] get_metric_statistics -- [ ] get_metric_widget_image -- [X] list_dashboards -- [ ] list_metrics -- [X] put_dashboard -- [X] put_metric_alarm -- [X] put_metric_data -- [X] set_alarm_state - -## codebuild - 0% implemented -- [ ] batch_delete_builds -- [ ] batch_get_builds -- [ ] batch_get_projects -- [ ] create_project -- [ ] create_webhook -- [ ] delete_project -- [ ] delete_source_credentials -- [ ] delete_webhook -- [ ] import_source_credentials -- [ ] invalidate_project_cache -- [ ] list_builds -- [ ] list_builds_for_project -- [ ] list_curated_environment_images -- [ ] list_projects -- [ ] list_source_credentials -- [ ] start_build -- [ ] stop_build -- [ ] update_project -- [ ] update_webhook - -## codecommit - 0% implemented -- [ ] batch_get_repositories -- [ ] create_branch -- [ ] create_pull_request -- [ ] create_repository -- [ ] delete_branch -- [ ] delete_comment_content -- [ ] delete_file -- [ ] delete_repository -- [ ] describe_pull_request_events -- [ ] get_blob -- [ ] get_branch -- [ ] get_comment -- [ ] get_comments_for_compared_commit -- [ ] get_comments_for_pull_request -- [ ] get_commit -- [ ] get_differences -- [ ] get_file -- [ ] get_folder -- [ ] get_merge_conflicts -- [ ] get_pull_request -- [ ] get_repository -- [ ] get_repository_triggers -- [ ] list_branches -- [ ] list_pull_requests -- [ ] list_repositories -- [ ] merge_pull_request_by_fast_forward -- [ ] post_comment_for_compared_commit -- [ ] post_comment_for_pull_request -- [ ] post_comment_reply -- [ ] put_file -- [ ] put_repository_triggers -- [ ] test_repository_triggers -- [ ] update_comment -- [ ] update_default_branch -- [ ] update_pull_request_description -- [ ] update_pull_request_status -- [ ] update_pull_request_title -- [ ] update_repository_description -- [ ] update_repository_name - -## codedeploy - 0% implemented -- [ ] add_tags_to_on_premises_instances -- [ ] 
batch_get_application_revisions -- [ ] batch_get_applications -- [ ] batch_get_deployment_groups -- [ ] batch_get_deployment_instances -- [ ] batch_get_deployment_targets -- [ ] batch_get_deployments -- [ ] batch_get_on_premises_instances -- [ ] continue_deployment -- [ ] create_application -- [ ] create_deployment -- [ ] create_deployment_config -- [ ] create_deployment_group -- [ ] delete_application -- [ ] delete_deployment_config -- [ ] delete_deployment_group -- [ ] delete_git_hub_account_token -- [ ] deregister_on_premises_instance -- [ ] get_application -- [ ] get_application_revision -- [ ] get_deployment -- [ ] get_deployment_config -- [ ] get_deployment_group -- [ ] get_deployment_instance -- [ ] get_deployment_target -- [ ] get_on_premises_instance -- [ ] list_application_revisions -- [ ] list_applications -- [ ] list_deployment_configs -- [ ] list_deployment_groups -- [ ] list_deployment_instances -- [ ] list_deployment_targets -- [ ] list_deployments -- [ ] list_git_hub_account_token_names -- [ ] list_on_premises_instances -- [ ] put_lifecycle_event_hook_execution_status -- [ ] register_application_revision -- [ ] register_on_premises_instance -- [ ] remove_tags_from_on_premises_instances -- [ ] skip_wait_time_for_instance_termination -- [ ] stop_deployment -- [ ] update_application -- [ ] update_deployment_group - -## codepipeline - 0% implemented -- [ ] acknowledge_job -- [ ] acknowledge_third_party_job -- [ ] create_custom_action_type -- [ ] create_pipeline -- [ ] delete_custom_action_type -- [ ] delete_pipeline -- [ ] delete_webhook -- [ ] deregister_webhook_with_third_party -- [ ] disable_stage_transition -- [ ] enable_stage_transition -- [ ] get_job_details -- [ ] get_pipeline -- [ ] get_pipeline_execution -- [ ] get_pipeline_state -- [ ] get_third_party_job_details -- [ ] list_action_types -- [ ] list_pipeline_executions -- [ ] list_pipelines -- [ ] list_webhooks -- [ ] poll_for_jobs -- [ ] poll_for_third_party_jobs -- [ ] put_action_revision -- [ ] put_approval_result -- [ ] put_job_failure_result -- [ ] put_job_success_result -- [ ] put_third_party_job_failure_result -- [ ] put_third_party_job_success_result -- [ ] put_webhook -- [ ] register_webhook_with_third_party -- [ ] retry_stage_execution -- [ ] start_pipeline_execution -- [ ] update_pipeline - -## codestar - 0% implemented -- [ ] associate_team_member -- [ ] create_project -- [ ] create_user_profile -- [ ] delete_project -- [ ] delete_user_profile -- [ ] describe_project -- [ ] describe_user_profile -- [ ] disassociate_team_member -- [ ] list_projects -- [ ] list_resources -- [ ] list_tags_for_project -- [ ] list_team_members -- [ ] list_user_profiles -- [ ] tag_project -- [ ] untag_project -- [ ] update_project -- [ ] update_team_member -- [ ] update_user_profile - -## cognito-identity - 22% implemented -- [X] create_identity_pool -- [ ] delete_identities -- [ ] delete_identity_pool -- [ ] describe_identity -- [ ] describe_identity_pool -- [X] get_credentials_for_identity -- [X] get_id -- [ ] get_identity_pool_roles -- [ ] get_open_id_token -- [X] get_open_id_token_for_developer_identity -- [ ] list_identities -- [ ] list_identity_pools -- [ ] lookup_developer_identity -- [ ] merge_developer_identities -- [ ] set_identity_pool_roles -- [ ] unlink_developer_identity -- [ ] unlink_identity -- [ ] update_identity_pool - -## cognito-idp - 36% implemented -- [ ] add_custom_attributes -- [X] admin_add_user_to_group -- [ ] admin_confirm_sign_up -- [X] admin_create_user -- [X] admin_delete_user -- [ ] 
admin_delete_user_attributes -- [ ] admin_disable_provider_for_user -- [X] admin_disable_user -- [X] admin_enable_user -- [ ] admin_forget_device -- [ ] admin_get_device -- [X] admin_get_user -- [X] admin_initiate_auth -- [ ] admin_link_provider_for_user -- [ ] admin_list_devices -- [X] admin_list_groups_for_user -- [ ] admin_list_user_auth_events -- [X] admin_remove_user_from_group -- [ ] admin_reset_user_password -- [ ] admin_respond_to_auth_challenge -- [ ] admin_set_user_mfa_preference -- [ ] admin_set_user_settings -- [ ] admin_update_auth_event_feedback -- [ ] admin_update_device_status -- [ ] admin_update_user_attributes -- [ ] admin_user_global_sign_out -- [ ] associate_software_token -- [X] change_password -- [ ] confirm_device -- [X] confirm_forgot_password -- [ ] confirm_sign_up -- [X] create_group -- [X] create_identity_provider -- [ ] create_resource_server -- [ ] create_user_import_job -- [X] create_user_pool -- [X] create_user_pool_client -- [X] create_user_pool_domain -- [X] delete_group -- [X] delete_identity_provider -- [ ] delete_resource_server -- [ ] delete_user -- [ ] delete_user_attributes -- [X] delete_user_pool -- [X] delete_user_pool_client -- [X] delete_user_pool_domain -- [X] describe_identity_provider -- [ ] describe_resource_server -- [ ] describe_risk_configuration -- [ ] describe_user_import_job -- [X] describe_user_pool -- [X] describe_user_pool_client -- [X] describe_user_pool_domain -- [ ] forget_device -- [ ] forgot_password -- [ ] get_csv_header -- [ ] get_device -- [X] get_group -- [ ] get_identity_provider_by_identifier -- [ ] get_signing_certificate -- [ ] get_ui_customization -- [ ] get_user -- [ ] get_user_attribute_verification_code -- [ ] get_user_pool_mfa_config -- [ ] global_sign_out -- [ ] initiate_auth -- [ ] list_devices -- [X] list_groups -- [X] list_identity_providers -- [ ] list_resource_servers -- [ ] list_user_import_jobs -- [X] list_user_pool_clients -- [X] list_user_pools -- [X] list_users -- [X] list_users_in_group -- [ ] resend_confirmation_code -- [X] respond_to_auth_challenge -- [ ] set_risk_configuration -- [ ] set_ui_customization -- [ ] set_user_mfa_preference -- [ ] set_user_pool_mfa_config -- [ ] set_user_settings -- [ ] sign_up -- [ ] start_user_import_job -- [ ] stop_user_import_job -- [ ] update_auth_event_feedback -- [ ] update_device_status -- [ ] update_group -- [X] update_identity_provider -- [ ] update_resource_server -- [ ] update_user_attributes -- [ ] update_user_pool -- [X] update_user_pool_client -- [ ] update_user_pool_domain -- [ ] verify_software_token -- [ ] verify_user_attribute - -## cognito-sync - 0% implemented -- [ ] bulk_publish -- [ ] delete_dataset -- [ ] describe_dataset -- [ ] describe_identity_pool_usage -- [ ] describe_identity_usage -- [ ] get_bulk_publish_details -- [ ] get_cognito_events -- [ ] get_identity_pool_configuration -- [ ] list_datasets -- [ ] list_identity_pool_usage -- [ ] list_records -- [ ] register_device -- [ ] set_cognito_events -- [ ] set_identity_pool_configuration -- [ ] subscribe_to_dataset -- [ ] unsubscribe_from_dataset -- [ ] update_records - -## comprehend - 0% implemented -- [ ] batch_detect_dominant_language -- [ ] batch_detect_entities -- [ ] batch_detect_key_phrases -- [ ] batch_detect_sentiment -- [ ] batch_detect_syntax -- [ ] create_document_classifier -- [ ] create_entity_recognizer -- [ ] delete_document_classifier -- [ ] delete_entity_recognizer -- [ ] describe_document_classification_job -- [ ] describe_document_classifier -- [ ] 
describe_dominant_language_detection_job -- [ ] describe_entities_detection_job -- [ ] describe_entity_recognizer -- [ ] describe_key_phrases_detection_job -- [ ] describe_sentiment_detection_job -- [ ] describe_topics_detection_job -- [ ] detect_dominant_language -- [ ] detect_entities -- [ ] detect_key_phrases -- [ ] detect_sentiment -- [ ] detect_syntax -- [ ] list_document_classification_jobs -- [ ] list_document_classifiers -- [ ] list_dominant_language_detection_jobs -- [ ] list_entities_detection_jobs -- [ ] list_entity_recognizers -- [ ] list_key_phrases_detection_jobs -- [ ] list_sentiment_detection_jobs -- [ ] list_topics_detection_jobs -- [ ] start_document_classification_job -- [ ] start_dominant_language_detection_job -- [ ] start_entities_detection_job -- [ ] start_key_phrases_detection_job -- [ ] start_sentiment_detection_job -- [ ] start_topics_detection_job -- [ ] stop_dominant_language_detection_job -- [ ] stop_entities_detection_job -- [ ] stop_key_phrases_detection_job -- [ ] stop_sentiment_detection_job -- [ ] stop_training_document_classifier -- [ ] stop_training_entity_recognizer - -## comprehendmedical - 0% implemented -- [ ] detect_entities -- [ ] detect_phi - -## config - 19% implemented -- [ ] batch_get_aggregate_resource_config -- [ ] batch_get_resource_config -- [ ] delete_aggregation_authorization -- [ ] delete_config_rule -- [ ] delete_configuration_aggregator -- [X] delete_configuration_recorder -- [X] delete_delivery_channel -- [ ] delete_evaluation_results -- [ ] delete_pending_aggregation_request -- [ ] delete_retention_configuration -- [ ] deliver_config_snapshot -- [ ] describe_aggregate_compliance_by_config_rules -- [ ] describe_aggregation_authorizations -- [ ] describe_compliance_by_config_rule -- [ ] describe_compliance_by_resource -- [ ] describe_config_rule_evaluation_status -- [ ] describe_config_rules -- [ ] describe_configuration_aggregator_sources_status -- [ ] describe_configuration_aggregators -- [X] describe_configuration_recorder_status -- [X] describe_configuration_recorders -- [ ] describe_delivery_channel_status -- [X] describe_delivery_channels -- [ ] describe_pending_aggregation_requests -- [ ] describe_retention_configurations -- [ ] get_aggregate_compliance_details_by_config_rule -- [ ] get_aggregate_config_rule_compliance_summary -- [ ] get_aggregate_discovered_resource_counts -- [ ] get_aggregate_resource_config -- [ ] get_compliance_details_by_config_rule -- [ ] get_compliance_details_by_resource -- [ ] get_compliance_summary_by_config_rule -- [ ] get_compliance_summary_by_resource_type -- [ ] get_discovered_resource_counts -- [ ] get_resource_config_history -- [ ] list_aggregate_discovered_resources -- [ ] list_discovered_resources -- [ ] put_aggregation_authorization -- [ ] put_config_rule -- [ ] put_configuration_aggregator -- [X] put_configuration_recorder -- [X] put_delivery_channel -- [ ] put_evaluations -- [ ] put_retention_configuration -- [ ] start_config_rules_evaluation -- [X] start_configuration_recorder -- [X] stop_configuration_recorder - -## connect - 0% implemented -- [ ] create_user -- [ ] delete_user -- [ ] describe_user -- [ ] describe_user_hierarchy_group -- [ ] describe_user_hierarchy_structure -- [ ] get_contact_attributes -- [ ] get_current_metric_data -- [ ] get_federation_token -- [ ] get_metric_data -- [ ] list_routing_profiles -- [ ] list_security_profiles -- [ ] list_user_hierarchy_groups -- [ ] list_users -- [ ] start_outbound_voice_contact -- [ ] stop_contact -- [ ] update_contact_attributes -- [ ] 
update_user_hierarchy -- [ ] update_user_identity_info -- [ ] update_user_phone_config -- [ ] update_user_routing_profile -- [ ] update_user_security_profiles - -## cur - 0% implemented -- [ ] delete_report_definition -- [ ] describe_report_definitions -- [ ] put_report_definition - -## datapipeline - 42% implemented -- [X] activate_pipeline -- [ ] add_tags -- [X] create_pipeline -- [ ] deactivate_pipeline -- [X] delete_pipeline -- [X] describe_objects -- [X] describe_pipelines -- [ ] evaluate_expression -- [X] get_pipeline_definition -- [X] list_pipelines -- [ ] poll_for_task -- [X] put_pipeline_definition -- [ ] query_objects -- [ ] remove_tags -- [ ] report_task_progress -- [ ] report_task_runner_heartbeat -- [ ] set_status -- [ ] set_task_status -- [ ] validate_pipeline_definition - -## datasync - 0% implemented -- [ ] cancel_task_execution -- [ ] create_agent -- [ ] create_location_efs -- [ ] create_location_nfs -- [ ] create_location_s3 -- [ ] create_task -- [ ] delete_agent -- [ ] delete_location -- [ ] delete_task -- [ ] describe_agent -- [ ] describe_location_efs -- [ ] describe_location_nfs -- [ ] describe_location_s3 -- [ ] describe_task -- [ ] describe_task_execution -- [ ] list_agents -- [ ] list_locations -- [ ] list_tags_for_resource -- [ ] list_task_executions -- [ ] list_tasks -- [ ] start_task_execution -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_agent -- [ ] update_task - -## dax - 0% implemented -- [ ] create_cluster -- [ ] create_parameter_group -- [ ] create_subnet_group -- [ ] decrease_replication_factor -- [ ] delete_cluster -- [ ] delete_parameter_group -- [ ] delete_subnet_group -- [ ] describe_clusters -- [ ] describe_default_parameters -- [ ] describe_events -- [ ] describe_parameter_groups -- [ ] describe_parameters -- [ ] describe_subnet_groups -- [ ] increase_replication_factor -- [ ] list_tags -- [ ] reboot_node -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_cluster -- [ ] update_parameter_group -- [ ] update_subnet_group - -## devicefarm - 0% implemented -- [ ] create_device_pool -- [ ] create_instance_profile -- [ ] create_network_profile -- [ ] create_project -- [ ] create_remote_access_session -- [ ] create_upload -- [ ] create_vpce_configuration -- [ ] delete_device_pool -- [ ] delete_instance_profile -- [ ] delete_network_profile -- [ ] delete_project -- [ ] delete_remote_access_session -- [ ] delete_run -- [ ] delete_upload -- [ ] delete_vpce_configuration -- [ ] get_account_settings -- [ ] get_device -- [ ] get_device_instance -- [ ] get_device_pool -- [ ] get_device_pool_compatibility -- [ ] get_instance_profile -- [ ] get_job -- [ ] get_network_profile -- [ ] get_offering_status -- [ ] get_project -- [ ] get_remote_access_session -- [ ] get_run -- [ ] get_suite -- [ ] get_test -- [ ] get_upload -- [ ] get_vpce_configuration -- [ ] install_to_remote_access_session -- [ ] list_artifacts -- [ ] list_device_instances -- [ ] list_device_pools -- [ ] list_devices -- [ ] list_instance_profiles -- [ ] list_jobs -- [ ] list_network_profiles -- [ ] list_offering_promotions -- [ ] list_offering_transactions -- [ ] list_offerings -- [ ] list_projects -- [ ] list_remote_access_sessions -- [ ] list_runs -- [ ] list_samples -- [ ] list_suites -- [ ] list_tests -- [ ] list_unique_problems -- [ ] list_uploads -- [ ] list_vpce_configurations -- [ ] purchase_offering -- [ ] renew_offering -- [ ] schedule_run -- [ ] stop_job -- [ ] stop_remote_access_session -- [ ] stop_run -- [ ] update_device_instance -- [ ] update_device_pool -- [ ] 
update_instance_profile -- [ ] update_network_profile -- [ ] update_project -- [ ] update_upload -- [ ] update_vpce_configuration - -## directconnect - 0% implemented -- [ ] allocate_connection_on_interconnect -- [ ] allocate_hosted_connection -- [ ] allocate_private_virtual_interface -- [ ] allocate_public_virtual_interface -- [ ] associate_connection_with_lag -- [ ] associate_hosted_connection -- [ ] associate_virtual_interface -- [ ] confirm_connection -- [ ] confirm_private_virtual_interface -- [ ] confirm_public_virtual_interface -- [ ] create_bgp_peer -- [ ] create_connection -- [ ] create_direct_connect_gateway -- [ ] create_direct_connect_gateway_association -- [ ] create_interconnect -- [ ] create_lag -- [ ] create_private_virtual_interface -- [ ] create_public_virtual_interface -- [ ] delete_bgp_peer -- [ ] delete_connection -- [ ] delete_direct_connect_gateway -- [ ] delete_direct_connect_gateway_association -- [ ] delete_interconnect -- [ ] delete_lag -- [ ] delete_virtual_interface -- [ ] describe_connection_loa -- [ ] describe_connections -- [ ] describe_connections_on_interconnect -- [ ] describe_direct_connect_gateway_associations -- [ ] describe_direct_connect_gateway_attachments -- [ ] describe_direct_connect_gateways -- [ ] describe_hosted_connections -- [ ] describe_interconnect_loa -- [ ] describe_interconnects -- [ ] describe_lags -- [ ] describe_loa -- [ ] describe_locations -- [ ] describe_tags -- [ ] describe_virtual_gateways -- [ ] describe_virtual_interfaces -- [ ] disassociate_connection_from_lag -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_lag -- [ ] update_virtual_interface_attributes - -## discovery - 0% implemented -- [ ] associate_configuration_items_to_application -- [ ] create_application -- [ ] create_tags -- [ ] delete_applications -- [ ] delete_tags -- [ ] describe_agents -- [ ] describe_configurations -- [ ] describe_continuous_exports -- [ ] describe_export_configurations -- [ ] describe_export_tasks -- [ ] describe_tags -- [ ] disassociate_configuration_items_from_application -- [ ] export_configurations -- [ ] get_discovery_summary -- [ ] list_configurations -- [ ] list_server_neighbors -- [ ] start_continuous_export -- [ ] start_data_collection_by_agent_ids -- [ ] start_export_task -- [ ] stop_continuous_export -- [ ] stop_data_collection_by_agent_ids -- [ ] update_application - -## dlm - 0% implemented -- [ ] create_lifecycle_policy -- [ ] delete_lifecycle_policy -- [ ] get_lifecycle_policies -- [ ] get_lifecycle_policy -- [ ] update_lifecycle_policy - -## dms - 0% implemented -- [ ] add_tags_to_resource -- [ ] create_endpoint -- [ ] create_event_subscription -- [ ] create_replication_instance -- [ ] create_replication_subnet_group -- [ ] create_replication_task -- [ ] delete_certificate -- [ ] delete_endpoint -- [ ] delete_event_subscription -- [ ] delete_replication_instance -- [ ] delete_replication_subnet_group -- [ ] delete_replication_task -- [ ] describe_account_attributes -- [ ] describe_certificates -- [ ] describe_connections -- [ ] describe_endpoint_types -- [ ] describe_endpoints -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_orderable_replication_instances -- [ ] describe_refresh_schemas_status -- [ ] describe_replication_instance_task_logs -- [ ] describe_replication_instances -- [ ] describe_replication_subnet_groups -- [ ] describe_replication_task_assessment_results -- [ ] describe_replication_tasks -- [ ] describe_schemas -- [ ] describe_table_statistics -- 
[ ] import_certificate -- [ ] list_tags_for_resource -- [ ] modify_endpoint -- [ ] modify_event_subscription -- [ ] modify_replication_instance -- [ ] modify_replication_subnet_group -- [ ] modify_replication_task -- [ ] reboot_replication_instance -- [ ] refresh_schemas -- [ ] reload_tables -- [ ] remove_tags_from_resource -- [ ] start_replication_task -- [ ] start_replication_task_assessment -- [ ] stop_replication_task -- [ ] test_connection - -## ds - 0% implemented -- [ ] accept_shared_directory -- [ ] add_ip_routes -- [ ] add_tags_to_resource -- [ ] cancel_schema_extension -- [ ] connect_directory -- [ ] create_alias -- [ ] create_computer -- [ ] create_conditional_forwarder -- [ ] create_directory -- [ ] create_log_subscription -- [ ] create_microsoft_ad -- [ ] create_snapshot -- [ ] create_trust -- [ ] delete_conditional_forwarder -- [ ] delete_directory -- [ ] delete_log_subscription -- [ ] delete_snapshot -- [ ] delete_trust -- [ ] deregister_event_topic -- [ ] describe_conditional_forwarders -- [ ] describe_directories -- [ ] describe_domain_controllers -- [ ] describe_event_topics -- [ ] describe_shared_directories -- [ ] describe_snapshots -- [ ] describe_trusts -- [ ] disable_radius -- [ ] disable_sso -- [ ] enable_radius -- [ ] enable_sso -- [ ] get_directory_limits -- [ ] get_snapshot_limits -- [ ] list_ip_routes -- [ ] list_log_subscriptions -- [ ] list_schema_extensions -- [ ] list_tags_for_resource -- [ ] register_event_topic -- [ ] reject_shared_directory -- [ ] remove_ip_routes -- [ ] remove_tags_from_resource -- [ ] reset_user_password -- [ ] restore_from_snapshot -- [ ] share_directory -- [ ] start_schema_extension -- [ ] unshare_directory -- [ ] update_conditional_forwarder -- [ ] update_number_of_domain_controllers -- [ ] update_radius -- [ ] update_trust -- [ ] verify_trust - -## dynamodb - 19% implemented -- [ ] batch_get_item -- [ ] batch_write_item -- [ ] create_backup -- [ ] create_global_table -- [X] create_table -- [ ] delete_backup -- [X] delete_item -- [X] delete_table -- [ ] describe_backup -- [ ] describe_continuous_backups -- [ ] describe_endpoints -- [ ] describe_global_table -- [ ] describe_global_table_settings -- [ ] describe_limits -- [ ] describe_table -- [ ] describe_time_to_live -- [X] get_item -- [ ] list_backups -- [ ] list_global_tables -- [ ] list_tables -- [ ] list_tags_of_resource -- [X] put_item -- [X] query -- [ ] restore_table_from_backup -- [ ] restore_table_to_point_in_time -- [X] scan -- [ ] tag_resource -- [ ] transact_get_items -- [ ] transact_write_items -- [ ] untag_resource -- [ ] update_continuous_backups -- [ ] update_global_table -- [ ] update_global_table_settings -- [ ] update_item -- [ ] update_table -- [ ] update_time_to_live - -## dynamodbstreams - 100% implemented -- [X] describe_stream -- [X] get_records -- [X] get_shard_iterator -- [X] list_streams - -## ec2 - 30% implemented -- [ ] accept_reserved_instances_exchange_quote -- [ ] accept_transit_gateway_vpc_attachment -- [ ] accept_vpc_endpoint_connections -- [X] accept_vpc_peering_connection -- [ ] advertise_byoip_cidr -- [X] allocate_address -- [ ] allocate_hosts -- [ ] apply_security_groups_to_client_vpn_target_network -- [ ] assign_ipv6_addresses -- [ ] assign_private_ip_addresses -- [X] associate_address -- [ ] associate_client_vpn_target_network -- [X] associate_dhcp_options -- [ ] associate_iam_instance_profile -- [X] associate_route_table -- [ ] associate_subnet_cidr_block -- [ ] associate_transit_gateway_route_table -- [X] associate_vpc_cidr_block -- [ ] 
attach_classic_link_vpc -- [X] attach_internet_gateway -- [X] attach_network_interface -- [X] attach_volume -- [X] attach_vpn_gateway -- [ ] authorize_client_vpn_ingress -- [X] authorize_security_group_egress -- [X] authorize_security_group_ingress -- [ ] bundle_instance -- [ ] cancel_bundle_task -- [ ] cancel_capacity_reservation -- [ ] cancel_conversion_task -- [ ] cancel_export_task -- [ ] cancel_import_task -- [ ] cancel_reserved_instances_listing -- [X] cancel_spot_fleet_requests -- [X] cancel_spot_instance_requests -- [ ] confirm_product_instance -- [ ] copy_fpga_image -- [X] copy_image -- [X] copy_snapshot -- [ ] create_capacity_reservation -- [ ] create_client_vpn_endpoint -- [ ] create_client_vpn_route -- [X] create_customer_gateway -- [ ] create_default_subnet -- [ ] create_default_vpc -- [X] create_dhcp_options -- [ ] create_egress_only_internet_gateway -- [ ] create_fleet -- [ ] create_flow_logs -- [ ] create_fpga_image -- [X] create_image -- [ ] create_instance_export_task -- [X] create_internet_gateway -- [X] create_key_pair -- [ ] create_launch_template -- [ ] create_launch_template_version -- [X] create_nat_gateway -- [X] create_network_acl -- [X] create_network_acl_entry -- [X] create_network_interface -- [ ] create_network_interface_permission -- [ ] create_placement_group -- [ ] create_reserved_instances_listing -- [X] create_route -- [X] create_route_table -- [X] create_security_group -- [X] create_snapshot -- [ ] create_spot_datafeed_subscription -- [X] create_subnet -- [X] create_tags -- [ ] create_transit_gateway -- [ ] create_transit_gateway_route -- [ ] create_transit_gateway_route_table -- [ ] create_transit_gateway_vpc_attachment -- [X] create_volume -- [X] create_vpc -- [ ] create_vpc_endpoint -- [ ] create_vpc_endpoint_connection_notification -- [ ] create_vpc_endpoint_service_configuration -- [X] create_vpc_peering_connection -- [X] create_vpn_connection -- [ ] create_vpn_connection_route -- [X] create_vpn_gateway -- [ ] delete_client_vpn_endpoint -- [ ] delete_client_vpn_route -- [X] delete_customer_gateway -- [ ] delete_dhcp_options -- [ ] delete_egress_only_internet_gateway -- [ ] delete_fleets -- [ ] delete_flow_logs -- [ ] delete_fpga_image -- [X] delete_internet_gateway -- [X] delete_key_pair -- [ ] delete_launch_template -- [ ] delete_launch_template_versions -- [X] delete_nat_gateway -- [X] delete_network_acl -- [X] delete_network_acl_entry -- [X] delete_network_interface -- [ ] delete_network_interface_permission -- [ ] delete_placement_group -- [X] delete_route -- [X] delete_route_table -- [X] delete_security_group -- [X] delete_snapshot -- [ ] delete_spot_datafeed_subscription -- [X] delete_subnet -- [X] delete_tags -- [ ] delete_transit_gateway -- [ ] delete_transit_gateway_route -- [ ] delete_transit_gateway_route_table -- [ ] delete_transit_gateway_vpc_attachment -- [X] delete_volume -- [X] delete_vpc -- [ ] delete_vpc_endpoint_connection_notifications -- [ ] delete_vpc_endpoint_service_configurations -- [ ] delete_vpc_endpoints -- [X] delete_vpc_peering_connection -- [X] delete_vpn_connection -- [ ] delete_vpn_connection_route -- [X] delete_vpn_gateway -- [ ] deprovision_byoip_cidr -- [X] deregister_image -- [ ] describe_account_attributes -- [X] describe_addresses -- [ ] describe_aggregate_id_format -- [X] describe_availability_zones -- [ ] describe_bundle_tasks -- [ ] describe_byoip_cidrs -- [ ] describe_capacity_reservations -- [ ] describe_classic_link_instances -- [ ] describe_client_vpn_authorization_rules -- [ ] 
describe_client_vpn_connections -- [ ] describe_client_vpn_endpoints -- [ ] describe_client_vpn_routes -- [ ] describe_client_vpn_target_networks -- [ ] describe_conversion_tasks -- [ ] describe_customer_gateways -- [X] describe_dhcp_options -- [ ] describe_egress_only_internet_gateways -- [ ] describe_elastic_gpus -- [ ] describe_export_tasks -- [ ] describe_fleet_history -- [ ] describe_fleet_instances -- [ ] describe_fleets -- [ ] describe_flow_logs -- [ ] describe_fpga_image_attribute -- [ ] describe_fpga_images -- [ ] describe_host_reservation_offerings -- [ ] describe_host_reservations -- [ ] describe_hosts -- [ ] describe_iam_instance_profile_associations -- [ ] describe_id_format -- [ ] describe_identity_id_format -- [ ] describe_image_attribute -- [X] describe_images -- [ ] describe_import_image_tasks -- [ ] describe_import_snapshot_tasks -- [X] describe_instance_attribute -- [ ] describe_instance_credit_specifications -- [ ] describe_instance_status -- [ ] describe_instances -- [X] describe_internet_gateways -- [X] describe_key_pairs -- [ ] describe_launch_template_versions -- [ ] describe_launch_templates -- [ ] describe_moving_addresses -- [ ] describe_nat_gateways -- [ ] describe_network_acls -- [ ] describe_network_interface_attribute -- [ ] describe_network_interface_permissions -- [X] describe_network_interfaces -- [ ] describe_placement_groups -- [ ] describe_prefix_lists -- [ ] describe_principal_id_format -- [ ] describe_public_ipv4_pools -- [X] describe_regions -- [ ] describe_reserved_instances -- [ ] describe_reserved_instances_listings -- [ ] describe_reserved_instances_modifications -- [ ] describe_reserved_instances_offerings -- [ ] describe_route_tables -- [ ] describe_scheduled_instance_availability -- [ ] describe_scheduled_instances -- [ ] describe_security_group_references -- [X] describe_security_groups -- [ ] describe_snapshot_attribute -- [X] describe_snapshots -- [ ] describe_spot_datafeed_subscription -- [X] describe_spot_fleet_instances -- [ ] describe_spot_fleet_request_history -- [X] describe_spot_fleet_requests -- [X] describe_spot_instance_requests -- [ ] describe_spot_price_history -- [ ] describe_stale_security_groups -- [ ] describe_subnets -- [X] describe_tags -- [ ] describe_transit_gateway_attachments -- [ ] describe_transit_gateway_route_tables -- [ ] describe_transit_gateway_vpc_attachments -- [ ] describe_transit_gateways -- [ ] describe_volume_attribute -- [ ] describe_volume_status -- [X] describe_volumes -- [ ] describe_volumes_modifications -- [X] describe_vpc_attribute -- [ ] describe_vpc_classic_link -- [ ] describe_vpc_classic_link_dns_support -- [ ] describe_vpc_endpoint_connection_notifications -- [ ] describe_vpc_endpoint_connections -- [ ] describe_vpc_endpoint_service_configurations -- [ ] describe_vpc_endpoint_service_permissions -- [ ] describe_vpc_endpoint_services -- [ ] describe_vpc_endpoints -- [ ] describe_vpc_peering_connections -- [ ] describe_vpcs -- [X] describe_vpn_connections -- [ ] describe_vpn_gateways -- [ ] detach_classic_link_vpc -- [X] detach_internet_gateway -- [X] detach_network_interface -- [X] detach_volume -- [X] detach_vpn_gateway -- [ ] disable_transit_gateway_route_table_propagation -- [ ] disable_vgw_route_propagation -- [ ] disable_vpc_classic_link -- [ ] disable_vpc_classic_link_dns_support -- [X] disassociate_address -- [ ] disassociate_client_vpn_target_network -- [ ] disassociate_iam_instance_profile -- [X] disassociate_route_table -- [ ] disassociate_subnet_cidr_block -- [ ] 
disassociate_transit_gateway_route_table -- [X] disassociate_vpc_cidr_block -- [ ] enable_transit_gateway_route_table_propagation -- [ ] enable_vgw_route_propagation -- [ ] enable_volume_io -- [ ] enable_vpc_classic_link -- [ ] enable_vpc_classic_link_dns_support -- [ ] export_client_vpn_client_certificate_revocation_list -- [ ] export_client_vpn_client_configuration -- [ ] export_transit_gateway_routes -- [ ] get_console_output -- [ ] get_console_screenshot -- [ ] get_host_reservation_purchase_preview -- [ ] get_launch_template_data -- [ ] get_password_data -- [ ] get_reserved_instances_exchange_quote -- [ ] get_transit_gateway_attachment_propagations -- [ ] get_transit_gateway_route_table_associations -- [ ] get_transit_gateway_route_table_propagations -- [ ] import_client_vpn_client_certificate_revocation_list -- [ ] import_image -- [ ] import_instance -- [X] import_key_pair -- [ ] import_snapshot -- [ ] import_volume -- [ ] modify_capacity_reservation -- [ ] modify_client_vpn_endpoint -- [ ] modify_fleet -- [ ] modify_fpga_image_attribute -- [ ] modify_hosts -- [ ] modify_id_format -- [ ] modify_identity_id_format -- [ ] modify_image_attribute -- [X] modify_instance_attribute -- [ ] modify_instance_capacity_reservation_attributes -- [ ] modify_instance_credit_specification -- [ ] modify_instance_placement -- [ ] modify_launch_template -- [X] modify_network_interface_attribute -- [ ] modify_reserved_instances -- [ ] modify_snapshot_attribute -- [X] modify_spot_fleet_request -- [X] modify_subnet_attribute -- [ ] modify_transit_gateway_vpc_attachment -- [ ] modify_volume -- [ ] modify_volume_attribute -- [X] modify_vpc_attribute -- [ ] modify_vpc_endpoint -- [ ] modify_vpc_endpoint_connection_notification -- [ ] modify_vpc_endpoint_service_configuration -- [ ] modify_vpc_endpoint_service_permissions -- [ ] modify_vpc_peering_connection_options -- [ ] modify_vpc_tenancy -- [ ] monitor_instances -- [ ] move_address_to_vpc -- [ ] provision_byoip_cidr -- [ ] purchase_host_reservation -- [ ] purchase_reserved_instances_offering -- [ ] purchase_scheduled_instances -- [X] reboot_instances -- [ ] register_image -- [ ] reject_transit_gateway_vpc_attachment -- [ ] reject_vpc_endpoint_connections -- [X] reject_vpc_peering_connection -- [X] release_address -- [ ] release_hosts -- [ ] replace_iam_instance_profile_association -- [X] replace_network_acl_association -- [X] replace_network_acl_entry -- [X] replace_route -- [X] replace_route_table_association -- [ ] replace_transit_gateway_route -- [ ] report_instance_status -- [X] request_spot_fleet -- [X] request_spot_instances -- [ ] reset_fpga_image_attribute -- [ ] reset_image_attribute -- [ ] reset_instance_attribute -- [ ] reset_network_interface_attribute -- [ ] reset_snapshot_attribute -- [ ] restore_address_to_classic -- [ ] revoke_client_vpn_ingress -- [X] revoke_security_group_egress -- [X] revoke_security_group_ingress -- [ ] run_instances -- [ ] run_scheduled_instances -- [ ] search_transit_gateway_routes -- [X] start_instances -- [X] stop_instances -- [ ] terminate_client_vpn_connections -- [X] terminate_instances -- [ ] unassign_ipv6_addresses -- [ ] unassign_private_ip_addresses -- [ ] unmonitor_instances -- [ ] update_security_group_rule_descriptions_egress -- [ ] update_security_group_rule_descriptions_ingress -- [ ] withdraw_byoip_cidr - -## ecr - 28% implemented -- [ ] batch_check_layer_availability -- [ ] batch_delete_image -- [X] batch_get_image -- [ ] complete_layer_upload -- [X] create_repository -- [ ] delete_lifecycle_policy -- 
[X] delete_repository -- [ ] delete_repository_policy -- [X] describe_images -- [X] describe_repositories -- [ ] get_authorization_token -- [ ] get_download_url_for_layer -- [ ] get_lifecycle_policy -- [ ] get_lifecycle_policy_preview -- [ ] get_repository_policy -- [ ] initiate_layer_upload -- [X] list_images -- [ ] list_tags_for_resource -- [X] put_image -- [ ] put_lifecycle_policy -- [ ] set_repository_policy -- [ ] start_lifecycle_policy_preview -- [ ] tag_resource -- [ ] untag_resource -- [ ] upload_layer_part - -## ecs - 72% implemented -- [X] create_cluster -- [X] create_service -- [ ] delete_account_setting -- [X] delete_attributes -- [X] delete_cluster -- [X] delete_service -- [X] deregister_container_instance -- [X] deregister_task_definition -- [X] describe_clusters -- [X] describe_container_instances -- [X] describe_services -- [X] describe_task_definition -- [X] describe_tasks -- [ ] discover_poll_endpoint -- [ ] list_account_settings -- [X] list_attributes -- [X] list_clusters -- [X] list_container_instances -- [X] list_services -- [ ] list_tags_for_resource -- [X] list_task_definition_families -- [X] list_task_definitions -- [X] list_tasks -- [ ] put_account_setting -- [X] put_attributes -- [X] register_container_instance -- [X] register_task_definition -- [X] run_task -- [X] start_task -- [X] stop_task -- [ ] submit_container_state_change -- [ ] submit_task_state_change -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_container_agent -- [X] update_container_instances_state -- [X] update_service - -## efs - 0% implemented -- [ ] create_file_system -- [ ] create_mount_target -- [ ] create_tags -- [ ] delete_file_system -- [ ] delete_mount_target -- [ ] delete_tags -- [ ] describe_file_systems -- [ ] describe_mount_target_security_groups -- [ ] describe_mount_targets -- [ ] describe_tags -- [ ] modify_mount_target_security_groups -- [ ] update_file_system - -## eks - 0% implemented -- [ ] create_cluster -- [ ] delete_cluster -- [ ] describe_cluster -- [ ] describe_update -- [ ] list_clusters -- [ ] list_updates -- [ ] update_cluster_version - -## elasticache - 0% implemented -- [ ] add_tags_to_resource -- [ ] authorize_cache_security_group_ingress -- [ ] copy_snapshot -- [ ] create_cache_cluster -- [ ] create_cache_parameter_group -- [ ] create_cache_security_group -- [ ] create_cache_subnet_group -- [ ] create_replication_group -- [ ] create_snapshot -- [ ] decrease_replica_count -- [ ] delete_cache_cluster -- [ ] delete_cache_parameter_group -- [ ] delete_cache_security_group -- [ ] delete_cache_subnet_group -- [ ] delete_replication_group -- [ ] delete_snapshot -- [ ] describe_cache_clusters -- [ ] describe_cache_engine_versions -- [ ] describe_cache_parameter_groups -- [ ] describe_cache_parameters -- [ ] describe_cache_security_groups -- [ ] describe_cache_subnet_groups -- [ ] describe_engine_default_parameters -- [ ] describe_events -- [ ] describe_replication_groups -- [ ] describe_reserved_cache_nodes -- [ ] describe_reserved_cache_nodes_offerings -- [ ] describe_snapshots -- [ ] increase_replica_count -- [ ] list_allowed_node_type_modifications -- [ ] list_tags_for_resource -- [ ] modify_cache_cluster -- [ ] modify_cache_parameter_group -- [ ] modify_cache_subnet_group -- [ ] modify_replication_group -- [ ] modify_replication_group_shard_configuration -- [ ] purchase_reserved_cache_nodes_offering -- [ ] reboot_cache_cluster -- [ ] remove_tags_from_resource -- [ ] reset_cache_parameter_group -- [ ] revoke_cache_security_group_ingress -- [ ] test_failover - -## 
-
-## efs - 0% implemented
-- [ ] create_file_system
-- [ ] create_mount_target
-- [ ] create_tags
-- [ ] delete_file_system
-- [ ] delete_mount_target
-- [ ] delete_tags
-- [ ] describe_file_systems
-- [ ] describe_mount_target_security_groups
-- [ ] describe_mount_targets
-- [ ] describe_tags
-- [ ] modify_mount_target_security_groups
-- [ ] update_file_system
-
-## eks - 0% implemented
-- [ ] create_cluster
-- [ ] delete_cluster
-- [ ] describe_cluster
-- [ ] describe_update
-- [ ] list_clusters
-- [ ] list_updates
-- [ ] update_cluster_version
-
-## elasticache - 0% implemented
-- [ ] add_tags_to_resource
-- [ ] authorize_cache_security_group_ingress
-- [ ] copy_snapshot
-- [ ] create_cache_cluster
-- [ ] create_cache_parameter_group
-- [ ] create_cache_security_group
-- [ ] create_cache_subnet_group
-- [ ] create_replication_group
-- [ ] create_snapshot
-- [ ] decrease_replica_count
-- [ ] delete_cache_cluster
-- [ ] delete_cache_parameter_group
-- [ ] delete_cache_security_group
-- [ ] delete_cache_subnet_group
-- [ ] delete_replication_group
-- [ ] delete_snapshot
-- [ ] describe_cache_clusters
-- [ ] describe_cache_engine_versions
-- [ ] describe_cache_parameter_groups
-- [ ] describe_cache_parameters
-- [ ] describe_cache_security_groups
-- [ ] describe_cache_subnet_groups
-- [ ] describe_engine_default_parameters
-- [ ] describe_events
-- [ ] describe_replication_groups
-- [ ] describe_reserved_cache_nodes
-- [ ] describe_reserved_cache_nodes_offerings
-- [ ] describe_snapshots
-- [ ] increase_replica_count
-- [ ] list_allowed_node_type_modifications
-- [ ] list_tags_for_resource
-- [ ] modify_cache_cluster
-- [ ] modify_cache_parameter_group
-- [ ] modify_cache_subnet_group
-- [ ] modify_replication_group
-- [ ] modify_replication_group_shard_configuration
-- [ ] purchase_reserved_cache_nodes_offering
-- [ ] reboot_cache_cluster
-- [ ] remove_tags_from_resource
-- [ ] reset_cache_parameter_group
-- [ ] revoke_cache_security_group_ingress
-- [ ] test_failover
-
-## elasticbeanstalk - 0% implemented
-- [ ] abort_environment_update
-- [ ] apply_environment_managed_action
-- [ ] check_dns_availability
-- [ ] compose_environments
-- [ ] create_application
-- [ ] create_application_version
-- [ ] create_configuration_template
-- [ ] create_environment
-- [ ] create_platform_version
-- [ ] create_storage_location
-- [ ] delete_application
-- [ ] delete_application_version
-- [ ] delete_configuration_template
-- [ ] delete_environment_configuration
-- [ ] delete_platform_version
-- [ ] describe_account_attributes
-- [ ] describe_application_versions
-- [ ] describe_applications
-- [ ] describe_configuration_options
-- [ ] describe_configuration_settings
-- [ ] describe_environment_health
-- [ ] describe_environment_managed_action_history
-- [ ] describe_environment_managed_actions
-- [ ] describe_environment_resources
-- [ ] describe_environments
-- [ ] describe_events
-- [ ] describe_instances_health
-- [ ] describe_platform_version
-- [ ] list_available_solution_stacks
-- [ ] list_platform_versions
-- [ ] list_tags_for_resource
-- [ ] rebuild_environment
-- [ ] request_environment_info
-- [ ] restart_app_server
-- [ ] retrieve_environment_info
-- [ ] swap_environment_cnames
-- [ ] terminate_environment
-- [ ] update_application
-- [ ] update_application_resource_lifecycle
-- [ ] update_application_version
-- [ ] update_configuration_template
-- [ ] update_environment
-- [ ] update_tags_for_resource
-- [ ] validate_configuration_settings
-
-## elastictranscoder - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] create_pipeline
-- [ ] create_preset
-- [ ] delete_pipeline
-- [ ] delete_preset
-- [ ] list_jobs_by_pipeline
-- [ ] list_jobs_by_status
-- [ ] list_pipelines
-- [ ] list_presets
-- [ ] read_job
-- [ ] read_pipeline
-- [ ] read_preset
-- [ ] test_role
-- [ ] update_pipeline
-- [ ] update_pipeline_notifications
-- [ ] update_pipeline_status
-
-## elb - 34% implemented
-- [ ] add_tags
-- [X] apply_security_groups_to_load_balancer
-- [ ] attach_load_balancer_to_subnets
-- [X] configure_health_check
-- [X] create_app_cookie_stickiness_policy
-- [X] create_lb_cookie_stickiness_policy
-- [X] create_load_balancer
-- [X] create_load_balancer_listeners
-- [ ] create_load_balancer_policy
-- [X] delete_load_balancer
-- [X] delete_load_balancer_listeners
-- [ ] delete_load_balancer_policy
-- [ ] deregister_instances_from_load_balancer
-- [ ] describe_account_limits
-- [ ] describe_instance_health
-- [ ] describe_load_balancer_attributes
-- [ ] describe_load_balancer_policies
-- [ ] describe_load_balancer_policy_types
-- [X] describe_load_balancers
-- [ ] describe_tags
-- [ ] detach_load_balancer_from_subnets
-- [ ] disable_availability_zones_for_load_balancer
-- [ ] enable_availability_zones_for_load_balancer
-- [ ] modify_load_balancer_attributes
-- [ ] register_instances_with_load_balancer
-- [ ] remove_tags
-- [ ] set_load_balancer_listener_ssl_certificate
-- [ ] set_load_balancer_policies_for_backend_server
-- [X] set_load_balancer_policies_of_listener
-
-## elbv2 - 70% implemented
-- [ ] add_listener_certificates
-- [ ] add_tags
-- [X] create_listener
-- [X] create_load_balancer
-- [X] create_rule
-- [X] create_target_group
-- [X] delete_listener
-- [X] delete_load_balancer
-- [X] delete_rule
-- [X] delete_target_group
-- [X] deregister_targets
-- [ ] describe_account_limits
-- [ ] describe_listener_certificates
-- [X] describe_listeners
-- [X] describe_load_balancer_attributes
-- [X] describe_load_balancers
-- [X] describe_rules
-- [ ] describe_ssl_policies
-- [ ] describe_tags
-- [ ] describe_target_group_attributes
-- [X] describe_target_groups
-- [X] describe_target_health
-- [X] modify_listener
-- [X] modify_load_balancer_attributes
-- [X] modify_rule
-- [X] modify_target_group
-- [ ] modify_target_group_attributes
-- [X] register_targets
-- [ ] remove_listener_certificates
-- [ ] remove_tags
-- [X] set_ip_address_type
-- [X] set_rule_priorities
-- [X] set_security_groups
-- [X] set_subnets
-
-## emr - 55% implemented
-- [ ] add_instance_fleet
-- [X] add_instance_groups
-- [X] add_job_flow_steps
-- [X] add_tags
-- [ ] cancel_steps
-- [ ] create_security_configuration
-- [ ] delete_security_configuration
-- [ ] describe_cluster
-- [X] describe_job_flows
-- [ ] describe_security_configuration
-- [X] describe_step
-- [X] list_bootstrap_actions
-- [X] list_clusters
-- [ ] list_instance_fleets
-- [X] list_instance_groups
-- [ ] list_instances
-- [ ] list_security_configurations
-- [X] list_steps
-- [ ] modify_instance_fleet
-- [X] modify_instance_groups
-- [ ] put_auto_scaling_policy
-- [ ] remove_auto_scaling_policy
-- [X] remove_tags
-- [X] run_job_flow
-- [X] set_termination_protection
-- [X] set_visible_to_all_users
-- [X] terminate_job_flows
-
-## es - 0% implemented
-- [ ] add_tags
-- [ ] cancel_elasticsearch_service_software_update
-- [ ] create_elasticsearch_domain
-- [ ] delete_elasticsearch_domain
-- [ ] delete_elasticsearch_service_role
-- [ ] describe_elasticsearch_domain
-- [ ] describe_elasticsearch_domain_config
-- [ ] describe_elasticsearch_domains
-- [ ] describe_elasticsearch_instance_type_limits
-- [ ] describe_reserved_elasticsearch_instance_offerings
-- [ ] describe_reserved_elasticsearch_instances
-- [ ] get_compatible_elasticsearch_versions
-- [ ] get_upgrade_history
-- [ ] get_upgrade_status
-- [ ] list_domain_names
-- [ ] list_elasticsearch_instance_types
-- [ ] list_elasticsearch_versions
-- [ ] list_tags
-- [ ] purchase_reserved_elasticsearch_instance_offering
-- [ ] remove_tags
-- [ ] start_elasticsearch_service_software_update
-- [ ] update_elasticsearch_domain_config
-- [ ] upgrade_elasticsearch_domain
-
-## events - 100% implemented
-- [X] delete_rule
-- [X] describe_event_bus
-- [X] describe_rule
-- [X] disable_rule
-- [X] enable_rule
-- [X] list_rule_names_by_target
-- [X] list_rules
-- [X] list_targets_by_rule
-- [X] put_events
-- [X] put_permission
-- [X] put_rule
-- [X] put_targets
-- [X] remove_permission
-- [X] remove_targets
-- [X] test_event_pattern
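Every CloudWatch Events endpoint above is checked off, so rule round-trips can be tested entirely in-process. A minimal sketch, assuming boto3 and moto are installed (the rule name is illustrative):

```python
import boto3
from moto import mock_events

@mock_events
def test_put_and_describe_rule():
    # put_rule and describe_rule are both marked implemented above
    client = boto3.client("events", region_name="us-east-1")
    client.put_rule(Name="nightly", ScheduleExpression="rate(1 day)")
    rule = client.describe_rule(Name="nightly")
    assert rule["ScheduleExpression"] == "rate(1 day)"
```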
-
-## firehose - 0% implemented
-- [ ] create_delivery_stream
-- [ ] delete_delivery_stream
-- [ ] describe_delivery_stream
-- [ ] list_delivery_streams
-- [ ] list_tags_for_delivery_stream
-- [ ] put_record
-- [ ] put_record_batch
-- [ ] start_delivery_stream_encryption
-- [ ] stop_delivery_stream_encryption
-- [ ] tag_delivery_stream
-- [ ] untag_delivery_stream
-- [ ] update_destination
-
-## fms - 0% implemented
-- [ ] associate_admin_account
-- [ ] delete_notification_channel
-- [ ] delete_policy
-- [ ] disassociate_admin_account
-- [ ] get_admin_account
-- [ ] get_compliance_detail
-- [ ] get_notification_channel
-- [ ] get_policy
-- [ ] list_compliance_status
-- [ ] list_member_accounts
-- [ ] list_policies
-- [ ] put_notification_channel
-- [ ] put_policy
-
-## fsx - 0% implemented
-- [ ] create_backup
-- [ ] create_file_system
-- [ ] create_file_system_from_backup
-- [ ] delete_backup
-- [ ] delete_file_system
-- [ ] describe_backups
-- [ ] describe_file_systems
-- [ ] list_tags_for_resource
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_file_system
-
-## gamelift - 0% implemented
-- [ ] accept_match
-- [ ] create_alias
-- [ ] create_build
-- [ ] create_fleet
-- [ ] create_game_session
-- [ ] create_game_session_queue
-- [ ] create_matchmaking_configuration
-- [ ] create_matchmaking_rule_set
-- [ ] create_player_session
-- [ ] create_player_sessions
-- [ ] create_vpc_peering_authorization
-- [ ] create_vpc_peering_connection
-- [ ] delete_alias
-- [ ] delete_build
-- [ ] delete_fleet
-- [ ] delete_game_session_queue
-- [ ] delete_matchmaking_configuration
-- [ ] delete_scaling_policy
-- [ ] delete_vpc_peering_authorization
-- [ ] delete_vpc_peering_connection
-- [ ] describe_alias
-- [ ] describe_build
-- [ ] describe_ec2_instance_limits
-- [ ] describe_fleet_attributes
-- [ ] describe_fleet_capacity
-- [ ] describe_fleet_events
-- [ ] describe_fleet_port_settings
-- [ ] describe_fleet_utilization
-- [ ] describe_game_session_details
-- [ ] describe_game_session_placement
-- [ ] describe_game_session_queues
-- [ ] describe_game_sessions
-- [ ] describe_instances
-- [ ] describe_matchmaking
-- [ ] describe_matchmaking_configurations
-- [ ] describe_matchmaking_rule_sets
-- [ ] describe_player_sessions
-- [ ] describe_runtime_configuration
-- [ ] describe_scaling_policies
-- [ ] describe_vpc_peering_authorizations
-- [ ] describe_vpc_peering_connections
-- [ ] get_game_session_log_url
-- [ ] get_instance_access
-- [ ] list_aliases
-- [ ] list_builds
-- [ ] list_fleets
-- [ ] put_scaling_policy
-- [ ] request_upload_credentials
-- [ ] resolve_alias
-- [ ] search_game_sessions
-- [ ] start_fleet_actions
-- [ ] start_game_session_placement
-- [ ] start_match_backfill
-- [ ] start_matchmaking
-- [ ] stop_fleet_actions
-- [ ] stop_game_session_placement
-- [ ] stop_matchmaking
-- [ ] update_alias
-- [ ] update_build
-- [ ] update_fleet_attributes
-- [ ] update_fleet_capacity
-- [ ] update_fleet_port_settings
-- [ ] update_game_session
-- [ ] update_game_session_queue
-- [ ] update_matchmaking_configuration
-- [ ] update_runtime_configuration
-- [ ] validate_matchmaking_rule_set
-
-## glacier - 12% implemented
-- [ ] abort_multipart_upload
-- [ ] abort_vault_lock
-- [ ] add_tags_to_vault
-- [ ] complete_multipart_upload
-- [ ] complete_vault_lock
-- [X] create_vault
-- [ ] delete_archive
-- [X] delete_vault
-- [ ] delete_vault_access_policy
-- [ ] delete_vault_notifications
-- [ ] describe_job
-- [ ] describe_vault
-- [ ] get_data_retrieval_policy
-- [ ] get_job_output
-- [ ] get_vault_access_policy
-- [ ] get_vault_lock
-- [ ] get_vault_notifications
-- [X] initiate_job
-- [ ] initiate_multipart_upload
-- [ ] initiate_vault_lock
-- [X] list_jobs
-- [ ] list_multipart_uploads
-- [ ] list_parts
-- [ ] list_provisioned_capacity
-- [ ] list_tags_for_vault
-- [ ] list_vaults
-- [ ] purchase_provisioned_capacity
-- [ ] remove_tags_from_vault
-- [ ] set_data_retrieval_policy
-- [ ] set_vault_access_policy
-- [ ] set_vault_notifications
-- [ ] upload_archive
-- [ ] upload_multipart_part
-
-## globalaccelerator - 0% implemented
-- [ ] create_accelerator
-- [ ] create_endpoint_group
-- [ ] create_listener
-- [ ] delete_accelerator
-- [ ] delete_endpoint_group
-- [ ] delete_listener
-- [ ] describe_accelerator
-- [ ] describe_accelerator_attributes
-- [ ] describe_endpoint_group
-- [ ] describe_listener
-- [ ] list_accelerators
-- [ ] list_endpoint_groups
-- [ ] list_listeners
-- [ ] update_accelerator
-- [ ] update_accelerator_attributes
-- [ ] update_endpoint_group
-- [ ] update_listener
-
-## glue - 5% implemented
-- [ ] batch_create_partition
-- [ ] batch_delete_connection
-- [ ] batch_delete_partition
-- [ ] batch_delete_table
-- [ ] batch_delete_table_version
-- [ ] batch_get_partition
-- [ ] batch_stop_job_run
-- [ ] create_classifier
-- [ ] create_connection
-- [ ] create_crawler
-- [X] create_database
-- [ ] create_dev_endpoint
-- [ ] create_job
-- [ ] create_partition
-- [ ] create_script
-- [ ] create_security_configuration
-- [X] create_table
-- [ ] create_trigger
-- [ ] create_user_defined_function
-- [ ] delete_classifier
-- [ ] delete_connection
-- [ ] delete_crawler
-- [ ] delete_database
-- [ ] delete_dev_endpoint
-- [ ] delete_job
-- [ ] delete_partition
-- [ ] delete_resource_policy
-- [ ] delete_security_configuration
-- [ ] delete_table
-- [ ] delete_table_version
-- [ ] delete_trigger
-- [ ] delete_user_defined_function
-- [ ] get_catalog_import_status
-- [ ] get_classifier
-- [ ] get_classifiers
-- [ ] get_connection
-- [ ] get_connections
-- [ ] get_crawler
-- [ ] get_crawler_metrics
-- [ ] get_crawlers
-- [ ] get_data_catalog_encryption_settings
-- [X] get_database
-- [ ] get_databases
-- [ ] get_dataflow_graph
-- [ ] get_dev_endpoint
-- [ ] get_dev_endpoints
-- [ ] get_job
-- [ ] get_job_run
-- [ ] get_job_runs
-- [ ] get_jobs
-- [ ] get_mapping
-- [ ] get_partition
-- [ ] get_partitions
-- [ ] get_plan
-- [ ] get_resource_policy
-- [ ] get_security_configuration
-- [ ] get_security_configurations
-- [X] get_table
-- [ ] get_table_version
-- [ ] get_table_versions
-- [X] get_tables
-- [ ] get_trigger
-- [ ] get_triggers
-- [ ] get_user_defined_function
-- [ ] get_user_defined_functions
-- [ ] import_catalog_to_glue
-- [ ] put_data_catalog_encryption_settings
-- [ ] put_resource_policy
-- [ ] reset_job_bookmark
-- [ ] start_crawler
-- [ ] start_crawler_schedule
-- [ ] start_job_run
-- [ ] start_trigger
-- [ ] stop_crawler
-- [ ] stop_crawler_schedule
-- [ ] stop_trigger
-- [ ] update_classifier
-- [ ] update_connection
-- [ ] update_crawler
-- [ ] update_crawler_schedule
-- [ ] update_database
-- [ ] update_dev_endpoint
-- [ ] update_job
-- [ ] update_partition
-- [ ] update_table
-- [ ] update_trigger
-- [ ] update_user_defined_function
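Only the catalog basics are mocked for Glue (create_database, create_table and their getters), but that is enough for a table round-trip. A minimal sketch, assuming boto3 and moto are installed (database and table names are illustrative):

```python
import boto3
from moto import mock_glue

@mock_glue
def test_create_and_get_table():
    # create_database, create_table and get_table are marked implemented above
    client = boto3.client("glue", region_name="us-east-1")
    client.create_database(DatabaseInput={"Name": "db1"})
    client.create_table(DatabaseName="db1", TableInput={"Name": "t1"})
    table = client.get_table(DatabaseName="db1", Name="t1")
    assert table["Table"]["Name"] == "t1"
```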
-
-## greengrass - 0% implemented
-- [ ] associate_role_to_group
-- [ ] associate_service_role_to_account
-- [ ] create_connector_definition
-- [ ] create_connector_definition_version
-- [ ] create_core_definition
-- [ ] create_core_definition_version
-- [ ] create_deployment
-- [ ] create_device_definition
-- [ ] create_device_definition_version
-- [ ] create_function_definition
-- [ ] create_function_definition_version
-- [ ] create_group
-- [ ] create_group_certificate_authority
-- [ ] create_group_version
-- [ ] create_logger_definition
-- [ ] create_logger_definition_version
-- [ ] create_resource_definition
-- [ ] create_resource_definition_version
-- [ ] create_software_update_job
-- [ ] create_subscription_definition
-- [ ] create_subscription_definition_version
-- [ ] delete_connector_definition
-- [ ] delete_core_definition
-- [ ] delete_device_definition
-- [ ] delete_function_definition
-- [ ] delete_group
-- [ ] delete_logger_definition
-- [ ] delete_resource_definition
-- [ ] delete_subscription_definition
-- [ ] disassociate_role_from_group
-- [ ] disassociate_service_role_from_account
-- [ ] get_associated_role
-- [ ] get_bulk_deployment_status
-- [ ] get_connectivity_info
-- [ ] get_connector_definition
-- [ ] get_connector_definition_version
-- [ ] get_core_definition
-- [ ] get_core_definition_version
-- [ ] get_deployment_status
-- [ ] get_device_definition
-- [ ] get_device_definition_version
-- [ ] get_function_definition
-- [ ] get_function_definition_version
-- [ ] get_group
-- [ ] get_group_certificate_authority
-- [ ] get_group_certificate_configuration
-- [ ] get_group_version
-- [ ] get_logger_definition
-- [ ] get_logger_definition_version
-- [ ] get_resource_definition
-- [ ] get_resource_definition_version
-- [ ] get_service_role_for_account
-- [ ] get_subscription_definition
-- [ ] get_subscription_definition_version
-- [ ] list_bulk_deployment_detailed_reports
-- [ ] list_bulk_deployments
-- [ ] list_connector_definition_versions
-- [ ] list_connector_definitions
-- [ ] list_core_definition_versions
-- [ ] list_core_definitions
-- [ ] list_deployments
-- [ ] list_device_definition_versions
-- [ ] list_device_definitions
-- [ ] list_function_definition_versions
-- [ ] list_function_definitions
-- [ ] list_group_certificate_authorities
-- [ ] list_group_versions
-- [ ] list_groups
-- [ ] list_logger_definition_versions
-- [ ] list_logger_definitions
-- [ ] list_resource_definition_versions
-- [ ] list_resource_definitions
-- [ ] list_subscription_definition_versions
-- [ ] list_subscription_definitions
-- [ ] reset_deployments
-- [ ] start_bulk_deployment
-- [ ] stop_bulk_deployment
-- [ ] update_connectivity_info
-- [ ] update_connector_definition
-- [ ] update_core_definition
-- [ ] update_device_definition
-- [ ] update_function_definition
-- [ ] update_group
-- [ ] update_group_certificate_configuration
-- [ ] update_logger_definition
-- [ ] update_resource_definition
-- [ ] update_subscription_definition
-
-## guardduty - 0% implemented
-- [ ] accept_invitation
-- [ ] archive_findings
-- [ ] create_detector
-- [ ] create_filter
-- [ ] create_ip_set
-- [ ] create_members
-- [ ] create_sample_findings
-- [ ] create_threat_intel_set
-- [ ] decline_invitations
-- [ ] delete_detector
-- [ ] delete_filter
-- [ ] delete_invitations
-- [ ] delete_ip_set
-- [ ] delete_members
-- [ ] delete_threat_intel_set
-- [ ] disassociate_from_master_account
-- [ ] disassociate_members
-- [ ] get_detector
-- [ ] get_filter
-- [ ] get_findings
-- [ ] get_findings_statistics
-- [ ] get_invitations_count
-- [ ] get_ip_set
-- [ ] get_master_account
-- [ ] get_members
-- [ ] get_threat_intel_set
-- [ ] invite_members
-- [ ] list_detectors
-- [ ] list_filters
-- [ ] list_findings
-- [ ] list_invitations
-- [ ] list_ip_sets
-- [ ] list_members
-- [ ] list_threat_intel_sets
-- [ ] start_monitoring_members
-- [ ] stop_monitoring_members
-- [ ] unarchive_findings
-- [ ] update_detector
-- [ ] update_filter
-- [ ] update_findings_feedback
-- [ ] update_ip_set
-- [ ] update_threat_intel_set
-
-## health - 0% implemented
-- [ ] describe_affected_entities
-- [ ] describe_entity_aggregates
-- [ ] describe_event_aggregates
-- [ ] describe_event_details
-- [ ] describe_event_types
-- [ ] describe_events
-
-## iam - 56% implemented
-- [ ] add_client_id_to_open_id_connect_provider
-- [X] add_role_to_instance_profile
-- [X] add_user_to_group
-- [X] attach_group_policy
-- [X] attach_role_policy
-- [X] attach_user_policy
-- [ ] change_password
-- [X] create_access_key
-- [X] create_account_alias
-- [X] create_group
-- [X] create_instance_profile
-- [X] create_login_profile
-- [ ] create_open_id_connect_provider
-- [X] create_policy
-- [X] create_policy_version
-- [X] create_role
-- [X] create_saml_provider
-- [ ] create_service_linked_role
-- [ ] create_service_specific_credential
-- [X] create_user
-- [ ] create_virtual_mfa_device
-- [X] deactivate_mfa_device
-- [X] delete_access_key
-- [X] delete_account_alias
-- [ ] delete_account_password_policy
-- [ ] delete_group
-- [ ] delete_group_policy
-- [ ] delete_instance_profile
-- [X] delete_login_profile
-- [ ] delete_open_id_connect_provider
-- [ ] delete_policy
-- [X] delete_policy_version
-- [X] delete_role
-- [ ] delete_role_permissions_boundary
-- [X] delete_role_policy
-- [X] delete_saml_provider
-- [X] delete_server_certificate
-- [ ] delete_service_linked_role
-- [ ] delete_service_specific_credential
-- [X] delete_signing_certificate
-- [ ] delete_ssh_public_key
-- [X] delete_user
-- [ ] delete_user_permissions_boundary
-- [X] delete_user_policy
-- [ ] delete_virtual_mfa_device
-- [X] detach_group_policy
-- [X] detach_role_policy
-- [X] detach_user_policy
-- [X] enable_mfa_device
-- [ ] generate_credential_report
-- [ ] generate_service_last_accessed_details
-- [X] get_access_key_last_used
-- [X] get_account_authorization_details
-- [ ] get_account_password_policy
-- [ ] get_account_summary
-- [ ] get_context_keys_for_custom_policy
-- [ ] get_context_keys_for_principal_policy
-- [X] get_credential_report
-- [X] get_group
-- [X] get_group_policy
-- [X] get_instance_profile
-- [X] get_login_profile
-- [ ] get_open_id_connect_provider
-- [X] get_policy
-- [X] get_policy_version
-- [X] get_role
-- [X] get_role_policy
-- [X] get_saml_provider
-- [X] get_server_certificate
-- [ ] get_service_last_accessed_details
-- [ ] get_service_last_accessed_details_with_entities
-- [ ] get_service_linked_role_deletion_status
-- [ ] get_ssh_public_key
-- [X] get_user
-- [X] get_user_policy
-- [ ] list_access_keys
-- [X] list_account_aliases
-- [X] list_attached_group_policies
-- [X] list_attached_role_policies
-- [X] list_attached_user_policies
-- [ ] list_entities_for_policy
-- [X] list_group_policies
-- [X] list_groups
-- [ ] list_groups_for_user
-- [ ] list_instance_profiles
-- [ ] list_instance_profiles_for_role
-- [X] list_mfa_devices
-- [ ] list_open_id_connect_providers
-- [X] list_policies
-- [ ] list_policies_granting_service_access
-- [X] list_policy_versions
-- [X] list_role_policies
-- [X] list_role_tags
-- [X] list_roles
-- [X] list_saml_providers
-- [ ] list_server_certificates
-- [ ] list_service_specific_credentials
-- [X] list_signing_certificates
-- [ ] list_ssh_public_keys
-- [X] list_user_policies
-- [ ] list_user_tags
-- [X] list_users
-- [ ] list_virtual_mfa_devices
-- [X] put_group_policy
-- [ ] put_role_permissions_boundary
-- [X] put_role_policy
-- [ ] put_user_permissions_boundary
-- [X] put_user_policy
-- [ ] remove_client_id_from_open_id_connect_provider
-- [X] remove_role_from_instance_profile
-- [X] remove_user_from_group
-- [ ] reset_service_specific_credential
-- [ ] resync_mfa_device
-- [ ] set_default_policy_version
-- [ ] simulate_custom_policy
-- [ ] simulate_principal_policy
-- [X] tag_role
-- [ ] tag_user
-- [X] untag_role
-- [ ] untag_user
-- [X] update_access_key
-- [ ] update_account_password_policy
-- [ ] update_assume_role_policy
-- [ ] update_group
-- [X] update_login_profile
-- [ ] update_open_id_connect_provider_thumbprint
-- [X] update_role
-- [X] update_role_description
-- [X] update_saml_provider
-- [ ] update_server_certificate
-- [ ] update_service_specific_credential
-- [X] update_signing_certificate
-- [ ] update_ssh_public_key
-- [X] update_user
-- [ ] upload_server_certificate
-- [X] upload_signing_certificate
-- [ ] upload_ssh_public_key
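The checked-off IAM calls above cover the common role workflow: create a role, add an inline policy, list it back. A minimal sketch, assuming boto3 and moto are installed (the role and policy names are illustrative):

```python
import json

import boto3
from moto import mock_iam

@mock_iam
def test_role_with_inline_policy():
    # create_role, put_role_policy and list_role_policies are marked implemented above
    client = boto3.client("iam", region_name="us-east-1")
    client.create_role(RoleName="demo", AssumeRolePolicyDocument="{}")
    client.put_role_policy(
        RoleName="demo",
        PolicyName="inline",
        PolicyDocument=json.dumps({"Version": "2012-10-17", "Statement": []}),
    )
    assert client.list_role_policies(RoleName="demo")["PolicyNames"] == ["inline"]
```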
-
-## importexport - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] get_shipping_label
-- [ ] get_status
-- [ ] list_jobs
-- [ ] update_job
-
-## inspector - 0% implemented
-- [ ] add_attributes_to_findings
-- [ ] create_assessment_target
-- [ ] create_assessment_template
-- [ ] create_exclusions_preview
-- [ ] create_resource_group
-- [ ] delete_assessment_run
-- [ ] delete_assessment_target
-- [ ] delete_assessment_template
-- [ ] describe_assessment_runs
-- [ ] describe_assessment_targets
-- [ ] describe_assessment_templates
-- [ ] describe_cross_account_access_role
-- [ ] describe_exclusions
-- [ ] describe_findings
-- [ ] describe_resource_groups
-- [ ] describe_rules_packages
-- [ ] get_assessment_report
-- [ ] get_exclusions_preview
-- [ ] get_telemetry_metadata
-- [ ] list_assessment_run_agents
-- [ ] list_assessment_runs
-- [ ] list_assessment_targets
-- [ ] list_assessment_templates
-- [ ] list_event_subscriptions
-- [ ] list_exclusions
-- [ ] list_findings
-- [ ] list_rules_packages
-- [ ] list_tags_for_resource
-- [ ] preview_agents
-- [ ] register_cross_account_access_role
-- [ ] remove_attributes_from_findings
-- [ ] set_tags_for_resource
-- [ ] start_assessment_run
-- [ ] stop_assessment_run
-- [ ] subscribe_to_event
-- [ ] unsubscribe_from_event
-- [ ] update_assessment_target
-
-## iot - 33% implemented
-- [ ] accept_certificate_transfer
-- [ ] add_thing_to_billing_group
-- [X] add_thing_to_thing_group
-- [ ] associate_targets_with_job
-- [X] attach_policy
-- [X] attach_principal_policy
-- [ ] attach_security_profile
-- [X] attach_thing_principal
-- [ ] cancel_audit_task
-- [ ] cancel_certificate_transfer
-- [X] cancel_job
-- [X] cancel_job_execution
-- [ ] clear_default_authorizer
-- [ ] create_authorizer
-- [ ] create_billing_group
-- [ ] create_certificate_from_csr
-- [ ] create_dynamic_thing_group
-- [X] create_job
-- [X] create_keys_and_certificate
-- [ ] create_ota_update
-- [X] create_policy
-- [X] create_policy_version
-- [ ] create_role_alias
-- [ ] create_scheduled_audit
-- [ ] create_security_profile
-- [ ] create_stream
-- [X] create_thing
-- [X] create_thing_group
-- [X] create_thing_type
-- [ ] create_topic_rule
-- [ ] delete_account_audit_configuration
-- [ ] delete_authorizer
-- [ ] delete_billing_group
-- [ ] delete_ca_certificate
-- [X] delete_certificate
-- [ ] delete_dynamic_thing_group
-- [X] delete_job
-- [X] delete_job_execution
-- [ ] delete_ota_update
-- [X] delete_policy
-- [X] delete_policy_version
-- [ ] delete_registration_code
-- [ ] delete_role_alias
-- [ ] delete_scheduled_audit
-- [ ] delete_security_profile
-- [ ] delete_stream
-- [X] delete_thing
-- [X] delete_thing_group
-- [X] delete_thing_type
-- [ ] delete_topic_rule
-- [ ] delete_v2_logging_level
-- [ ] deprecate_thing_type
-- [ ] describe_account_audit_configuration
-- [ ] describe_audit_task
-- [ ] describe_authorizer
-- [ ] describe_billing_group
-- [ ] describe_ca_certificate
-- [X] describe_certificate
-- [ ] describe_default_authorizer
-- [ ] describe_endpoint
-- [ ] describe_event_configurations
-- [ ] describe_index
-- [X] describe_job
-- [X] describe_job_execution
-- [ ] describe_role_alias
-- [ ] describe_scheduled_audit
-- [ ] describe_security_profile
-- [ ] describe_stream
-- [X] describe_thing
-- [X] describe_thing_group
-- [ ] describe_thing_registration_task
-- [X] describe_thing_type
-- [X] detach_policy
-- [X] detach_principal_policy
-- [ ] detach_security_profile
-- [X] detach_thing_principal
-- [ ] disable_topic_rule
-- [ ] enable_topic_rule
-- [ ] get_effective_policies
-- [ ] get_indexing_configuration
-- [X] get_job_document
-- [ ] get_logging_options
-- [ ] get_ota_update
-- [X] get_policy
-- [X] get_policy_version
-- [ ] get_registration_code
-- [ ] get_topic_rule
-- [ ] get_v2_logging_options
-- [ ] list_active_violations
-- [X] list_attached_policies
-- [ ] list_audit_findings
-- [ ] list_audit_tasks
-- [ ] list_authorizers
-- [ ] list_billing_groups
-- [ ] list_ca_certificates
-- [X] list_certificates
-- [ ] list_certificates_by_ca
-- [ ] list_indices
-- [X] list_job_executions_for_job
-- [X] list_job_executions_for_thing
-- [X] list_jobs
-- [ ] list_ota_updates
-- [ ] list_outgoing_certificates
-- [X] list_policies
-- [X] list_policy_principals
-- [X] list_policy_versions
-- [X] list_principal_policies
-- [X] list_principal_things
-- [ ] list_role_aliases
-- [ ] list_scheduled_audits
-- [ ] list_security_profiles
-- [ ] list_security_profiles_for_target
-- [ ] list_streams
-- [ ] list_tags_for_resource
-- [ ] list_targets_for_policy
-- [ ] list_targets_for_security_profile
-- [X] list_thing_groups
-- [X] list_thing_groups_for_thing
-- [X] list_thing_principals
-- [ ] list_thing_registration_task_reports
-- [ ] list_thing_registration_tasks
-- [X] list_thing_types
-- [X] list_things
-- [ ] list_things_in_billing_group
-- [X] list_things_in_thing_group
-- [ ] list_topic_rules
-- [ ] list_v2_logging_levels
-- [ ] list_violation_events
-- [ ] register_ca_certificate
-- [ ] register_certificate
-- [ ] register_thing
-- [ ] reject_certificate_transfer
-- [ ] remove_thing_from_billing_group
-- [X] remove_thing_from_thing_group
-- [ ] replace_topic_rule
-- [ ] search_index
-- [ ] set_default_authorizer
-- [X] set_default_policy_version
-- [ ] set_logging_options
-- [ ] set_v2_logging_level
-- [ ] set_v2_logging_options
-- [ ] start_on_demand_audit_task
-- [ ] start_thing_registration_task
-- [ ] stop_thing_registration_task
-- [ ] tag_resource
-- [ ] test_authorization
-- [ ] test_invoke_authorizer
-- [ ] transfer_certificate
-- [ ] untag_resource
-- [ ] update_account_audit_configuration
-- [ ] update_authorizer
-- [ ] update_billing_group
-- [ ] update_ca_certificate
-- [X] update_certificate
-- [ ] update_dynamic_thing_group
-- [ ] update_event_configurations
-- [ ] update_indexing_configuration
-- [ ] update_job
-- [ ] update_role_alias
-- [ ] update_scheduled_audit
-- [ ] update_security_profile
-- [ ] update_stream
-- [X] update_thing
-- [X] update_thing_group
-- [X] update_thing_groups_for_thing
-- [ ] validate_security_profile_behaviors
-
-## iot-data - 100% implemented
-- [X] delete_thing_shadow
-- [X] get_thing_shadow
-- [X] publish
-- [X] update_thing_shadow
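With all four iot-data endpoints implemented, a thing shadow can be written and read back in-process; since the shadow store is backed by the iot mock, the thing is created through the iot client first. A minimal sketch, assuming boto3 and moto are installed (thing name and payload are illustrative):

```python
import boto3
from moto import mock_iot, mock_iotdata

@mock_iot
@mock_iotdata
def test_thing_shadow_roundtrip():
    # create_thing (iot) plus update_thing_shadow/get_thing_shadow (iot-data)
    boto3.client("iot", region_name="us-east-1").create_thing(thingName="dev1")
    data = boto3.client("iot-data", region_name="us-east-1")
    data.update_thing_shadow(
        thingName="dev1", payload=b'{"state": {"desired": {"led": "on"}}}'
    )
    shadow = data.get_thing_shadow(thingName="dev1")["payload"].read()
    assert b'"led": "on"' in shadow
```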
-
-## iot-jobs-data - 0% implemented
-- [ ] describe_job_execution
-- [ ] get_pending_job_executions
-- [ ] start_next_pending_job_execution
-- [ ] update_job_execution
-
-## iot1click-devices - 0% implemented
-- [ ] claim_devices_by_claim_code
-- [ ] describe_device
-- [ ] finalize_device_claim
-- [ ] get_device_methods
-- [ ] initiate_device_claim
-- [ ] invoke_device_method
-- [ ] list_device_events
-- [ ] list_devices
-- [ ] unclaim_device
-- [ ] update_device_state
-
-## iot1click-projects - 0% implemented
-- [ ] associate_device_with_placement
-- [ ] create_placement
-- [ ] create_project
-- [ ] delete_placement
-- [ ] delete_project
-- [ ] describe_placement
-- [ ] describe_project
-- [ ] disassociate_device_from_placement
-- [ ] get_devices_in_placement
-- [ ] list_placements
-- [ ] list_projects
-- [ ] update_placement
-- [ ] update_project
-
-## iotanalytics - 0% implemented
-- [ ] batch_put_message
-- [ ] cancel_pipeline_reprocessing
-- [ ] create_channel
-- [ ] create_dataset
-- [ ] create_dataset_content
-- [ ] create_datastore
-- [ ] create_pipeline
-- [ ] delete_channel
-- [ ] delete_dataset
-- [ ] delete_dataset_content
-- [ ] delete_datastore
-- [ ] delete_pipeline
-- [ ] describe_channel
-- [ ] describe_dataset
-- [ ] describe_datastore
-- [ ] describe_logging_options
-- [ ] describe_pipeline
-- [ ] get_dataset_content
-- [ ] list_channels
-- [ ] list_dataset_contents
-- [ ] list_datasets
-- [ ] list_datastores
-- [ ] list_pipelines
-- [ ] list_tags_for_resource
-- [ ] put_logging_options
-- [ ] run_pipeline_activity
-- [ ] sample_channel_data
-- [ ] start_pipeline_reprocessing
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_channel
-- [ ] update_dataset
-- [ ] update_datastore
-- [ ] update_pipeline
-
-## kafka - 0% implemented
-- [ ] create_cluster
-- [ ] delete_cluster
-- [ ] describe_cluster
-- [ ] get_bootstrap_brokers
-- [ ] list_clusters
-- [ ] list_nodes
-
-## kinesis - 46% implemented
-- [X] add_tags_to_stream
-- [X] create_stream
-- [ ] decrease_stream_retention_period
-- [X] delete_stream
-- [ ] deregister_stream_consumer
-- [ ] describe_limits
-- [X] describe_stream
-- [ ] describe_stream_consumer
-- [ ] describe_stream_summary
-- [ ] disable_enhanced_monitoring
-- [ ] enable_enhanced_monitoring
-- [X] get_records
-- [X] get_shard_iterator
-- [ ] increase_stream_retention_period
-- [ ] list_shards
-- [ ] list_stream_consumers
-- [X] list_streams
-- [X] list_tags_for_stream
-- [X] merge_shards
-- [X] put_record
-- [X] put_records
-- [ ] register_stream_consumer
-- [X] remove_tags_from_stream
-- [X] split_shard
-- [ ] start_stream_encryption
-- [ ] stop_stream_encryption
-- [ ] subscribe_to_shard
-- [ ] update_shard_count
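The implemented Kinesis calls above are enough for a full produce/consume round-trip through one shard. A minimal sketch, assuming boto3 and moto are installed (stream name and partition key are illustrative):

```python
import boto3
from moto import mock_kinesis

@mock_kinesis
def test_put_and_get_record():
    # create_stream, put_record, describe_stream, get_shard_iterator and
    # get_records are all marked implemented above
    client = boto3.client("kinesis", region_name="us-east-1")
    client.create_stream(StreamName="events", ShardCount=1)
    client.put_record(StreamName="events", Data=b"hello", PartitionKey="pk")
    shard_id = client.describe_stream(StreamName="events")[
        "StreamDescription"]["Shards"][0]["ShardId"]
    it = client.get_shard_iterator(
        StreamName="events", ShardId=shard_id, ShardIteratorType="TRIM_HORIZON"
    )["ShardIterator"]
    records = client.get_records(ShardIterator=it)["Records"]
    assert records[0]["Data"] == b"hello"
```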
-
-## kinesis-video-archived-media - 0% implemented
-- [ ] get_hls_streaming_session_url
-- [ ] get_media_for_fragment_list
-- [ ] list_fragments
-
-## kinesis-video-media - 0% implemented
-- [ ] get_media
-
-## kinesisanalytics - 0% implemented
-- [ ] add_application_cloud_watch_logging_option
-- [ ] add_application_input
-- [ ] add_application_input_processing_configuration
-- [ ] add_application_output
-- [ ] add_application_reference_data_source
-- [ ] create_application
-- [ ] delete_application
-- [ ] delete_application_cloud_watch_logging_option
-- [ ] delete_application_input_processing_configuration
-- [ ] delete_application_output
-- [ ] delete_application_reference_data_source
-- [ ] describe_application
-- [ ] discover_input_schema
-- [ ] list_applications
-- [ ] start_application
-- [ ] stop_application
-- [ ] update_application
-
-## kinesisanalyticsv2 - 0% implemented
-- [ ] add_application_cloud_watch_logging_option
-- [ ] add_application_input
-- [ ] add_application_input_processing_configuration
-- [ ] add_application_output
-- [ ] add_application_reference_data_source
-- [ ] create_application
-- [ ] create_application_snapshot
-- [ ] delete_application
-- [ ] delete_application_cloud_watch_logging_option
-- [ ] delete_application_input_processing_configuration
-- [ ] delete_application_output
-- [ ] delete_application_reference_data_source
-- [ ] delete_application_snapshot
-- [ ] describe_application
-- [ ] describe_application_snapshot
-- [ ] discover_input_schema
-- [ ] list_application_snapshots
-- [ ] list_applications
-- [ ] start_application
-- [ ] stop_application
-- [ ] update_application
-
-## kinesisvideo - 0% implemented
-- [ ] create_stream
-- [ ] delete_stream
-- [ ] describe_stream
-- [ ] get_data_endpoint
-- [ ] list_streams
-- [ ] list_tags_for_stream
-- [ ] tag_stream
-- [ ] untag_stream
-- [ ] update_data_retention
-- [ ] update_stream
-
-## kms - 41% implemented
-- [X] cancel_key_deletion
-- [ ] connect_custom_key_store
-- [ ] create_alias
-- [ ] create_custom_key_store
-- [ ] create_grant
-- [X] create_key
-- [ ] decrypt
-- [X] delete_alias
-- [ ] delete_custom_key_store
-- [ ] delete_imported_key_material
-- [ ] describe_custom_key_stores
-- [X] describe_key
-- [X] disable_key
-- [X] disable_key_rotation
-- [ ] disconnect_custom_key_store
-- [X] enable_key
-- [X] enable_key_rotation
-- [ ] encrypt
-- [X] generate_data_key
-- [ ] generate_data_key_without_plaintext
-- [ ] generate_random
-- [X] get_key_policy
-- [X] get_key_rotation_status
-- [ ] get_parameters_for_import
-- [ ] import_key_material
-- [ ] list_aliases
-- [ ] list_grants
-- [ ] list_key_policies
-- [X] list_keys
-- [X] list_resource_tags
-- [ ] list_retirable_grants
-- [X] put_key_policy
-- [ ] re_encrypt
-- [ ] retire_grant
-- [ ] revoke_grant
-- [X] schedule_key_deletion
-- [X] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_custom_key_store
-- [X] update_key_description
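Key lifecycle calls (create_key, describe_key, enable/disable) are the implemented core of the KMS mock, so key metadata round-trips can be asserted directly. A minimal sketch, assuming boto3 and moto are installed (the description is illustrative):

```python
import boto3
from moto import mock_kms

@mock_kms
def test_create_and_describe_key():
    # create_key and describe_key are both marked implemented above
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="test key")["KeyMetadata"]["KeyId"]
    meta = client.describe_key(KeyId=key_id)["KeyMetadata"]
    assert meta["Description"] == "test key"
```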
-
-## lambda - 0% implemented
-- [ ] add_layer_version_permission
-- [ ] add_permission
-- [ ] create_alias
-- [ ] create_event_source_mapping
-- [ ] create_function
-- [ ] delete_alias
-- [ ] delete_event_source_mapping
-- [ ] delete_function
-- [ ] delete_function_concurrency
-- [ ] delete_layer_version
-- [ ] get_account_settings
-- [ ] get_alias
-- [ ] get_event_source_mapping
-- [ ] get_function
-- [ ] get_function_configuration
-- [ ] get_layer_version
-- [ ] get_layer_version_policy
-- [ ] get_policy
-- [ ] invoke
-- [ ] invoke_async
-- [ ] list_aliases
-- [ ] list_event_source_mappings
-- [ ] list_functions
-- [ ] list_layer_versions
-- [ ] list_layers
-- [ ] list_tags
-- [ ] list_versions_by_function
-- [ ] publish_layer_version
-- [ ] publish_version
-- [ ] put_function_concurrency
-- [ ] remove_layer_version_permission
-- [ ] remove_permission
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_event_source_mapping
-- [ ] update_function_code
-- [ ] update_function_configuration
-
-## lex-models - 0% implemented
-- [ ] create_bot_version
-- [ ] create_intent_version
-- [ ] create_slot_type_version
-- [ ] delete_bot
-- [ ] delete_bot_alias
-- [ ] delete_bot_channel_association
-- [ ] delete_bot_version
-- [ ] delete_intent
-- [ ] delete_intent_version
-- [ ] delete_slot_type
-- [ ] delete_slot_type_version
-- [ ] delete_utterances
-- [ ] get_bot
-- [ ] get_bot_alias
-- [ ] get_bot_aliases
-- [ ] get_bot_channel_association
-- [ ] get_bot_channel_associations
-- [ ] get_bot_versions
-- [ ] get_bots
-- [ ] get_builtin_intent
-- [ ] get_builtin_intents
-- [ ] get_builtin_slot_types
-- [ ] get_export
-- [ ] get_import
-- [ ] get_intent
-- [ ] get_intent_versions
-- [ ] get_intents
-- [ ] get_slot_type
-- [ ] get_slot_type_versions
-- [ ] get_slot_types
-- [ ] get_utterances_view
-- [ ] put_bot
-- [ ] put_bot_alias
-- [ ] put_intent
-- [ ] put_slot_type
-- [ ] start_import
-
-## lex-runtime - 0% implemented
-- [ ] post_content
-- [ ] post_text
-
-## license-manager - 0% implemented
-- [ ] create_license_configuration
-- [ ] delete_license_configuration
-- [ ] get_license_configuration
-- [ ] get_service_settings
-- [ ] list_associations_for_license_configuration
-- [ ] list_license_configurations
-- [ ] list_license_specifications_for_resource
-- [ ] list_resource_inventory
-- [ ] list_tags_for_resource
-- [ ] list_usage_for_license_configuration
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_license_configuration
-- [ ] update_license_specifications_for_resource
-- [ ] update_service_settings
-
-## lightsail - 0% implemented
-- [ ] allocate_static_ip
-- [ ] attach_disk
-- [ ] attach_instances_to_load_balancer
-- [ ] attach_load_balancer_tls_certificate
-- [ ] attach_static_ip
-- [ ] close_instance_public_ports
-- [ ] copy_snapshot
-- [ ] create_cloud_formation_stack
-- [ ] create_disk
-- [ ] create_disk_from_snapshot
-- [ ] create_disk_snapshot
-- [ ] create_domain
-- [ ] create_domain_entry
-- [ ] create_instance_snapshot
-- [ ] create_instances
-- [ ] create_instances_from_snapshot
-- [ ] create_key_pair
-- [ ] create_load_balancer
-- [ ] create_load_balancer_tls_certificate
-- [ ] create_relational_database
-- [ ] create_relational_database_from_snapshot
-- [ ] create_relational_database_snapshot
-- [ ] delete_disk
-- [ ] delete_disk_snapshot
-- [ ] delete_domain
-- [ ] delete_domain_entry
-- [ ] delete_instance
-- [ ] delete_instance_snapshot
-- [ ] delete_key_pair
-- [ ] delete_load_balancer
-- [ ] delete_load_balancer_tls_certificate
-- [ ] delete_relational_database
-- [ ] delete_relational_database_snapshot
-- [ ] detach_disk
-- [ ] detach_instances_from_load_balancer
-- [ ] detach_static_ip
-- [ ] download_default_key_pair
-- [ ] export_snapshot
-- [ ] get_active_names
-- [ ] get_blueprints
-- [ ] get_bundles
-- [ ] get_cloud_formation_stack_records
-- [ ] get_disk
-- [ ] get_disk_snapshot
-- [ ] get_disk_snapshots
-- [ ] get_disks
-- [ ] get_domain
-- [ ] get_domains
-- [ ] get_export_snapshot_records
-- [ ] get_instance
-- [ ] get_instance_access_details
-- [ ] get_instance_metric_data
-- [ ] get_instance_port_states
-- [ ] get_instance_snapshot
-- [ ] get_instance_snapshots
-- [ ] get_instance_state
-- [ ] get_instances
-- [ ] get_key_pair
-- [ ] get_key_pairs
-- [ ] get_load_balancer
-- [ ] get_load_balancer_metric_data
-- [ ] get_load_balancer_tls_certificates
-- [ ] get_load_balancers
-- [ ] get_operation
-- [ ] get_operations
-- [ ] get_operations_for_resource
-- [ ] get_regions
-- [ ] get_relational_database
-- [ ] get_relational_database_blueprints
-- [ ] get_relational_database_bundles
-- [ ] get_relational_database_events
-- [ ] get_relational_database_log_events
-- [ ] get_relational_database_log_streams
-- [ ] get_relational_database_master_user_password
-- [ ] get_relational_database_metric_data
-- [ ] get_relational_database_parameters
-- [ ] get_relational_database_snapshot
-- [ ] get_relational_database_snapshots
-- [ ] get_relational_databases
-- [ ] get_static_ip
-- [ ] get_static_ips
-- [ ] import_key_pair
-- [ ] is_vpc_peered
-- [ ] open_instance_public_ports
-- [ ] peer_vpc
-- [ ] put_instance_public_ports
-- [ ] reboot_instance
-- [ ] reboot_relational_database
-- [ ] release_static_ip
-- [ ] start_instance
-- [ ] start_relational_database
-- [ ] stop_instance
-- [ ] stop_relational_database
-- [ ] tag_resource
-- [ ] unpeer_vpc
-- [ ] untag_resource
-- [ ] update_domain_entry
-- [ ] update_load_balancer_attribute
-- [ ] update_relational_database
-- [ ] update_relational_database_parameters
-
-## logs - 23% implemented
-- [ ] associate_kms_key
-- [ ] cancel_export_task
-- [ ] create_export_task
-- [X] create_log_group
-- [X] create_log_stream
-- [ ] delete_destination
-- [X] delete_log_group
-- [X] delete_log_stream
-- [ ] delete_metric_filter
-- [ ] delete_resource_policy
-- [ ] delete_retention_policy
-- [ ] delete_subscription_filter
-- [ ] describe_destinations
-- [ ] describe_export_tasks
-- [X] describe_log_groups
-- [X] describe_log_streams
-- [ ] describe_metric_filters
-- [ ] describe_queries
-- [ ] describe_resource_policies
-- [ ] describe_subscription_filters
-- [ ] disassociate_kms_key
-- [X] filter_log_events
-- [X] get_log_events
-- [ ] get_log_group_fields
-- [ ] get_log_record
-- [ ] get_query_results
-- [ ] list_tags_log_group
-- [ ] put_destination
-- [ ] put_destination_policy
-- [X] put_log_events
-- [ ] put_metric_filter
-- [ ] put_resource_policy
-- [ ] put_retention_policy
-- [ ] put_subscription_filter
-- [ ] start_query
-- [ ] stop_query
-- [ ] tag_log_group
-- [ ] test_metric_filter
-- [ ] untag_log_group
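The implemented CloudWatch Logs subset above (groups, streams, put/get/filter of events) supports an end-to-end logging test. A minimal sketch, assuming boto3 and moto are installed (group and stream names are illustrative):

```python
import boto3
from moto import mock_logs

@mock_logs
def test_put_and_get_log_events():
    # create_log_group, create_log_stream, put_log_events and get_log_events
    # are all marked implemented above
    client = boto3.client("logs", region_name="us-east-1")
    client.create_log_group(logGroupName="app")
    client.create_log_stream(logGroupName="app", logStreamName="s1")
    client.put_log_events(
        logGroupName="app", logStreamName="s1",
        logEvents=[{"timestamp": 1000, "message": "hello"}],
    )
    events = client.get_log_events(logGroupName="app", logStreamName="s1")["events"]
    assert events[0]["message"] == "hello"
```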
-
-## machinelearning - 0% implemented
-- [ ] add_tags
-- [ ] create_batch_prediction
-- [ ] create_data_source_from_rds
-- [ ] create_data_source_from_redshift
-- [ ] create_data_source_from_s3
-- [ ] create_evaluation
-- [ ] create_ml_model
-- [ ] create_realtime_endpoint
-- [ ] delete_batch_prediction
-- [ ] delete_data_source
-- [ ] delete_evaluation
-- [ ] delete_ml_model
-- [ ] delete_realtime_endpoint
-- [ ] delete_tags
-- [ ] describe_batch_predictions
-- [ ] describe_data_sources
-- [ ] describe_evaluations
-- [ ] describe_ml_models
-- [ ] describe_tags
-- [ ] get_batch_prediction
-- [ ] get_data_source
-- [ ] get_evaluation
-- [ ] get_ml_model
-- [ ] predict
-- [ ] update_batch_prediction
-- [ ] update_data_source
-- [ ] update_evaluation
-- [ ] update_ml_model
-
-## macie - 0% implemented
-- [ ] associate_member_account
-- [ ] associate_s3_resources
-- [ ] disassociate_member_account
-- [ ] disassociate_s3_resources
-- [ ] list_member_accounts
-- [ ] list_s3_resources
-- [ ] update_s3_resources
-
-## marketplace-entitlement - 0% implemented
-- [ ] get_entitlements
-
-## marketplacecommerceanalytics - 0% implemented
-- [ ] generate_data_set
-- [ ] start_support_data_export
-
-## mediaconnect - 0% implemented
-- [ ] add_flow_outputs
-- [ ] create_flow
-- [ ] delete_flow
-- [ ] describe_flow
-- [ ] grant_flow_entitlements
-- [ ] list_entitlements
-- [ ] list_flows
-- [ ] remove_flow_output
-- [ ] revoke_flow_entitlement
-- [ ] start_flow
-- [ ] stop_flow
-- [ ] update_flow_entitlement
-- [ ] update_flow_output
-- [ ] update_flow_source
-
-## mediaconvert - 0% implemented
-- [ ] associate_certificate
-- [ ] cancel_job
-- [ ] create_job
-- [ ] create_job_template
-- [ ] create_preset
-- [ ] create_queue
-- [ ] delete_job_template
-- [ ] delete_preset
-- [ ] delete_queue
-- [ ] describe_endpoints
-- [ ] disassociate_certificate
-- [ ] get_job
-- [ ] get_job_template
-- [ ] get_preset
-- [ ] get_queue
-- [ ] list_job_templates
-- [ ] list_jobs
-- [ ] list_presets
-- [ ] list_queues
-- [ ] list_tags_for_resource
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_job_template
-- [ ] update_preset
-- [ ] update_queue
-
-## medialive - 0% implemented
-- [ ] batch_update_schedule
-- [ ] create_channel
-- [ ] create_input
-- [ ] create_input_security_group
-- [ ] delete_channel
-- [ ] delete_input
-- [ ] delete_input_security_group
-- [ ] delete_reservation
-- [ ] describe_channel
-- [ ] describe_input
-- [ ] describe_input_security_group
-- [ ] describe_offering
-- [ ] describe_reservation
-- [ ] describe_schedule
-- [ ] list_channels
-- [ ] list_input_security_groups
-- [ ] list_inputs
-- [ ] list_offerings
-- [ ] list_reservations
-- [ ] purchase_offering
-- [ ] start_channel
-- [ ] stop_channel
-- [ ] update_channel
-- [ ] update_input
-- [ ] update_input_security_group
-
-## mediapackage - 0% implemented
-- [ ] create_channel
-- [ ] create_origin_endpoint
-- [ ] delete_channel
-- [ ] delete_origin_endpoint
-- [ ] describe_channel
-- [ ] describe_origin_endpoint
-- [ ] list_channels
-- [ ] list_origin_endpoints
-- [ ] rotate_channel_credentials
-- [ ] rotate_ingest_endpoint_credentials
-- [ ] update_channel
-- [ ] update_origin_endpoint
-
-## mediastore - 0% implemented
-- [ ] create_container
-- [ ] delete_container
-- [ ] delete_container_policy
-- [ ] delete_cors_policy
-- [ ] delete_lifecycle_policy
-- [ ] describe_container
-- [ ] get_container_policy
-- [ ] get_cors_policy
-- [ ] get_lifecycle_policy
-- [ ] list_containers
-- [ ] put_container_policy
-- [ ] put_cors_policy
-- [ ] put_lifecycle_policy
-
-## mediastore-data - 0% implemented
-- [ ] delete_object
-- [ ] describe_object
-- [ ] get_object
-- [ ] list_items
-- [ ] put_object
-
-## mediatailor - 0% implemented
-- [ ] delete_playback_configuration
-- [ ] get_playback_configuration
-- [ ] list_playback_configurations
-- [ ] put_playback_configuration
-
-## meteringmarketplace - 0% implemented
-- [ ] batch_meter_usage
-- [ ] meter_usage
-- [ ] register_usage
-- [ ] resolve_customer
-
-## mgh - 0% implemented
-- [ ] associate_created_artifact
-- [ ] associate_discovered_resource
-- [ ] create_progress_update_stream
-- [ ] delete_progress_update_stream
-- [ ] describe_application_state
-- [ ] describe_migration_task
-- [ ] disassociate_created_artifact
-- [ ] disassociate_discovered_resource
-- [ ] import_migration_task
-- [ ] list_created_artifacts
-- [ ] list_discovered_resources
-- [ ] list_migration_tasks
-- [ ] list_progress_update_streams
-- [ ] notify_application_state
-- [ ] notify_migration_task_state
-- [ ] put_resource_attributes
-
-## mobile - 0% implemented
-- [ ] create_project
-- [ ] delete_project
-- [ ] describe_bundle
-- [ ] describe_project
-- [ ] export_bundle
-- [ ] export_project
-- [ ] list_bundles
-- [ ] list_projects
-- [ ] update_project
-
-## mq - 0% implemented
-- [ ] create_broker
-- [ ] create_configuration
-- [ ] create_tags
-- [ ] create_user
-- [ ] delete_broker
-- [ ] delete_tags
-- [ ] delete_user
-- [ ] describe_broker
-- [ ] describe_configuration
-- [ ] describe_configuration_revision
-- [ ] describe_user
-- [ ] list_brokers
-- [ ] list_configuration_revisions
-- [ ] list_configurations
-- [ ] list_tags
-- [ ] list_users
-- [ ] reboot_broker
-- [ ] update_broker
-- [ ] update_configuration
-- [ ] update_user
-
-## mturk - 0% implemented
-- [ ] accept_qualification_request
-- [ ] approve_assignment
-- [ ] associate_qualification_with_worker
-- [ ] create_additional_assignments_for_hit
-- [ ] create_hit
-- [ ] create_hit_type
-- [ ] create_hit_with_hit_type
-- [ ] create_qualification_type
-- [ ] create_worker_block
-- [ ] delete_hit
-- [ ] delete_qualification_type
-- [ ] delete_worker_block
-- [ ] disassociate_qualification_from_worker
-- [ ] get_account_balance
-- [ ] get_assignment
-- [ ] get_file_upload_url
-- [ ] get_hit
-- [ ] get_qualification_score
-- [ ] get_qualification_type
-- [ ] list_assignments_for_hit
-- [ ] list_bonus_payments
-- [ ] list_hits
-- [ ] list_hits_for_qualification_type
-- [ ] list_qualification_requests
-- [ ] list_qualification_types
-- [ ] list_review_policy_results_for_hit
-- [ ] list_reviewable_hits
-- [ ] list_worker_blocks
-- [ ] list_workers_with_qualification_type
-- [ ] notify_workers
-- [ ] reject_assignment
-- [ ] reject_qualification_request
-- [ ] send_bonus
-- [ ] send_test_event_notification
-- [ ] update_expiration_for_hit
-- [ ] update_hit_review_status
-- [ ] update_hit_type_of_hit
-- [ ] update_notification_settings
-- [ ] update_qualification_type
-
-## neptune - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_parameter_group
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_parameter_group
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instances
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_valid_db_instance_modifications
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] promote_read_replica_db_cluster
-- [ ] reboot_db_instance
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-
-## opsworks - 12% implemented
-- [ ] assign_instance
-- [ ] assign_volume
-- [ ] associate_elastic_ip
-- [ ] attach_elastic_load_balancer
-- [ ] clone_stack
-- [X] create_app
-- [ ] create_deployment
-- [X] create_instance
-- [X] create_layer
-- [X] create_stack
-- [ ] create_user_profile
-- [ ] delete_app
-- [ ] delete_instance
-- [ ] delete_layer
-- [ ] delete_stack
-- [ ] delete_user_profile
-- [ ] deregister_ecs_cluster
-- [ ] deregister_elastic_ip
-- [ ] deregister_instance
-- [ ] deregister_rds_db_instance
-- [ ] deregister_volume
-- [ ] describe_agent_versions
-- [X] describe_apps
-- [ ] describe_commands
-- [ ] describe_deployments
-- [ ] describe_ecs_clusters
-- [ ] describe_elastic_ips
-- [ ] describe_elastic_load_balancers
-- [X] describe_instances
-- [X] describe_layers
-- [ ] describe_load_based_auto_scaling
-- [ ] describe_my_user_profile
-- [ ] describe_operating_systems
-- [ ] describe_permissions
-- [ ] describe_raid_arrays
-- [ ] describe_rds_db_instances
-- [ ] describe_service_errors
-- [ ] describe_stack_provisioning_parameters
-- [ ] describe_stack_summary
-- [X] describe_stacks
-- [ ] describe_time_based_auto_scaling
-- [ ] describe_user_profiles
-- [ ] describe_volumes
-- [ ] detach_elastic_load_balancer
-- [ ] disassociate_elastic_ip
-- [ ] get_hostname_suggestion
-- [ ] grant_access
-- [ ] list_tags
-- [ ] reboot_instance
-- [ ] register_ecs_cluster
-- [ ] register_elastic_ip
-- [ ] register_instance
-- [ ] register_rds_db_instance
-- [ ] register_volume
-- [ ] set_load_based_auto_scaling
-- [ ] set_permission
-- [ ] set_time_based_auto_scaling
-- [X] start_instance
-- [ ] start_stack
-- [ ] stop_instance
-- [ ] stop_stack
-- [ ] tag_resource
-- [ ] unassign_instance
-- [ ] unassign_volume
-- [ ] untag_resource
-- [ ] update_app
-- [ ] update_elastic_ip
-- [ ] update_instance
-- [ ] update_layer
-- [ ] update_my_user_profile
-- [ ] update_rds_db_instance
-- [ ] update_stack
-- [ ] update_user_profile
-- [ ] update_volume
-
-## opsworkscm - 0% implemented
-- [ ] associate_node
-- [ ] create_backup
-- [ ] create_server
-- [ ] delete_backup
-- [ ] delete_server
-- [ ] describe_account_attributes
-- [ ] describe_backups
-- [ ] describe_events
-- [ ] describe_node_association_status
-- [ ] describe_servers
-- [ ] disassociate_node
-- [ ] export_server_engine_attribute
-- [ ] restore_server
-- [ ] start_maintenance
-- [ ] update_server
-- [ ] update_server_engine_attributes
-
-## organizations - 30% implemented
-- [ ] accept_handshake
-- [ ] attach_policy
-- [ ] cancel_handshake
-- [X] create_account
-- [X] create_organization
-- [X] create_organizational_unit
-- [ ] create_policy
-- [ ] decline_handshake
-- [ ] delete_organization
-- [ ] delete_organizational_unit
-- [ ] delete_policy
-- [X] describe_account
-- [ ] describe_create_account_status
-- [ ] describe_handshake
-- [X] describe_organization
-- [X] describe_organizational_unit
-- [ ] describe_policy
-- [ ] detach_policy
-- [ ] disable_aws_service_access
-- [ ] disable_policy_type
-- [ ] enable_all_features
-- [ ] enable_aws_service_access
-- [ ] enable_policy_type
-- [ ] invite_account_to_organization
-- [ ] leave_organization
-- [X] list_accounts
-- [X] list_accounts_for_parent
-- [ ] list_aws_service_access_for_organization
-- [X] list_children
-- [ ] list_create_account_status
-- [ ] list_handshakes_for_account
-- [ ] list_handshakes_for_organization
-- [X] list_organizational_units_for_parent
-- [X] list_parents
-- [ ] list_policies
-- [ ] list_policies_for_target
-- [X] list_roots
-- [ ] list_targets_for_policy
-- [X] move_account
-- [ ] remove_account_from_organization
-- [ ] update_organizational_unit
-- [ ] update_policy
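The implemented Organizations calls above cover creating an organization, adding accounts, and listing them back. A minimal sketch, assuming boto3 and moto are installed (account name and email are illustrative):

```python
import boto3
from moto import mock_organizations

@mock_organizations
def test_create_and_list_accounts():
    # create_organization, create_account and list_accounts are marked
    # implemented above
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    client.create_account(AccountName="dev", Email="dev@example.com")
    names = [a["Name"] for a in client.list_accounts()["Accounts"]]
    assert "dev" in names
```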
-
-## pi - 0% implemented
-- [ ] describe_dimension_keys
-- [ ] get_resource_metrics
-
-## pinpoint - 0% implemented
-- [ ] create_app
-- [ ] create_campaign
-- [ ] create_export_job
-- [ ] create_import_job
-- [ ] create_segment
-- [ ] delete_adm_channel
-- [ ] delete_apns_channel
-- [ ] delete_apns_sandbox_channel
-- [ ] delete_apns_voip_channel
-- [ ] delete_apns_voip_sandbox_channel
-- [ ] delete_app
-- [ ] delete_baidu_channel
-- [ ] delete_campaign
-- [ ] delete_email_channel
-- [ ] delete_endpoint
-- [ ] delete_event_stream
-- [ ] delete_gcm_channel
-- [ ] delete_segment
-- [ ] delete_sms_channel
-- [ ] delete_user_endpoints
-- [ ] delete_voice_channel
-- [ ] get_adm_channel
-- [ ] get_apns_channel
-- [ ] get_apns_sandbox_channel
-- [ ] get_apns_voip_channel
-- [ ] get_apns_voip_sandbox_channel
-- [ ] get_app
-- [ ] get_application_settings
-- [ ] get_apps
-- [ ] get_baidu_channel
-- [ ] get_campaign
-- [ ] get_campaign_activities
-- [ ] get_campaign_version
-- [ ] get_campaign_versions
-- [ ] get_campaigns
-- [ ] get_channels
-- [ ] get_email_channel
-- [ ] get_endpoint
-- [ ] get_event_stream
-- [ ] get_export_job
-- [ ] get_export_jobs
-- [ ] get_gcm_channel
-- [ ] get_import_job
-- [ ] get_import_jobs
-- [ ] get_segment
-- [ ] get_segment_export_jobs
-- [ ] get_segment_import_jobs
-- [ ] get_segment_version
-- [ ] get_segment_versions
-- [ ] get_segments
-- [ ] get_sms_channel
-- [ ] get_user_endpoints
-- [ ] get_voice_channel
-- [ ] phone_number_validate
-- [ ] put_event_stream
-- [ ] put_events
-- [ ] remove_attributes
-- [ ] send_messages
-- [ ] send_users_messages
-- [ ] update_adm_channel
-- [ ] update_apns_channel
-- [ ] update_apns_sandbox_channel
-- [ ] update_apns_voip_channel
-- [ ] update_apns_voip_sandbox_channel
-- [ ] update_application_settings
-- [ ] update_baidu_channel
-- [ ] update_campaign
-- [ ] update_email_channel
-- [ ] update_endpoint
-- [ ] update_endpoints_batch
-- [ ] update_gcm_channel
-- [ ] update_segment
-- [ ] update_sms_channel
-- [ ] update_voice_channel
-
-## pinpoint-email - 0% implemented
-- [ ] create_configuration_set
-- [ ] create_configuration_set_event_destination
-- [ ] create_dedicated_ip_pool
-- [ ] create_deliverability_test_report
-- [ ] create_email_identity
-- [ ] delete_configuration_set
-- [ ] delete_configuration_set_event_destination
-- [ ] delete_dedicated_ip_pool
-- [ ] delete_email_identity
-- [ ] get_account
-- [ ] get_blacklist_reports
-- [ ] get_configuration_set
-- [ ] get_configuration_set_event_destinations
-- [ ] get_dedicated_ip
-- [ ] get_dedicated_ips
-- [ ] get_deliverability_dashboard_options
-- [ ] get_deliverability_test_report
-- [ ] get_domain_statistics_report
-- [ ] get_email_identity
-- [ ] list_configuration_sets
-- [ ] list_dedicated_ip_pools
-- [ ] list_deliverability_test_reports
-- [ ] list_email_identities
-- [ ] put_account_dedicated_ip_warmup_attributes
-- [ ] put_account_sending_attributes
-- [ ] put_configuration_set_delivery_options
-- [ ] put_configuration_set_reputation_options
-- [ ] put_configuration_set_sending_options
-- [ ] put_configuration_set_tracking_options
-- [ ] put_dedicated_ip_in_pool
-- [ ] put_dedicated_ip_warmup_attributes
-- [ ] put_deliverability_dashboard_option
-- [ ] put_email_identity_dkim_attributes
-- [ ] put_email_identity_feedback_attributes
-- [ ] put_email_identity_mail_from_attributes
-- [ ] send_email
-- [ ] update_configuration_set_event_destination
-
-## polly - 55% implemented
-- [X] delete_lexicon
-- [X] describe_voices
-- [X] get_lexicon
-- [ ] get_speech_synthesis_task
-- [X] list_lexicons
-- [ ] list_speech_synthesis_tasks
-- [X] put_lexicon
-- [ ] start_speech_synthesis_task
-- [ ] synthesize_speech
-
-## pricing - 0% implemented
-- [ ] describe_services
-- [ ] get_attribute_values
-- [ ] get_products
-
-## quicksight - 0% implemented
-- [ ] create_group
-- [ ] create_group_membership
-- [ ] delete_group
-- [ ] delete_group_membership
-- [ ] delete_user
-- [ ] describe_group
-- [ ] describe_user
-- [ ] get_dashboard_embed_url
-- [ ] list_group_memberships
-- [ ] list_groups
-- [ ] list_user_groups
-- [ ] list_users
-- [ ] register_user
-- [ ] update_group
-- [ ] update_user
-
-## ram - 0% implemented
-- [ ] accept_resource_share_invitation
-- [ ] associate_resource_share
-- [ ] create_resource_share
-- [ ] delete_resource_share
-- [ ] disassociate_resource_share
-- [ ] enable_sharing_with_aws_organization
-- [ ] get_resource_policies
-- [ ] get_resource_share_associations
-- [ ] get_resource_share_invitations
-- [ ] get_resource_shares
-- [ ] list_principals
-- [ ] list_resources
-- [ ] reject_resource_share_invitation
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_resource_share
-
-## rds - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] authorize_db_security_group_ingress
-- [ ] backtrack_db_cluster
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] copy_db_snapshot
-- [ ] copy_option_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_endpoint
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_instance_read_replica
-- [ ] create_db_parameter_group
-- [ ] create_db_security_group
-- [ ] create_db_snapshot
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] create_global_cluster
-- [ ] create_option_group
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_endpoint
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_instance_automated_backup
-- [ ] delete_db_parameter_group
-- [ ] delete_db_security_group
-- [ ] delete_db_snapshot
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_global_cluster
-- [ ] delete_option_group
-- [ ] describe_account_attributes
-- [ ] describe_certificates
-- [ ] describe_db_cluster_backtracks
-- [ ] describe_db_cluster_endpoints
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instance_automated_backups
-- [ ] describe_db_instances
-- [ ] describe_db_log_files
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_security_groups
-- [ ] describe_db_snapshot_attributes
-- [ ] describe_db_snapshots
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_global_clusters
-- [ ] describe_option_group_options
-- [ ] describe_option_groups
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_reserved_db_instances
-- [ ] describe_reserved_db_instances_offerings
-- [ ] describe_source_regions
-- [ ] describe_valid_db_instance_modifications
-- [ ] download_db_log_file_portion
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_current_db_cluster_capacity
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_endpoint
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_snapshot
-- [ ] modify_db_snapshot_attribute
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] modify_global_cluster
-- [ ] modify_option_group
-- [ ] promote_read_replica
-- [ ] promote_read_replica_db_cluster
-- [ ] purchase_reserved_db_instances_offering
-- [ ] reboot_db_instance
-- [ ] remove_from_global_cluster
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_s3
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-- [ ] restore_db_instance_from_db_snapshot
-- [ ] restore_db_instance_from_s3
-- [ ] restore_db_instance_to_point_in_time
-- [ ] revoke_db_security_group_ingress
-- [ ] start_db_cluster
-- [ ] start_db_instance
-- [ ] stop_db_cluster
-- [ ] stop_db_instance
-
-## rds-data - 0% implemented
-- [ ] execute_sql
-
-## redshift - 32% implemented
-- [ ] accept_reserved_node_exchange
-- [ ] authorize_cluster_security_group_ingress
-- [ ] authorize_snapshot_access
-- [ ] batch_delete_cluster_snapshots
-- [ ] batch_modify_cluster_snapshots
-- [ ] cancel_resize
-- [ ] copy_cluster_snapshot
-- [X] create_cluster
-- [X] create_cluster_parameter_group
-- [X] create_cluster_security_group
-- [X] create_cluster_snapshot
-- [X] create_cluster_subnet_group
-- [ ] create_event_subscription
-- [ ] create_hsm_client_certificate
-- [ ] create_hsm_configuration
-- [X] create_snapshot_copy_grant
-- [ ] create_snapshot_schedule
-- [X] create_tags
-- [X] delete_cluster
-- [X] delete_cluster_parameter_group
-- [X] delete_cluster_security_group
-- [X] delete_cluster_snapshot
-- [X] delete_cluster_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_hsm_client_certificate
-- [ ] delete_hsm_configuration
-- [X] delete_snapshot_copy_grant
-- [ ] delete_snapshot_schedule
-- [X] delete_tags
-- [ ] describe_account_attributes
-- [ ] describe_cluster_db_revisions
-- [X] describe_cluster_parameter_groups
-- [ ] describe_cluster_parameters
-- [X] describe_cluster_security_groups
-- [X] describe_cluster_snapshots
-- [X] describe_cluster_subnet_groups
-- [ ] describe_cluster_tracks
-- [ ] describe_cluster_versions
-- [X] describe_clusters
-- [ ] describe_default_cluster_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_hsm_client_certificates
-- [ ] describe_hsm_configurations
-- [ ] describe_logging_status
-- [ ] describe_orderable_cluster_options
-- [ ] describe_reserved_node_offerings
-- [ ] describe_reserved_nodes
-- [ ] describe_resize
-- [X] describe_snapshot_copy_grants
-- [ ] describe_snapshot_schedules
-- [ ] describe_storage
-- [ ] describe_table_restore_status
-- [X] describe_tags
-- [ ] disable_logging
-- [X] disable_snapshot_copy
-- [ ] enable_logging
-- [X] enable_snapshot_copy
-- [ ] get_cluster_credentials
-- [ ] get_reserved_node_exchange_offerings
-- [X] modify_cluster
-- [ ] modify_cluster_db_revision
-- [ ] modify_cluster_iam_roles
-- [ ] modify_cluster_maintenance
-- [ ] modify_cluster_parameter_group
-- [ ] modify_cluster_snapshot
-- [ ] modify_cluster_snapshot_schedule
-- [ ] modify_cluster_subnet_group
-- [ ] modify_event_subscription
-- [X] modify_snapshot_copy_retention_period
-- [ ] modify_snapshot_schedule
-- [ ] purchase_reserved_node_offering
-- [ ] reboot_cluster
-- [ ] reset_cluster_parameter_group
-- [ ] resize_cluster
-- [X] restore_from_cluster_snapshot
-- [ ] restore_table_from_cluster_snapshot
-- [ ] revoke_cluster_security_group_ingress
-- [ ] revoke_snapshot_access
-- [ ] rotate_encryption_key
start_celebrity_recognition -- [ ] start_content_moderation -- [ ] start_face_detection -- [ ] start_face_search -- [ ] start_label_detection -- [ ] start_person_tracking -- [ ] start_stream_processor -- [ ] stop_stream_processor - -## resource-groups - 0% implemented -- [ ] create_group -- [ ] delete_group -- [ ] get_group -- [ ] get_group_query -- [ ] get_tags -- [ ] list_group_resources -- [ ] list_groups -- [ ] search_resources -- [ ] tag -- [ ] untag -- [ ] update_group -- [ ] update_group_query - -## resourcegroupstaggingapi - 60% implemented -- [X] get_resources -- [X] get_tag_keys -- [X] get_tag_values -- [ ] tag_resources -- [ ] untag_resources - -## robomaker - 0% implemented -- [ ] batch_describe_simulation_job -- [ ] cancel_simulation_job -- [ ] create_deployment_job -- [ ] create_fleet -- [ ] create_robot -- [ ] create_robot_application -- [ ] create_robot_application_version -- [ ] create_simulation_application -- [ ] create_simulation_application_version -- [ ] create_simulation_job -- [ ] delete_fleet -- [ ] delete_robot -- [ ] delete_robot_application -- [ ] delete_simulation_application -- [ ] deregister_robot -- [ ] describe_deployment_job -- [ ] describe_fleet -- [ ] describe_robot -- [ ] describe_robot_application -- [ ] describe_simulation_application -- [ ] describe_simulation_job -- [ ] list_deployment_jobs -- [ ] list_fleets -- [ ] list_robot_applications -- [ ] list_robots -- [ ] list_simulation_applications -- [ ] list_simulation_jobs -- [ ] register_robot -- [ ] restart_simulation_job -- [ ] sync_deployment_job -- [ ] update_robot_application -- [ ] update_simulation_application - -## route53 - 12% implemented -- [ ] associate_vpc_with_hosted_zone -- [ ] change_resource_record_sets -- [X] change_tags_for_resource -- [X] create_health_check -- [X] create_hosted_zone -- [ ] create_query_logging_config -- [ ] create_reusable_delegation_set -- [ ] create_traffic_policy -- [ ] create_traffic_policy_instance -- [ ] create_traffic_policy_version -- [ ] create_vpc_association_authorization -- [X] delete_health_check -- [X] delete_hosted_zone -- [ ] delete_query_logging_config -- [ ] delete_reusable_delegation_set -- [ ] delete_traffic_policy -- [ ] delete_traffic_policy_instance -- [ ] delete_vpc_association_authorization -- [ ] disassociate_vpc_from_hosted_zone -- [ ] get_account_limit -- [ ] get_change -- [ ] get_checker_ip_ranges -- [ ] get_geo_location -- [ ] get_health_check -- [ ] get_health_check_count -- [ ] get_health_check_last_failure_reason -- [ ] get_health_check_status -- [X] get_hosted_zone -- [ ] get_hosted_zone_count -- [ ] get_hosted_zone_limit -- [ ] get_query_logging_config -- [ ] get_reusable_delegation_set -- [ ] get_reusable_delegation_set_limit -- [ ] get_traffic_policy -- [ ] get_traffic_policy_instance -- [ ] get_traffic_policy_instance_count -- [ ] list_geo_locations -- [ ] list_health_checks -- [ ] list_hosted_zones -- [ ] list_hosted_zones_by_name -- [ ] list_query_logging_configs -- [ ] list_resource_record_sets -- [ ] list_reusable_delegation_sets -- [X] list_tags_for_resource -- [ ] list_tags_for_resources -- [ ] list_traffic_policies -- [ ] list_traffic_policy_instances -- [ ] list_traffic_policy_instances_by_hosted_zone -- [ ] list_traffic_policy_instances_by_policy -- [ ] list_traffic_policy_versions -- [ ] list_vpc_association_authorizations -- [ ] test_dns_answer -- [ ] update_health_check -- [ ] update_hosted_zone_comment -- [ ] update_traffic_policy_comment -- [ ] update_traffic_policy_instance - -## route53domains - 0% implemented 
-- [ ] check_domain_availability -- [ ] check_domain_transferability -- [ ] delete_tags_for_domain -- [ ] disable_domain_auto_renew -- [ ] disable_domain_transfer_lock -- [ ] enable_domain_auto_renew -- [ ] enable_domain_transfer_lock -- [ ] get_contact_reachability_status -- [ ] get_domain_detail -- [ ] get_domain_suggestions -- [ ] get_operation_detail -- [ ] list_domains -- [ ] list_operations -- [ ] list_tags_for_domain -- [ ] register_domain -- [ ] renew_domain -- [ ] resend_contact_reachability_email -- [ ] retrieve_domain_auth_code -- [ ] transfer_domain -- [ ] update_domain_contact -- [ ] update_domain_contact_privacy -- [ ] update_domain_nameservers -- [ ] update_tags_for_domain -- [ ] view_billing - -## route53resolver - 0% implemented -- [ ] associate_resolver_endpoint_ip_address -- [ ] associate_resolver_rule -- [ ] create_resolver_endpoint -- [ ] create_resolver_rule -- [ ] delete_resolver_endpoint -- [ ] delete_resolver_rule -- [ ] disassociate_resolver_endpoint_ip_address -- [ ] disassociate_resolver_rule -- [ ] get_resolver_endpoint -- [ ] get_resolver_rule -- [ ] get_resolver_rule_association -- [ ] get_resolver_rule_policy -- [ ] list_resolver_endpoint_ip_addresses -- [ ] list_resolver_endpoints -- [ ] list_resolver_rule_associations -- [ ] list_resolver_rules -- [ ] list_tags_for_resource -- [ ] put_resolver_rule_policy -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_resolver_endpoint -- [ ] update_resolver_rule - -## s3 - 13% implemented -- [ ] abort_multipart_upload -- [ ] complete_multipart_upload -- [ ] copy_object -- [X] create_bucket -- [ ] create_multipart_upload -- [X] delete_bucket -- [ ] delete_bucket_analytics_configuration -- [X] delete_bucket_cors -- [ ] delete_bucket_encryption -- [ ] delete_bucket_inventory_configuration -- [ ] delete_bucket_lifecycle -- [ ] delete_bucket_metrics_configuration -- [X] delete_bucket_policy -- [ ] delete_bucket_replication -- [X] delete_bucket_tagging -- [ ] delete_bucket_website -- [ ] delete_object -- [ ] delete_object_tagging -- [ ] delete_objects -- [ ] delete_public_access_block -- [ ] get_bucket_accelerate_configuration -- [X] get_bucket_acl -- [ ] get_bucket_analytics_configuration -- [ ] get_bucket_cors -- [ ] get_bucket_encryption -- [ ] get_bucket_inventory_configuration -- [ ] get_bucket_lifecycle -- [ ] get_bucket_lifecycle_configuration -- [ ] get_bucket_location -- [ ] get_bucket_logging -- [ ] get_bucket_metrics_configuration -- [ ] get_bucket_notification -- [ ] get_bucket_notification_configuration -- [X] get_bucket_policy -- [ ] get_bucket_policy_status -- [ ] get_bucket_replication -- [ ] get_bucket_request_payment -- [ ] get_bucket_tagging -- [X] get_bucket_versioning -- [ ] get_bucket_website -- [ ] get_object -- [ ] get_object_acl -- [ ] get_object_legal_hold -- [ ] get_object_lock_configuration -- [ ] get_object_retention -- [ ] get_object_tagging -- [ ] get_object_torrent -- [ ] get_public_access_block -- [ ] head_bucket -- [ ] head_object -- [ ] list_bucket_analytics_configurations -- [ ] list_bucket_inventory_configurations -- [ ] list_bucket_metrics_configurations -- [ ] list_buckets -- [ ] list_multipart_uploads -- [ ] list_object_versions -- [ ] list_objects -- [ ] list_objects_v2 -- [ ] list_parts -- [ ] put_bucket_accelerate_configuration -- [ ] put_bucket_acl -- [ ] put_bucket_analytics_configuration -- [X] put_bucket_cors -- [ ] put_bucket_encryption -- [ ] put_bucket_inventory_configuration -- [ ] put_bucket_lifecycle -- [ ] put_bucket_lifecycle_configuration -- [X] put_bucket_logging 
-- [ ] put_bucket_metrics_configuration -- [ ] put_bucket_notification -- [X] put_bucket_notification_configuration -- [ ] put_bucket_policy -- [ ] put_bucket_replication -- [ ] put_bucket_request_payment -- [X] put_bucket_tagging -- [ ] put_bucket_versioning -- [ ] put_bucket_website -- [ ] put_object -- [ ] put_object_acl -- [ ] put_object_legal_hold -- [ ] put_object_lock_configuration -- [ ] put_object_retention -- [ ] put_object_tagging -- [ ] put_public_access_block -- [ ] restore_object -- [ ] select_object_content -- [ ] upload_part -- [ ] upload_part_copy - -## s3control - 0% implemented -- [ ] delete_public_access_block -- [ ] get_public_access_block -- [ ] put_public_access_block - -## sagemaker - 0% implemented -- [ ] add_tags -- [ ] create_algorithm -- [ ] create_code_repository -- [ ] create_compilation_job -- [ ] create_endpoint -- [ ] create_endpoint_config -- [ ] create_hyper_parameter_tuning_job -- [ ] create_labeling_job -- [ ] create_model -- [ ] create_model_package -- [ ] create_notebook_instance -- [ ] create_notebook_instance_lifecycle_config -- [ ] create_presigned_notebook_instance_url -- [ ] create_training_job -- [ ] create_transform_job -- [ ] create_workteam -- [ ] delete_algorithm -- [ ] delete_code_repository -- [ ] delete_endpoint -- [ ] delete_endpoint_config -- [ ] delete_model -- [ ] delete_model_package -- [ ] delete_notebook_instance -- [ ] delete_notebook_instance_lifecycle_config -- [ ] delete_tags -- [ ] delete_workteam -- [ ] describe_algorithm -- [ ] describe_code_repository -- [ ] describe_compilation_job -- [ ] describe_endpoint -- [ ] describe_endpoint_config -- [ ] describe_hyper_parameter_tuning_job -- [ ] describe_labeling_job -- [ ] describe_model -- [ ] describe_model_package -- [ ] describe_notebook_instance -- [ ] describe_notebook_instance_lifecycle_config -- [ ] describe_subscribed_workteam -- [ ] describe_training_job -- [ ] describe_transform_job -- [ ] describe_workteam -- [ ] get_search_suggestions -- [ ] list_algorithms -- [ ] list_code_repositories -- [ ] list_compilation_jobs -- [ ] list_endpoint_configs -- [ ] list_endpoints -- [ ] list_hyper_parameter_tuning_jobs -- [ ] list_labeling_jobs -- [ ] list_labeling_jobs_for_workteam -- [ ] list_model_packages -- [ ] list_models -- [ ] list_notebook_instance_lifecycle_configs -- [ ] list_notebook_instances -- [ ] list_subscribed_workteams -- [ ] list_tags -- [ ] list_training_jobs -- [ ] list_training_jobs_for_hyper_parameter_tuning_job -- [ ] list_transform_jobs -- [ ] list_workteams -- [ ] render_ui_template -- [ ] search -- [ ] start_notebook_instance -- [ ] stop_compilation_job -- [ ] stop_hyper_parameter_tuning_job -- [ ] stop_labeling_job -- [ ] stop_notebook_instance -- [ ] stop_training_job -- [ ] stop_transform_job -- [ ] update_code_repository -- [ ] update_endpoint -- [ ] update_endpoint_weights_and_capacities -- [ ] update_notebook_instance -- [ ] update_notebook_instance_lifecycle_config -- [ ] update_workteam - -## sagemaker-runtime - 0% implemented -- [ ] invoke_endpoint - -## sdb - 0% implemented -- [ ] batch_delete_attributes -- [ ] batch_put_attributes -- [ ] create_domain -- [ ] delete_attributes -- [ ] delete_domain -- [ ] domain_metadata -- [ ] get_attributes -- [ ] list_domains -- [ ] put_attributes -- [ ] select - -## secretsmanager - 44% implemented -- [ ] cancel_rotate_secret -- [X] create_secret -- [ ] delete_resource_policy -- [X] delete_secret -- [X] describe_secret -- [X] get_random_password -- [ ] get_resource_policy -- [X] get_secret_value -- [ ] 
list_secret_version_ids -- [X] list_secrets -- [ ] put_resource_policy -- [ ] put_secret_value -- [X] restore_secret -- [X] rotate_secret -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_secret -- [ ] update_secret_version_stage - -## securityhub - 0% implemented -- [ ] accept_invitation -- [ ] batch_disable_standards -- [ ] batch_enable_standards -- [ ] batch_import_findings -- [ ] create_insight -- [ ] create_members -- [ ] decline_invitations -- [ ] delete_insight -- [ ] delete_invitations -- [ ] delete_members -- [ ] disable_import_findings_for_product -- [ ] disable_security_hub -- [ ] disassociate_from_master_account -- [ ] disassociate_members -- [ ] enable_import_findings_for_product -- [ ] enable_security_hub -- [ ] get_enabled_standards -- [ ] get_findings -- [ ] get_insight_results -- [ ] get_insights -- [ ] get_invitations_count -- [ ] get_master_account -- [ ] get_members -- [ ] invite_members -- [ ] list_enabled_products_for_import -- [ ] list_invitations -- [ ] list_members -- [ ] update_findings -- [ ] update_insight - -## serverlessrepo - 0% implemented -- [ ] create_application -- [ ] create_application_version -- [ ] create_cloud_formation_change_set -- [ ] create_cloud_formation_template -- [ ] delete_application -- [ ] get_application -- [ ] get_application_policy -- [ ] get_cloud_formation_template -- [ ] list_application_dependencies -- [ ] list_application_versions -- [ ] list_applications -- [ ] put_application_policy -- [ ] update_application - -## servicecatalog - 0% implemented -- [ ] accept_portfolio_share -- [ ] associate_principal_with_portfolio -- [ ] associate_product_with_portfolio -- [ ] associate_service_action_with_provisioning_artifact -- [ ] associate_tag_option_with_resource -- [ ] batch_associate_service_action_with_provisioning_artifact -- [ ] batch_disassociate_service_action_from_provisioning_artifact -- [ ] copy_product -- [ ] create_constraint -- [ ] create_portfolio -- [ ] create_portfolio_share -- [ ] create_product -- [ ] create_provisioned_product_plan -- [ ] create_provisioning_artifact -- [ ] create_service_action -- [ ] create_tag_option -- [ ] delete_constraint -- [ ] delete_portfolio -- [ ] delete_portfolio_share -- [ ] delete_product -- [ ] delete_provisioned_product_plan -- [ ] delete_provisioning_artifact -- [ ] delete_service_action -- [ ] delete_tag_option -- [ ] describe_constraint -- [ ] describe_copy_product_status -- [ ] describe_portfolio -- [ ] describe_portfolio_share_status -- [ ] describe_product -- [ ] describe_product_as_admin -- [ ] describe_product_view -- [ ] describe_provisioned_product -- [ ] describe_provisioned_product_plan -- [ ] describe_provisioning_artifact -- [ ] describe_provisioning_parameters -- [ ] describe_record -- [ ] describe_service_action -- [ ] describe_tag_option -- [ ] disable_aws_organizations_access -- [ ] disassociate_principal_from_portfolio -- [ ] disassociate_product_from_portfolio -- [ ] disassociate_service_action_from_provisioning_artifact -- [ ] disassociate_tag_option_from_resource -- [ ] enable_aws_organizations_access -- [ ] execute_provisioned_product_plan -- [ ] execute_provisioned_product_service_action -- [ ] get_aws_organizations_access_status -- [ ] list_accepted_portfolio_shares -- [ ] list_constraints_for_portfolio -- [ ] list_launch_paths -- [ ] list_organization_portfolio_access -- [ ] list_portfolio_access -- [ ] list_portfolios -- [ ] list_portfolios_for_product -- [ ] list_principals_for_portfolio -- [ ] list_provisioned_product_plans -- [ ] 
list_provisioning_artifacts -- [ ] list_provisioning_artifacts_for_service_action -- [ ] list_record_history -- [ ] list_resources_for_tag_option -- [ ] list_service_actions -- [ ] list_service_actions_for_provisioning_artifact -- [ ] list_tag_options -- [ ] provision_product -- [ ] reject_portfolio_share -- [ ] scan_provisioned_products -- [ ] search_products -- [ ] search_products_as_admin -- [ ] search_provisioned_products -- [ ] terminate_provisioned_product -- [ ] update_constraint -- [ ] update_portfolio -- [ ] update_product -- [ ] update_provisioned_product -- [ ] update_provisioning_artifact -- [ ] update_service_action -- [ ] update_tag_option - -## servicediscovery - 0% implemented -- [ ] create_http_namespace -- [ ] create_private_dns_namespace -- [ ] create_public_dns_namespace -- [ ] create_service -- [ ] delete_namespace -- [ ] delete_service -- [ ] deregister_instance -- [ ] discover_instances -- [ ] get_instance -- [ ] get_instances_health_status -- [ ] get_namespace -- [ ] get_operation -- [ ] get_service -- [ ] list_instances -- [ ] list_namespaces -- [ ] list_operations -- [ ] list_services -- [ ] register_instance -- [ ] update_instance_custom_health_status -- [ ] update_service - -## ses - 11% implemented -- [ ] clone_receipt_rule_set -- [ ] create_configuration_set -- [ ] create_configuration_set_event_destination -- [ ] create_configuration_set_tracking_options -- [ ] create_custom_verification_email_template -- [ ] create_receipt_filter -- [ ] create_receipt_rule -- [ ] create_receipt_rule_set -- [ ] create_template -- [ ] delete_configuration_set -- [ ] delete_configuration_set_event_destination -- [ ] delete_configuration_set_tracking_options -- [ ] delete_custom_verification_email_template -- [X] delete_identity -- [ ] delete_identity_policy -- [ ] delete_receipt_filter -- [ ] delete_receipt_rule -- [ ] delete_receipt_rule_set -- [ ] delete_template -- [ ] delete_verified_email_address -- [ ] describe_active_receipt_rule_set -- [ ] describe_configuration_set -- [ ] describe_receipt_rule -- [ ] describe_receipt_rule_set -- [ ] get_account_sending_enabled -- [ ] get_custom_verification_email_template -- [ ] get_identity_dkim_attributes -- [ ] get_identity_mail_from_domain_attributes -- [ ] get_identity_notification_attributes -- [ ] get_identity_policies -- [ ] get_identity_verification_attributes -- [X] get_send_quota -- [ ] get_send_statistics -- [ ] get_template -- [ ] list_configuration_sets -- [ ] list_custom_verification_email_templates -- [X] list_identities -- [ ] list_identity_policies -- [ ] list_receipt_filters -- [ ] list_receipt_rule_sets -- [ ] list_templates -- [X] list_verified_email_addresses -- [ ] put_identity_policy -- [ ] reorder_receipt_rule_set -- [ ] send_bounce -- [ ] send_bulk_templated_email -- [ ] send_custom_verification_email -- [X] send_email -- [X] send_raw_email -- [ ] send_templated_email -- [ ] set_active_receipt_rule_set -- [ ] set_identity_dkim_enabled -- [ ] set_identity_feedback_forwarding_enabled -- [ ] set_identity_headers_in_notifications_enabled -- [ ] set_identity_mail_from_domain -- [ ] set_identity_notification_topic -- [ ] set_receipt_rule_position -- [ ] test_render_template -- [ ] update_account_sending_enabled -- [ ] update_configuration_set_event_destination -- [ ] update_configuration_set_reputation_metrics_enabled -- [ ] update_configuration_set_sending_enabled -- [ ] update_configuration_set_tracking_options -- [ ] update_custom_verification_email_template -- [ ] update_receipt_rule -- [ ] update_template -- [ 
] verify_domain_dkim -- [ ] verify_domain_identity -- [X] verify_email_address -- [X] verify_email_identity - -## shield - 0% implemented -- [ ] associate_drt_log_bucket -- [ ] associate_drt_role -- [ ] create_protection -- [ ] create_subscription -- [ ] delete_protection -- [ ] delete_subscription -- [ ] describe_attack -- [ ] describe_drt_access -- [ ] describe_emergency_contact_settings -- [ ] describe_protection -- [ ] describe_subscription -- [ ] disassociate_drt_log_bucket -- [ ] disassociate_drt_role -- [ ] get_subscription_state -- [ ] list_attacks -- [ ] list_protections -- [ ] update_emergency_contact_settings -- [ ] update_subscription - -## signer - 0% implemented -- [ ] cancel_signing_profile -- [ ] describe_signing_job -- [ ] get_signing_platform -- [ ] get_signing_profile -- [ ] list_signing_jobs -- [ ] list_signing_platforms -- [ ] list_signing_profiles -- [ ] put_signing_profile -- [ ] start_signing_job - -## sms - 0% implemented -- [ ] create_app -- [ ] create_replication_job -- [ ] delete_app -- [ ] delete_app_launch_configuration -- [ ] delete_app_replication_configuration -- [ ] delete_replication_job -- [ ] delete_server_catalog -- [ ] disassociate_connector -- [ ] generate_change_set -- [ ] generate_template -- [ ] get_app -- [ ] get_app_launch_configuration -- [ ] get_app_replication_configuration -- [ ] get_connectors -- [ ] get_replication_jobs -- [ ] get_replication_runs -- [ ] get_servers -- [ ] import_server_catalog -- [ ] launch_app -- [ ] list_apps -- [ ] put_app_launch_configuration -- [ ] put_app_replication_configuration -- [ ] start_app_replication -- [ ] start_on_demand_replication_run -- [ ] stop_app_replication -- [ ] terminate_app -- [ ] update_app -- [ ] update_replication_job - -## sms-voice - 0% implemented -- [ ] create_configuration_set -- [ ] create_configuration_set_event_destination -- [ ] delete_configuration_set -- [ ] delete_configuration_set_event_destination -- [ ] get_configuration_set_event_destinations -- [ ] send_voice_message -- [ ] update_configuration_set_event_destination - -## snowball - 0% implemented -- [ ] cancel_cluster -- [ ] cancel_job -- [ ] create_address -- [ ] create_cluster -- [ ] create_job -- [ ] describe_address -- [ ] describe_addresses -- [ ] describe_cluster -- [ ] describe_job -- [ ] get_job_manifest -- [ ] get_job_unlock_code -- [ ] get_snowball_usage -- [ ] list_cluster_jobs -- [ ] list_clusters -- [ ] list_compatible_images -- [ ] list_jobs -- [ ] update_cluster -- [ ] update_job - -## sns - 53% implemented -- [ ] add_permission -- [ ] check_if_phone_number_is_opted_out -- [ ] confirm_subscription -- [X] create_platform_application -- [X] create_platform_endpoint -- [X] create_topic -- [X] delete_endpoint -- [X] delete_platform_application -- [X] delete_topic -- [ ] get_endpoint_attributes -- [ ] get_platform_application_attributes -- [ ] get_sms_attributes -- [X] get_subscription_attributes -- [ ] get_topic_attributes -- [X] list_endpoints_by_platform_application -- [ ] list_phone_numbers_opted_out -- [X] list_platform_applications -- [X] list_subscriptions -- [ ] list_subscriptions_by_topic -- [X] list_topics -- [ ] opt_in_phone_number -- [X] publish -- [ ] remove_permission -- [X] set_endpoint_attributes -- [ ] set_platform_application_attributes -- [ ] set_sms_attributes -- [X] set_subscription_attributes -- [ ] set_topic_attributes -- [X] subscribe -- [X] unsubscribe - -## sqs - 65% implemented -- [X] add_permission -- [X] change_message_visibility -- [ ] change_message_visibility_batch -- [X] 
create_queue -- [X] delete_message -- [ ] delete_message_batch -- [X] delete_queue -- [ ] get_queue_attributes -- [ ] get_queue_url -- [X] list_dead_letter_source_queues -- [ ] list_queue_tags -- [X] list_queues -- [X] purge_queue -- [ ] receive_message -- [X] remove_permission -- [X] send_message -- [ ] send_message_batch -- [X] set_queue_attributes -- [X] tag_queue -- [X] untag_queue - -## ssm - 10% implemented -- [X] add_tags_to_resource -- [ ] cancel_command -- [ ] cancel_maintenance_window_execution -- [ ] create_activation -- [ ] create_association -- [ ] create_association_batch -- [ ] create_document -- [ ] create_maintenance_window -- [ ] create_patch_baseline -- [ ] create_resource_data_sync -- [ ] delete_activation -- [ ] delete_association -- [ ] delete_document -- [ ] delete_inventory -- [ ] delete_maintenance_window -- [X] delete_parameter -- [X] delete_parameters -- [ ] delete_patch_baseline -- [ ] delete_resource_data_sync -- [ ] deregister_managed_instance -- [ ] deregister_patch_baseline_for_patch_group -- [ ] deregister_target_from_maintenance_window -- [ ] deregister_task_from_maintenance_window -- [ ] describe_activations -- [ ] describe_association -- [ ] describe_association_execution_targets -- [ ] describe_association_executions -- [ ] describe_automation_executions -- [ ] describe_automation_step_executions -- [ ] describe_available_patches -- [ ] describe_document -- [ ] describe_document_permission -- [ ] describe_effective_instance_associations -- [ ] describe_effective_patches_for_patch_baseline -- [ ] describe_instance_associations_status -- [ ] describe_instance_information -- [ ] describe_instance_patch_states -- [ ] describe_instance_patch_states_for_patch_group -- [ ] describe_instance_patches -- [ ] describe_inventory_deletions -- [ ] describe_maintenance_window_execution_task_invocations -- [ ] describe_maintenance_window_execution_tasks -- [ ] describe_maintenance_window_executions -- [ ] describe_maintenance_window_schedule -- [ ] describe_maintenance_window_targets -- [ ] describe_maintenance_window_tasks -- [ ] describe_maintenance_windows -- [ ] describe_maintenance_windows_for_target -- [ ] describe_parameters -- [ ] describe_patch_baselines -- [ ] describe_patch_group_state -- [ ] describe_patch_groups -- [ ] describe_sessions -- [ ] get_automation_execution -- [X] get_command_invocation -- [ ] get_connection_status -- [ ] get_default_patch_baseline -- [ ] get_deployable_patch_snapshot_for_instance -- [ ] get_document -- [ ] get_inventory -- [ ] get_inventory_schema -- [ ] get_maintenance_window -- [ ] get_maintenance_window_execution -- [ ] get_maintenance_window_execution_task -- [ ] get_maintenance_window_execution_task_invocation -- [ ] get_maintenance_window_task -- [X] get_parameter -- [ ] get_parameter_history -- [X] get_parameters -- [X] get_parameters_by_path -- [ ] get_patch_baseline -- [ ] get_patch_baseline_for_patch_group -- [ ] label_parameter_version -- [ ] list_association_versions -- [ ] list_associations -- [ ] list_command_invocations -- [X] list_commands -- [ ] list_compliance_items -- [ ] list_compliance_summaries -- [ ] list_document_versions -- [ ] list_documents -- [ ] list_inventory_entries -- [ ] list_resource_compliance_summaries -- [ ] list_resource_data_sync -- [X] list_tags_for_resource -- [ ] modify_document_permission -- [ ] put_compliance_items -- [ ] put_inventory -- [X] put_parameter -- [ ] register_default_patch_baseline -- [ ] register_patch_baseline_for_patch_group -- [ ] 
register_target_with_maintenance_window -- [ ] register_task_with_maintenance_window -- [X] remove_tags_from_resource -- [ ] resume_session -- [ ] send_automation_signal -- [X] send_command -- [ ] start_associations_once -- [ ] start_automation_execution -- [ ] start_session -- [ ] stop_automation_execution -- [ ] terminate_session -- [ ] update_association -- [ ] update_association_status -- [ ] update_document -- [ ] update_document_default_version -- [ ] update_maintenance_window -- [ ] update_maintenance_window_target -- [ ] update_maintenance_window_task -- [ ] update_managed_instance_role -- [ ] update_patch_baseline - -## stepfunctions - 0% implemented -- [ ] create_activity -- [ ] create_state_machine -- [ ] delete_activity -- [ ] delete_state_machine -- [ ] describe_activity -- [ ] describe_execution -- [ ] describe_state_machine -- [ ] describe_state_machine_for_execution -- [ ] get_activity_task -- [ ] get_execution_history -- [ ] list_activities -- [ ] list_executions -- [ ] list_state_machines -- [ ] list_tags_for_resource -- [ ] send_task_failure -- [ ] send_task_heartbeat -- [ ] send_task_success -- [ ] start_execution -- [ ] stop_execution -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_state_machine - -## storagegateway - 0% implemented -- [ ] activate_gateway -- [ ] add_cache -- [ ] add_tags_to_resource -- [ ] add_upload_buffer -- [ ] add_working_storage -- [ ] cancel_archival -- [ ] cancel_retrieval -- [ ] create_cached_iscsi_volume -- [ ] create_nfs_file_share -- [ ] create_smb_file_share -- [ ] create_snapshot -- [ ] create_snapshot_from_volume_recovery_point -- [ ] create_stored_iscsi_volume -- [ ] create_tape_with_barcode -- [ ] create_tapes -- [ ] delete_bandwidth_rate_limit -- [ ] delete_chap_credentials -- [ ] delete_file_share -- [ ] delete_gateway -- [ ] delete_snapshot_schedule -- [ ] delete_tape -- [ ] delete_tape_archive -- [ ] delete_volume -- [ ] describe_bandwidth_rate_limit -- [ ] describe_cache -- [ ] describe_cached_iscsi_volumes -- [ ] describe_chap_credentials -- [ ] describe_gateway_information -- [ ] describe_maintenance_start_time -- [ ] describe_nfs_file_shares -- [ ] describe_smb_file_shares -- [ ] describe_smb_settings -- [ ] describe_snapshot_schedule -- [ ] describe_stored_iscsi_volumes -- [ ] describe_tape_archives -- [ ] describe_tape_recovery_points -- [ ] describe_tapes -- [ ] describe_upload_buffer -- [ ] describe_vtl_devices -- [ ] describe_working_storage -- [ ] disable_gateway -- [ ] join_domain -- [ ] list_file_shares -- [ ] list_gateways -- [ ] list_local_disks -- [ ] list_tags_for_resource -- [ ] list_tapes -- [ ] list_volume_initiators -- [ ] list_volume_recovery_points -- [ ] list_volumes -- [ ] notify_when_uploaded -- [ ] refresh_cache -- [ ] remove_tags_from_resource -- [ ] reset_cache -- [ ] retrieve_tape_archive -- [ ] retrieve_tape_recovery_point -- [ ] set_local_console_password -- [ ] set_smb_guest_password -- [ ] shutdown_gateway -- [ ] start_gateway -- [ ] update_bandwidth_rate_limit -- [ ] update_chap_credentials -- [ ] update_gateway_information -- [ ] update_gateway_software_now -- [ ] update_maintenance_start_time -- [ ] update_nfs_file_share -- [ ] update_smb_file_share -- [ ] update_snapshot_schedule -- [ ] update_vtl_device_type - -## sts - 42% implemented -- [X] assume_role -- [ ] assume_role_with_saml -- [ ] assume_role_with_web_identity -- [ ] decode_authorization_message -- [ ] get_caller_identity -- [X] get_federation_token -- [X] get_session_token - -## support - 0% implemented -- [ ] 
add_attachments_to_set -- [ ] add_communication_to_case -- [ ] create_case -- [ ] describe_attachment -- [ ] describe_cases -- [ ] describe_communications -- [ ] describe_services -- [ ] describe_severity_levels -- [ ] describe_trusted_advisor_check_refresh_statuses -- [ ] describe_trusted_advisor_check_result -- [ ] describe_trusted_advisor_check_summaries -- [ ] describe_trusted_advisor_checks -- [ ] refresh_trusted_advisor_check -- [ ] resolve_case - -## swf - 58% implemented -- [ ] count_closed_workflow_executions -- [ ] count_open_workflow_executions -- [X] count_pending_activity_tasks -- [X] count_pending_decision_tasks -- [ ] deprecate_activity_type -- [X] deprecate_domain -- [ ] deprecate_workflow_type -- [ ] describe_activity_type -- [X] describe_domain -- [X] describe_workflow_execution -- [ ] describe_workflow_type -- [ ] get_workflow_execution_history -- [ ] list_activity_types -- [X] list_closed_workflow_executions -- [X] list_domains -- [X] list_open_workflow_executions -- [ ] list_workflow_types -- [X] poll_for_activity_task -- [X] poll_for_decision_task -- [X] record_activity_task_heartbeat -- [ ] register_activity_type -- [X] register_domain -- [ ] register_workflow_type -- [ ] request_cancel_workflow_execution -- [ ] respond_activity_task_canceled -- [X] respond_activity_task_completed -- [X] respond_activity_task_failed -- [X] respond_decision_task_completed -- [X] signal_workflow_execution -- [X] start_workflow_execution -- [X] terminate_workflow_execution - -## transcribe - 0% implemented -- [ ] create_vocabulary -- [ ] delete_transcription_job -- [ ] delete_vocabulary -- [ ] get_transcription_job -- [ ] get_vocabulary -- [ ] list_transcription_jobs -- [ ] list_vocabularies -- [ ] start_transcription_job -- [ ] update_vocabulary - -## transfer - 0% implemented -- [ ] create_server -- [ ] create_user -- [ ] delete_server -- [ ] delete_ssh_public_key -- [ ] delete_user -- [ ] describe_server -- [ ] describe_user -- [ ] import_ssh_public_key -- [ ] list_servers -- [ ] list_tags_for_resource -- [ ] list_users -- [ ] start_server -- [ ] stop_server -- [ ] tag_resource -- [ ] test_identity_provider -- [ ] untag_resource -- [ ] update_server -- [ ] update_user - -## translate - 0% implemented -- [ ] delete_terminology -- [ ] get_terminology -- [ ] import_terminology -- [ ] list_terminologies -- [ ] translate_text - -## waf - 0% implemented -- [ ] create_byte_match_set -- [ ] create_geo_match_set -- [ ] create_ip_set -- [ ] create_rate_based_rule -- [ ] create_regex_match_set -- [ ] create_regex_pattern_set -- [ ] create_rule -- [ ] create_rule_group -- [ ] create_size_constraint_set -- [ ] create_sql_injection_match_set -- [ ] create_web_acl -- [ ] create_xss_match_set -- [ ] delete_byte_match_set -- [ ] delete_geo_match_set -- [ ] delete_ip_set -- [ ] delete_logging_configuration -- [ ] delete_permission_policy -- [ ] delete_rate_based_rule -- [ ] delete_regex_match_set -- [ ] delete_regex_pattern_set -- [ ] delete_rule -- [ ] delete_rule_group -- [ ] delete_size_constraint_set -- [ ] delete_sql_injection_match_set -- [ ] delete_web_acl -- [ ] delete_xss_match_set -- [ ] get_byte_match_set -- [ ] get_change_token -- [ ] get_change_token_status -- [ ] get_geo_match_set -- [ ] get_ip_set -- [ ] get_logging_configuration -- [ ] get_permission_policy -- [ ] get_rate_based_rule -- [ ] get_rate_based_rule_managed_keys -- [ ] get_regex_match_set -- [ ] get_regex_pattern_set -- [ ] get_rule -- [ ] get_rule_group -- [ ] get_sampled_requests -- [ ] get_size_constraint_set -- [ ] 
get_sql_injection_match_set -- [ ] get_web_acl -- [ ] get_xss_match_set -- [ ] list_activated_rules_in_rule_group -- [ ] list_byte_match_sets -- [ ] list_geo_match_sets -- [ ] list_ip_sets -- [ ] list_logging_configurations -- [ ] list_rate_based_rules -- [ ] list_regex_match_sets -- [ ] list_regex_pattern_sets -- [ ] list_rule_groups -- [ ] list_rules -- [ ] list_size_constraint_sets -- [ ] list_sql_injection_match_sets -- [ ] list_subscribed_rule_groups -- [ ] list_web_acls -- [ ] list_xss_match_sets -- [ ] put_logging_configuration -- [ ] put_permission_policy -- [ ] update_byte_match_set -- [ ] update_geo_match_set -- [ ] update_ip_set -- [ ] update_rate_based_rule -- [ ] update_regex_match_set -- [ ] update_regex_pattern_set -- [ ] update_rule -- [ ] update_rule_group -- [ ] update_size_constraint_set -- [ ] update_sql_injection_match_set -- [ ] update_web_acl -- [ ] update_xss_match_set - -## waf-regional - 0% implemented -- [ ] associate_web_acl -- [ ] create_byte_match_set -- [ ] create_geo_match_set -- [ ] create_ip_set -- [ ] create_rate_based_rule -- [ ] create_regex_match_set -- [ ] create_regex_pattern_set -- [ ] create_rule -- [ ] create_rule_group -- [ ] create_size_constraint_set -- [ ] create_sql_injection_match_set -- [ ] create_web_acl -- [ ] create_xss_match_set -- [ ] delete_byte_match_set -- [ ] delete_geo_match_set -- [ ] delete_ip_set -- [ ] delete_logging_configuration -- [ ] delete_permission_policy -- [ ] delete_rate_based_rule -- [ ] delete_regex_match_set -- [ ] delete_regex_pattern_set -- [ ] delete_rule -- [ ] delete_rule_group -- [ ] delete_size_constraint_set -- [ ] delete_sql_injection_match_set -- [ ] delete_web_acl -- [ ] delete_xss_match_set -- [ ] disassociate_web_acl -- [ ] get_byte_match_set -- [ ] get_change_token -- [ ] get_change_token_status -- [ ] get_geo_match_set -- [ ] get_ip_set -- [ ] get_logging_configuration -- [ ] get_permission_policy -- [ ] get_rate_based_rule -- [ ] get_rate_based_rule_managed_keys -- [ ] get_regex_match_set -- [ ] get_regex_pattern_set -- [ ] get_rule -- [ ] get_rule_group -- [ ] get_sampled_requests -- [ ] get_size_constraint_set -- [ ] get_sql_injection_match_set -- [ ] get_web_acl -- [ ] get_web_acl_for_resource -- [ ] get_xss_match_set -- [ ] list_activated_rules_in_rule_group -- [ ] list_byte_match_sets -- [ ] list_geo_match_sets -- [ ] list_ip_sets -- [ ] list_logging_configurations -- [ ] list_rate_based_rules -- [ ] list_regex_match_sets -- [ ] list_regex_pattern_sets -- [ ] list_resources_for_web_acl -- [ ] list_rule_groups -- [ ] list_rules -- [ ] list_size_constraint_sets -- [ ] list_sql_injection_match_sets -- [ ] list_subscribed_rule_groups -- [ ] list_web_acls -- [ ] list_xss_match_sets -- [ ] put_logging_configuration -- [ ] put_permission_policy -- [ ] update_byte_match_set -- [ ] update_geo_match_set -- [ ] update_ip_set -- [ ] update_rate_based_rule -- [ ] update_regex_match_set -- [ ] update_regex_pattern_set -- [ ] update_rule -- [ ] update_rule_group -- [ ] update_size_constraint_set -- [ ] update_sql_injection_match_set -- [ ] update_web_acl -- [ ] update_xss_match_set - -## workdocs - 0% implemented -- [ ] abort_document_version_upload -- [ ] activate_user -- [ ] add_resource_permissions -- [ ] create_comment -- [ ] create_custom_metadata -- [ ] create_folder -- [ ] create_labels -- [ ] create_notification_subscription -- [ ] create_user -- [ ] deactivate_user -- [ ] delete_comment -- [ ] delete_custom_metadata -- [ ] delete_document -- [ ] delete_folder -- [ ] delete_folder_contents -- [ ] 
delete_labels -- [ ] delete_notification_subscription -- [ ] delete_user -- [ ] describe_activities -- [ ] describe_comments -- [ ] describe_document_versions -- [ ] describe_folder_contents -- [ ] describe_groups -- [ ] describe_notification_subscriptions -- [ ] describe_resource_permissions -- [ ] describe_root_folders -- [ ] describe_users -- [ ] get_current_user -- [ ] get_document -- [ ] get_document_path -- [ ] get_document_version -- [ ] get_folder -- [ ] get_folder_path -- [ ] get_resources -- [ ] initiate_document_version_upload -- [ ] remove_all_resource_permissions -- [ ] remove_resource_permission -- [ ] update_document -- [ ] update_document_version -- [ ] update_folder -- [ ] update_user - -## workmail - 0% implemented -- [ ] associate_delegate_to_resource -- [ ] associate_member_to_group -- [ ] create_alias -- [ ] create_group -- [ ] create_resource -- [ ] create_user -- [ ] delete_alias -- [ ] delete_group -- [ ] delete_mailbox_permissions -- [ ] delete_resource -- [ ] delete_user -- [ ] deregister_from_work_mail -- [ ] describe_group -- [ ] describe_organization -- [ ] describe_resource -- [ ] describe_user -- [ ] disassociate_delegate_from_resource -- [ ] disassociate_member_from_group -- [ ] list_aliases -- [ ] list_group_members -- [ ] list_groups -- [ ] list_mailbox_permissions -- [ ] list_organizations -- [ ] list_resource_delegates -- [ ] list_resources -- [ ] list_users -- [ ] put_mailbox_permissions -- [ ] register_to_work_mail -- [ ] reset_password -- [ ] update_primary_email_address -- [ ] update_resource - -## workspaces - 0% implemented -- [ ] associate_ip_groups -- [ ] authorize_ip_rules -- [ ] create_ip_group -- [ ] create_tags -- [ ] create_workspaces -- [ ] delete_ip_group -- [ ] delete_tags -- [ ] delete_workspace_image -- [ ] describe_account -- [ ] describe_account_modifications -- [ ] describe_client_properties -- [ ] describe_ip_groups -- [ ] describe_tags -- [ ] describe_workspace_bundles -- [ ] describe_workspace_directories -- [ ] describe_workspace_images -- [ ] describe_workspaces -- [ ] describe_workspaces_connection_status -- [ ] disassociate_ip_groups -- [ ] import_workspace_image -- [ ] list_available_management_cidr_ranges -- [ ] modify_account -- [ ] modify_client_properties -- [ ] modify_workspace_properties -- [ ] modify_workspace_state -- [ ] reboot_workspaces -- [ ] rebuild_workspaces -- [ ] revoke_ip_rules -- [ ] start_workspaces -- [ ] stop_workspaces -- [ ] terminate_workspaces -- [ ] update_rules_of_ip_group - -## xray - 0% implemented -- [ ] batch_get_traces -- [ ] create_group -- [ ] create_sampling_rule -- [ ] delete_group -- [ ] delete_sampling_rule -- [ ] get_encryption_config -- [ ] get_group -- [ ] get_groups -- [ ] get_sampling_rules -- [ ] get_sampling_statistic_summaries -- [ ] get_sampling_targets -- [ ] get_service_graph -- [ ] get_trace_graph -- [ ] get_trace_summaries -- [ ] put_encryption_config -- [ ] put_telemetry_records -- [ ] put_trace_segments -- [ ] update_group -- [ ] update_sampling_rule + +## acm - 41% implemented +- [X] add_tags_to_certificate +- [X] delete_certificate +- [ ] describe_certificate +- [ ] export_certificate +- [X] get_certificate +- [ ] import_certificate +- [ ] list_certificates +- [ ] list_tags_for_certificate +- [X] remove_tags_from_certificate +- [X] request_certificate +- [ ] resend_validation_email +- [ ] update_certificate_options + +## acm-pca - 0% implemented +- [ ] create_certificate_authority +- [ ] create_certificate_authority_audit_report +- [ ] delete_certificate_authority +- [ ] 
describe_certificate_authority +- [ ] describe_certificate_authority_audit_report +- [ ] get_certificate +- [ ] get_certificate_authority_certificate +- [ ] get_certificate_authority_csr +- [ ] import_certificate_authority_certificate +- [ ] issue_certificate +- [ ] list_certificate_authorities +- [ ] list_tags +- [ ] revoke_certificate +- [ ] tag_certificate_authority +- [ ] untag_certificate_authority +- [ ] update_certificate_authority + +## alexaforbusiness - 0% implemented +- [ ] associate_contact_with_address_book +- [ ] associate_device_with_room +- [ ] associate_skill_group_with_room +- [ ] create_address_book +- [ ] create_contact +- [ ] create_profile +- [ ] create_room +- [ ] create_skill_group +- [ ] create_user +- [ ] delete_address_book +- [ ] delete_contact +- [ ] delete_profile +- [ ] delete_room +- [ ] delete_room_skill_parameter +- [ ] delete_skill_group +- [ ] delete_user +- [ ] disassociate_contact_from_address_book +- [ ] disassociate_device_from_room +- [ ] disassociate_skill_group_from_room +- [ ] get_address_book +- [ ] get_contact +- [ ] get_device +- [ ] get_profile +- [ ] get_room +- [ ] get_room_skill_parameter +- [ ] get_skill_group +- [ ] list_skills +- [ ] list_tags +- [ ] put_room_skill_parameter +- [ ] resolve_room +- [ ] revoke_invitation +- [ ] search_address_books +- [ ] search_contacts +- [ ] search_devices +- [ ] search_profiles +- [ ] search_rooms +- [ ] search_skill_groups +- [ ] search_users +- [ ] send_invitation +- [ ] start_device_sync +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_address_book +- [ ] update_contact +- [ ] update_device +- [ ] update_profile +- [ ] update_room +- [ ] update_skill_group + +## apigateway - 24% implemented +- [ ] create_api_key +- [ ] create_authorizer +- [ ] create_base_path_mapping +- [X] create_deployment +- [ ] create_documentation_part +- [ ] create_documentation_version +- [ ] create_domain_name +- [ ] create_model +- [ ] create_request_validator +- [X] create_resource +- [X] create_rest_api +- [X] create_stage +- [X] create_usage_plan +- [X] create_usage_plan_key +- [ ] create_vpc_link +- [ ] delete_api_key +- [ ] delete_authorizer +- [ ] delete_base_path_mapping +- [ ] delete_client_certificate +- [X] delete_deployment +- [ ] delete_documentation_part +- [ ] delete_documentation_version +- [ ] delete_domain_name +- [ ] delete_gateway_response +- [X] delete_integration +- [X] delete_integration_response +- [ ] delete_method +- [X] delete_method_response +- [ ] delete_model +- [ ] delete_request_validator +- [X] delete_resource +- [X] delete_rest_api +- [ ] delete_stage +- [X] delete_usage_plan +- [X] delete_usage_plan_key +- [ ] delete_vpc_link +- [ ] flush_stage_authorizers_cache +- [ ] flush_stage_cache +- [ ] generate_client_certificate +- [ ] get_account +- [ ] get_api_key +- [ ] get_api_keys +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_base_path_mapping +- [ ] get_base_path_mappings +- [ ] get_client_certificate +- [ ] get_client_certificates +- [X] get_deployment +- [X] get_deployments +- [ ] get_documentation_part +- [ ] get_documentation_parts +- [ ] get_documentation_version +- [ ] get_documentation_versions +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_export +- [ ] get_gateway_response +- [ ] get_gateway_responses +- [X] get_integration +- [X] get_integration_response +- [X] get_method +- [X] get_method_response +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_request_validator +- [ ] get_request_validators +- [X] get_resource +- [ ] 
get_resources +- [X] get_rest_api +- [ ] get_rest_apis +- [ ] get_sdk +- [ ] get_sdk_type +- [ ] get_sdk_types +- [X] get_stage +- [X] get_stages +- [ ] get_tags +- [ ] get_usage +- [X] get_usage_plan +- [X] get_usage_plan_key +- [X] get_usage_plan_keys +- [X] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links +- [ ] import_api_keys +- [ ] import_documentation_parts +- [ ] import_rest_api +- [ ] put_gateway_response +- [ ] put_integration +- [ ] put_integration_response +- [ ] put_method +- [ ] put_method_response +- [ ] put_rest_api +- [ ] tag_resource +- [ ] test_invoke_authorizer +- [ ] test_invoke_method +- [ ] untag_resource +- [ ] update_account +- [ ] update_api_key +- [ ] update_authorizer +- [ ] update_base_path_mapping +- [ ] update_client_certificate +- [ ] update_deployment +- [ ] update_documentation_part +- [ ] update_documentation_version +- [ ] update_domain_name +- [ ] update_gateway_response +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_method +- [ ] update_method_response +- [ ] update_model +- [ ] update_request_validator +- [ ] update_resource +- [ ] update_rest_api +- [X] update_stage +- [ ] update_usage +- [ ] update_usage_plan +- [ ] update_vpc_link + +## application-autoscaling - 0% implemented +- [ ] delete_scaling_policy +- [ ] delete_scheduled_action +- [ ] deregister_scalable_target +- [ ] describe_scalable_targets +- [ ] describe_scaling_activities +- [ ] describe_scaling_policies +- [ ] describe_scheduled_actions +- [ ] put_scaling_policy +- [ ] put_scheduled_action +- [ ] register_scalable_target + +## appstream - 0% implemented +- [ ] associate_fleet +- [ ] copy_image +- [ ] create_directory_config +- [ ] create_fleet +- [ ] create_image_builder +- [ ] create_image_builder_streaming_url +- [ ] create_stack +- [ ] create_streaming_url +- [ ] delete_directory_config +- [ ] delete_fleet +- [ ] delete_image +- [ ] delete_image_builder +- [ ] delete_stack +- [ ] describe_directory_configs +- [ ] describe_fleets +- [ ] describe_image_builders +- [ ] describe_images +- [ ] describe_sessions +- [ ] describe_stacks +- [ ] disassociate_fleet +- [ ] expire_session +- [ ] list_associated_fleets +- [ ] list_associated_stacks +- [ ] list_tags_for_resource +- [ ] start_fleet +- [ ] start_image_builder +- [ ] stop_fleet +- [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_directory_config +- [ ] update_fleet +- [ ] update_stack + +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_api_key +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + +## athena - 0% implemented +- [ ] batch_get_named_query +- [ ] batch_get_query_execution +- [ ] create_named_query +- [ ] delete_named_query +- [ ] get_named_query +- [ ] get_query_execution +- [ ] get_query_results +- [ ] list_named_queries +- [ ] list_query_executions +- [ ] start_query_execution +- [ ] stop_query_execution + +## autoscaling - 44% implemented +- [X] attach_instances +- [X] attach_load_balancer_target_groups +- 
[X] attach_load_balancers +- [ ] complete_lifecycle_action +- [X] create_auto_scaling_group +- [X] create_launch_configuration +- [X] create_or_update_tags +- [X] delete_auto_scaling_group +- [X] delete_launch_configuration +- [ ] delete_lifecycle_hook +- [ ] delete_notification_configuration +- [X] delete_policy +- [ ] delete_scheduled_action +- [ ] delete_tags +- [ ] describe_account_limits +- [ ] describe_adjustment_types +- [X] describe_auto_scaling_groups +- [X] describe_auto_scaling_instances +- [ ] describe_auto_scaling_notification_types +- [X] describe_launch_configurations +- [ ] describe_lifecycle_hook_types +- [ ] describe_lifecycle_hooks +- [X] describe_load_balancer_target_groups +- [X] describe_load_balancers +- [ ] describe_metric_collection_types +- [ ] describe_notification_configurations +- [X] describe_policies +- [ ] describe_scaling_activities +- [ ] describe_scaling_process_types +- [ ] describe_scheduled_actions +- [ ] describe_tags +- [ ] describe_termination_policy_types +- [X] detach_instances +- [X] detach_load_balancer_target_groups +- [X] detach_load_balancers +- [ ] disable_metrics_collection +- [ ] enable_metrics_collection +- [ ] enter_standby +- [X] execute_policy +- [ ] exit_standby +- [ ] put_lifecycle_hook +- [ ] put_notification_configuration +- [ ] put_scaling_policy +- [ ] put_scheduled_update_group_action +- [ ] record_lifecycle_action_heartbeat +- [ ] resume_processes +- [X] set_desired_capacity +- [X] set_instance_health +- [ ] set_instance_protection +- [X] suspend_processes +- [ ] terminate_instance_in_auto_scaling_group +- [X] update_auto_scaling_group + +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans + +## batch - 93% implemented +- [ ] cancel_job +- [X] create_compute_environment +- [X] create_job_queue +- [X] delete_compute_environment +- [X] delete_job_queue +- [X] deregister_job_definition +- [X] describe_compute_environments +- [X] describe_job_definitions +- [X] describe_job_queues +- [X] describe_jobs +- [X] list_jobs +- [X] register_job_definition +- [X] submit_job +- [X] terminate_job +- [X] update_compute_environment +- [X] update_job_queue + +## budgets - 0% implemented +- [ ] create_budget +- [ ] create_notification +- [ ] create_subscriber +- [ ] delete_budget +- [ ] delete_notification +- [ ] delete_subscriber +- [ ] describe_budget +- [ ] describe_budgets +- [ ] describe_notifications_for_budget +- [ ] describe_subscribers_for_notification +- [ ] update_budget +- [ ] update_notification +- [ ] update_subscriber + +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] describe_environments +- [ ] list_environments +- [ ] update_environment +- [ ] update_environment_membership + +## clouddirectory - 0% implemented +- [ ] add_facet_to_object +- [ ] apply_schema +- [ ] attach_object +- [ ] attach_policy +- [ ] attach_to_index +- [ ] attach_typed_link +- [ ] batch_read +- [ ] batch_write +- [ ] create_directory +- [ ] create_facet +- [ ] create_index +- [ ] create_object +- [ ] create_schema +- [ ] create_typed_link_facet +- [ ] 
delete_directory +- [ ] delete_facet +- [ ] delete_object +- [ ] delete_schema +- [ ] delete_typed_link_facet +- [ ] detach_from_index +- [ ] detach_object +- [ ] detach_policy +- [ ] detach_typed_link +- [ ] disable_directory +- [ ] enable_directory +- [ ] get_applied_schema_version +- [ ] get_directory +- [ ] get_facet +- [ ] get_object_attributes +- [ ] get_object_information +- [ ] get_schema_as_json +- [ ] get_typed_link_facet_information +- [ ] list_applied_schema_arns +- [ ] list_attached_indices +- [ ] list_development_schema_arns +- [ ] list_directories +- [ ] list_facet_attributes +- [ ] list_facet_names +- [ ] list_incoming_typed_links +- [ ] list_index +- [ ] list_object_attributes +- [ ] list_object_children +- [ ] list_object_parent_paths +- [ ] list_object_parents +- [ ] list_object_policies +- [ ] list_outgoing_typed_links +- [ ] list_policy_attachments +- [ ] list_published_schema_arns +- [ ] list_tags_for_resource +- [ ] list_typed_link_facet_attributes +- [ ] list_typed_link_facet_names +- [ ] lookup_policy +- [ ] publish_schema +- [ ] put_schema_from_json +- [ ] remove_facet_from_object +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_facet +- [ ] update_object_attributes +- [ ] update_schema +- [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema + +## cloudformation - 65% implemented +- [ ] cancel_update_stack +- [ ] continue_update_rollback +- [X] create_change_set +- [X] create_stack +- [X] create_stack_instances +- [X] create_stack_set +- [X] delete_change_set +- [X] delete_stack +- [X] delete_stack_instances +- [X] delete_stack_set +- [ ] deploy +- [ ] describe_account_limits +- [X] describe_change_set +- [ ] describe_stack_drift_detection_status +- [X] describe_stack_events +- [X] describe_stack_instance +- [X] describe_stack_resource +- [ ] describe_stack_resource_drifts +- [X] describe_stack_resources +- [X] describe_stack_set +- [X] describe_stack_set_operation +- [X] describe_stacks +- [ ] detect_stack_drift +- [ ] detect_stack_resource_drift +- [ ] estimate_template_cost +- [X] execute_change_set +- [ ] get_stack_policy +- [X] get_template +- [ ] get_template_summary +- [X] list_change_sets +- [X] list_exports +- [ ] list_imports +- [X] list_stack_instances +- [X] list_stack_resources +- [X] list_stack_set_operation_results +- [X] list_stack_set_operations +- [X] list_stack_sets +- [X] list_stacks +- [ ] package +- [ ] set_stack_policy +- [ ] signal_resource +- [X] stop_stack_set_operation +- [X] update_stack +- [X] update_stack_instances +- [X] update_stack_set +- [ ] update_termination_protection +- [X] validate_template +- [ ] wait + +## cloudfront - 0% implemented +- [ ] create_cloud_front_origin_access_identity +- [ ] create_distribution +- [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile +- [ ] create_invalidation +- [ ] create_public_key +- [ ] create_streaming_distribution +- [ ] create_streaming_distribution_with_tags +- [ ] delete_cloud_front_origin_access_identity +- [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key +- [ ] delete_service_linked_role +- [ ] delete_streaming_distribution +- [ ] get_cloud_front_origin_access_identity +- [ ] get_cloud_front_origin_access_identity_config +- [ ] get_distribution +- [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] 
get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config +- [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config +- [ ] get_streaming_distribution +- [ ] get_streaming_distribution_config +- [ ] list_cloud_front_origin_access_identities +- [ ] list_distributions +- [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles +- [ ] list_invalidations +- [ ] list_public_keys +- [ ] list_streaming_distributions +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cloud_front_origin_access_identity +- [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key +- [ ] update_streaming_distribution + +## cloudhsm - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_hapg +- [ ] create_hsm +- [ ] create_luna_client +- [ ] delete_hapg +- [ ] delete_hsm +- [ ] delete_luna_client +- [ ] describe_hapg +- [ ] describe_hsm +- [ ] describe_luna_client +- [ ] get_config +- [ ] list_available_zones +- [ ] list_hapgs +- [ ] list_hsms +- [ ] list_luna_clients +- [ ] list_tags_for_resource +- [ ] modify_hapg +- [ ] modify_hsm +- [ ] modify_luna_client +- [ ] remove_tags_from_resource + +## cloudhsmv2 - 0% implemented +- [ ] create_cluster +- [ ] create_hsm +- [ ] delete_cluster +- [ ] delete_hsm +- [ ] describe_backups +- [ ] describe_clusters +- [ ] initialize_cluster +- [ ] list_tags +- [ ] tag_resource +- [ ] untag_resource + +## cloudsearch - 0% implemented +- [ ] build_suggesters +- [ ] create_domain +- [ ] define_analysis_scheme +- [ ] define_expression +- [ ] define_index_field +- [ ] define_suggester +- [ ] delete_analysis_scheme +- [ ] delete_domain +- [ ] delete_expression +- [ ] delete_index_field +- [ ] delete_suggester +- [ ] describe_analysis_schemes +- [ ] describe_availability_options +- [ ] describe_domains +- [ ] describe_expressions +- [ ] describe_index_fields +- [ ] describe_scaling_parameters +- [ ] describe_service_access_policies +- [ ] describe_suggesters +- [ ] index_documents +- [ ] list_domain_names +- [ ] update_availability_options +- [ ] update_scaling_parameters +- [ ] update_service_access_policies + +## cloudsearchdomain - 0% implemented +- [ ] search +- [ ] suggest +- [ ] upload_documents + +## cloudtrail - 0% implemented +- [ ] add_tags +- [ ] create_trail +- [ ] delete_trail +- [ ] describe_trails +- [ ] get_event_selectors +- [ ] get_trail_status +- [ ] list_public_keys +- [ ] list_tags +- [ ] lookup_events +- [ ] put_event_selectors +- [ ] remove_tags +- [ ] start_logging +- [ ] stop_logging +- [ ] update_trail + +## cloudwatch - 56% implemented +- [X] delete_alarms +- [X] delete_dashboards +- [ ] describe_alarm_history +- [ ] describe_alarms +- [ ] describe_alarms_for_metric +- [ ] disable_alarm_actions +- [ ] enable_alarm_actions +- [X] get_dashboard +- [ ] get_metric_data +- [X] get_metric_statistics +- [X] list_dashboards +- [ ] list_metrics +- [X] put_dashboard +- [X] put_metric_alarm +- [X] put_metric_data +- [X] set_alarm_state + +## codebuild - 0% implemented +- [ ] batch_delete_builds +- [ ] batch_get_builds +- [ ] batch_get_projects +- [ ] create_project +- [ ] create_webhook +- [ ] delete_project +- [ ] delete_webhook +- [ ] invalidate_project_cache +- [ ] list_builds +- [ ] list_builds_for_project +- [ ] list_curated_environment_images +- [ ] list_projects +- [ ] start_build +- [ ] stop_build +- [ ] update_project +- [ ] update_webhook 
+
+## codecommit - 0% implemented
+- [ ] batch_get_repositories
+- [ ] create_branch
+- [ ] create_pull_request
+- [ ] create_repository
+- [ ] delete_branch
+- [ ] delete_comment_content
+- [ ] delete_repository
+- [ ] describe_pull_request_events
+- [ ] get_blob
+- [ ] get_branch
+- [ ] get_comment
+- [ ] get_comments_for_compared_commit
+- [ ] get_comments_for_pull_request
+- [ ] get_commit
+- [ ] get_differences
+- [ ] get_merge_conflicts
+- [ ] get_pull_request
+- [ ] get_repository
+- [ ] get_repository_triggers
+- [ ] list_branches
+- [ ] list_pull_requests
+- [ ] list_repositories
+- [ ] merge_pull_request_by_fast_forward
+- [ ] post_comment_for_compared_commit
+- [ ] post_comment_for_pull_request
+- [ ] post_comment_reply
+- [ ] put_file
+- [ ] put_repository_triggers
+- [ ] test_repository_triggers
+- [ ] update_comment
+- [ ] update_default_branch
+- [ ] update_pull_request_description
+- [ ] update_pull_request_status
+- [ ] update_pull_request_title
+- [ ] update_repository_description
+- [ ] update_repository_name
+
+## codedeploy - 0% implemented
+- [ ] add_tags_to_on_premises_instances
+- [ ] batch_get_application_revisions
+- [ ] batch_get_applications
+- [ ] batch_get_deployment_groups
+- [ ] batch_get_deployment_instances
+- [ ] batch_get_deployments
+- [ ] batch_get_on_premises_instances
+- [ ] continue_deployment
+- [ ] create_application
+- [ ] create_deployment
+- [ ] create_deployment_config
+- [ ] create_deployment_group
+- [ ] delete_application
+- [ ] delete_deployment_config
+- [ ] delete_deployment_group
+- [ ] delete_git_hub_account_token
+- [ ] deregister_on_premises_instance
+- [ ] get_application
+- [ ] get_application_revision
+- [ ] get_deployment
+- [ ] get_deployment_config
+- [ ] get_deployment_group
+- [ ] get_deployment_instance
+- [ ] get_on_premises_instance
+- [ ] list_application_revisions
+- [ ] list_applications
+- [ ] list_deployment_configs
+- [ ] list_deployment_groups
+- [ ] list_deployment_instances
+- [ ] list_deployments
+- [ ] list_git_hub_account_token_names
+- [ ] list_on_premises_instances
+- [ ] put_lifecycle_event_hook_execution_status
+- [ ] register_application_revision
+- [ ] register_on_premises_instance
+- [ ] remove_tags_from_on_premises_instances
+- [ ] skip_wait_time_for_instance_termination
+- [ ] stop_deployment
+- [ ] update_application
+- [ ] update_deployment_group
+
+## codepipeline - 0% implemented
+- [ ] acknowledge_job
+- [ ] acknowledge_third_party_job
+- [ ] create_custom_action_type
+- [ ] create_pipeline
+- [ ] delete_custom_action_type
+- [ ] delete_pipeline
+- [ ] disable_stage_transition
+- [ ] enable_stage_transition
+- [ ] get_job_details
+- [ ] get_pipeline
+- [ ] get_pipeline_execution
+- [ ] get_pipeline_state
+- [ ] get_third_party_job_details
+- [ ] list_action_types
+- [ ] list_pipeline_executions
+- [ ] list_pipelines
+- [ ] poll_for_jobs
+- [ ] poll_for_third_party_jobs
+- [ ] put_action_revision
+- [ ] put_approval_result
+- [ ] put_job_failure_result
+- [ ] put_job_success_result
+- [ ] put_third_party_job_failure_result
+- [ ] put_third_party_job_success_result
+- [ ] retry_stage_execution
+- [ ] start_pipeline_execution
+- [ ] update_pipeline
+
+## codestar - 0% implemented
+- [ ] associate_team_member
+- [ ] create_project
+- [ ] create_user_profile
+- [ ] delete_project
+- [ ] delete_user_profile
+- [ ] describe_project
+- [ ] describe_user_profile
+- [ ] disassociate_team_member
+- [ ] list_projects
+- [ ] list_resources
+- [ ] list_tags_for_project
+- [ ] list_team_members
+- [ ] list_user_profiles
+- [ ] tag_project
+- [ ] untag_project
+- [ ] update_project
+- [ ] update_team_member
+- [ ] update_user_profile
+
+## cognito-identity - 0% implemented
+- [ ] create_identity_pool
+- [ ] delete_identities
+- [ ] delete_identity_pool
+- [ ] describe_identity
+- [ ] describe_identity_pool
+- [ ] get_credentials_for_identity
+- [ ] get_id
+- [ ] get_identity_pool_roles
+- [ ] get_open_id_token
+- [ ] get_open_id_token_for_developer_identity
+- [ ] list_identities
+- [ ] list_identity_pools
+- [ ] lookup_developer_identity
+- [ ] merge_developer_identities
+- [ ] set_identity_pool_roles
+- [ ] unlink_developer_identity
+- [ ] unlink_identity
+- [ ] update_identity_pool
+
+## cognito-idp - 34% implemented
+- [ ] add_custom_attributes
+- [X] admin_add_user_to_group
+- [ ] admin_confirm_sign_up
+- [X] admin_create_user
+- [X] admin_delete_user
+- [ ] admin_delete_user_attributes
+- [ ] admin_disable_provider_for_user
+- [X] admin_disable_user
+- [X] admin_enable_user
+- [ ] admin_forget_device
+- [ ] admin_get_device
+- [X] admin_get_user
+- [X] admin_initiate_auth
+- [ ] admin_link_provider_for_user
+- [ ] admin_list_devices
+- [X] admin_list_groups_for_user
+- [ ] admin_list_user_auth_events
+- [X] admin_remove_user_from_group
+- [ ] admin_reset_user_password
+- [ ] admin_respond_to_auth_challenge
+- [ ] admin_set_user_mfa_preference
+- [ ] admin_set_user_settings
+- [ ] admin_update_auth_event_feedback
+- [ ] admin_update_device_status
+- [X] admin_update_user_attributes
+- [ ] admin_user_global_sign_out
+- [ ] associate_software_token
+- [X] change_password
+- [ ] confirm_device
+- [X] confirm_forgot_password
+- [ ] confirm_sign_up
+- [X] create_group
+- [X] create_identity_provider
+- [ ] create_resource_server
+- [ ] create_user_import_job
+- [X] create_user_pool
+- [X] create_user_pool_client
+- [X] create_user_pool_domain
+- [X] delete_group
+- [X] delete_identity_provider
+- [ ] delete_resource_server
+- [ ] delete_user
+- [ ] delete_user_attributes
+- [X] delete_user_pool
+- [X] delete_user_pool_client
+- [X] delete_user_pool_domain
+- [X] describe_identity_provider
+- [ ] describe_resource_server
+- [ ] describe_risk_configuration
+- [ ] describe_user_import_job
+- [X] describe_user_pool
+- [X] describe_user_pool_client
+- [X] describe_user_pool_domain
+- [ ] forget_device
+- [ ] forgot_password
+- [ ] get_csv_header
+- [ ] get_device
+- [X] get_group
+- [ ] get_identity_provider_by_identifier
+- [ ] get_signing_certificate
+- [ ] get_ui_customization
+- [ ] get_user
+- [ ] get_user_attribute_verification_code
+- [ ] get_user_pool_mfa_config
+- [ ] global_sign_out
+- [ ] initiate_auth
+- [ ] list_devices
+- [X] list_groups
+- [X] list_identity_providers
+- [ ] list_resource_servers
+- [ ] list_user_import_jobs
+- [X] list_user_pool_clients
+- [X] list_user_pools
+- [X] list_users
+- [X] list_users_in_group
+- [ ] resend_confirmation_code
+- [X] respond_to_auth_challenge
+- [ ] set_risk_configuration
+- [ ] set_ui_customization
+- [ ] set_user_mfa_preference
+- [ ] set_user_pool_mfa_config
+- [ ] set_user_settings
+- [ ] sign_up
+- [ ] start_user_import_job
+- [ ] stop_user_import_job
+- [ ] update_auth_event_feedback
+- [ ] update_device_status
+- [ ] update_group
+- [X] update_identity_provider
+- [ ] update_resource_server
+- [ ] update_user_attributes
+- [ ] update_user_pool
+- [X] update_user_pool_client
+- [ ] verify_software_token
+- [ ] verify_user_attribute
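The cognito-idp section just above is only a third implemented, but the checked subset already covers a full user-pool round trip. A minimal sketch, assuming moto's `mock_cognitoidp` decorator (the pool and user names are made up):

```python
import boto3
from moto import mock_cognitoidp


@mock_cognitoidp
def test_user_pool_roundtrip():
    # create_user_pool, admin_create_user and list_users are all [X] above.
    idp = boto3.client("cognito-idp", region_name="us-west-2")
    pool_id = idp.create_user_pool(PoolName="example-pool")["UserPool"]["Id"]
    idp.admin_create_user(UserPoolId=pool_id, Username="alice")
    users = idp.list_users(UserPoolId=pool_id)["Users"]
    assert users[0]["Username"] == "alice"
```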
+
+## cognito-sync - 0% implemented
+- [ ] bulk_publish
+- [ ] delete_dataset
+- [ ] describe_dataset
+- [ ] describe_identity_pool_usage
+- [ ] describe_identity_usage
+- [ ] get_bulk_publish_details
+- [ ] get_cognito_events
+- [ ] get_identity_pool_configuration
+- [ ] list_datasets
+- [ ] list_identity_pool_usage
+- [ ] list_records
+- [ ] register_device
+- [ ] set_cognito_events
+- [ ] set_identity_pool_configuration
+- [ ] subscribe_to_dataset
+- [ ] unsubscribe_from_dataset
+- [ ] update_records
+
+## comprehend - 0% implemented
+- [ ] batch_detect_dominant_language
+- [ ] batch_detect_entities
+- [ ] batch_detect_key_phrases
+- [ ] batch_detect_sentiment
+- [ ] describe_topics_detection_job
+- [ ] detect_dominant_language
+- [ ] detect_entities
+- [ ] detect_key_phrases
+- [ ] detect_sentiment
+- [ ] list_topics_detection_jobs
+- [ ] start_topics_detection_job
+
+## config - 0% implemented
+- [ ] batch_get_resource_config
+- [ ] delete_aggregation_authorization
+- [ ] delete_config_rule
+- [ ] delete_configuration_aggregator
+- [ ] delete_configuration_recorder
+- [ ] delete_delivery_channel
+- [ ] delete_evaluation_results
+- [ ] delete_pending_aggregation_request
+- [ ] deliver_config_snapshot
+- [ ] describe_aggregate_compliance_by_config_rules
+- [ ] describe_aggregation_authorizations
+- [ ] describe_compliance_by_config_rule
+- [ ] describe_compliance_by_resource
+- [ ] describe_config_rule_evaluation_status
+- [ ] describe_config_rules
+- [ ] describe_configuration_aggregator_sources_status
+- [ ] describe_configuration_aggregators
+- [ ] describe_configuration_recorder_status
+- [ ] describe_configuration_recorders
+- [ ] describe_delivery_channel_status
+- [ ] describe_delivery_channels
+- [ ] describe_pending_aggregation_requests
+- [ ] get_aggregate_compliance_details_by_config_rule
+- [ ] get_aggregate_config_rule_compliance_summary
+- [ ] get_compliance_details_by_config_rule
+- [ ] get_compliance_details_by_resource
+- [ ] get_compliance_summary_by_config_rule
+- [ ] get_compliance_summary_by_resource_type
+- [ ] get_discovered_resource_counts
+- [ ] get_resource_config_history
+- [ ] list_discovered_resources
+- [ ] put_aggregation_authorization
+- [ ] put_config_rule
+- [ ] put_configuration_aggregator
+- [ ] put_configuration_recorder
+- [ ] put_delivery_channel
+- [ ] put_evaluations
+- [ ] start_config_rules_evaluation
+- [ ] start_configuration_recorder
+- [ ] stop_configuration_recorder
+
+## connect - 0% implemented
+- [ ] start_outbound_voice_contact
+- [ ] stop_contact
+
+## cur - 0% implemented
+- [ ] delete_report_definition
+- [ ] describe_report_definitions
+- [ ] put_report_definition
+
+## datapipeline - 42% implemented
+- [X] activate_pipeline
+- [ ] add_tags
+- [X] create_pipeline
+- [ ] deactivate_pipeline
+- [X] delete_pipeline
+- [X] describe_objects
+- [X] describe_pipelines
+- [ ] evaluate_expression
+- [X] get_pipeline_definition
+- [X] list_pipelines
+- [ ] poll_for_task
+- [X] put_pipeline_definition
+- [ ] query_objects
+- [ ] remove_tags
+- [ ] report_task_progress
+- [ ] report_task_runner_heartbeat
+- [ ] set_status
+- [ ] set_task_status
+- [ ] validate_pipeline_definition
+
+## dax - 0% implemented
+- [ ] create_cluster
+- [ ] create_parameter_group
+- [ ] create_subnet_group
+- [ ] decrease_replication_factor
+- [ ] delete_cluster
+- [ ] delete_parameter_group
+- [ ] delete_subnet_group
+- [ ] describe_clusters
+- [ ] describe_default_parameters
+- [ ] describe_events
+- [ ] describe_parameter_groups
+- [ ] describe_parameters
+- [ ] describe_subnet_groups
+- [ ] increase_replication_factor
+- [ ] list_tags
+- [ ] reboot_node
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_cluster
+- [ ] update_parameter_group
+- [ ] update_subnet_group
+
+## devicefarm - 0% implemented
+- [ ] create_device_pool
+- [ ] create_instance_profile
+- [ ] create_network_profile
+- [ ] create_project
+- [ ] create_remote_access_session
+- [ ] create_upload
+- [ ] delete_device_pool
+- [ ] delete_instance_profile
+- [ ] delete_network_profile
+- [ ] delete_project
+- [ ] delete_remote_access_session
+- [ ] delete_run
+- [ ] delete_upload
+- [ ] get_account_settings
+- [ ] get_device
+- [ ] get_device_instance
+- [ ] get_device_pool
+- [ ] get_device_pool_compatibility
+- [ ] get_instance_profile
+- [ ] get_job
+- [ ] get_network_profile
+- [ ] get_offering_status
+- [ ] get_project
+- [ ] get_remote_access_session
+- [ ] get_run
+- [ ] get_suite
+- [ ] get_test
+- [ ] get_upload
+- [ ] install_to_remote_access_session
+- [ ] list_artifacts
+- [ ] list_device_instances
+- [ ] list_device_pools
+- [ ] list_devices
+- [ ] list_instance_profiles
+- [ ] list_jobs
+- [ ] list_network_profiles
+- [ ] list_offering_promotions
+- [ ] list_offering_transactions
+- [ ] list_offerings
+- [ ] list_projects
+- [ ] list_remote_access_sessions
+- [ ] list_runs
+- [ ] list_samples
+- [ ] list_suites
+- [ ] list_tests
+- [ ] list_unique_problems
+- [ ] list_uploads
+- [ ] purchase_offering
+- [ ] renew_offering
+- [ ] schedule_run
+- [ ] stop_remote_access_session
+- [ ] stop_run
+- [ ] update_device_instance
+- [ ] update_device_pool
+- [ ] update_instance_profile
+- [ ] update_network_profile
+- [ ] update_project
+
+## directconnect - 0% implemented
+- [ ] allocate_connection_on_interconnect
+- [ ] allocate_hosted_connection
+- [ ] allocate_private_virtual_interface
+- [ ] allocate_public_virtual_interface
+- [ ] associate_connection_with_lag
+- [ ] associate_hosted_connection
+- [ ] associate_virtual_interface
+- [ ] confirm_connection
+- [ ] confirm_private_virtual_interface
+- [ ] confirm_public_virtual_interface
+- [ ] create_bgp_peer
+- [ ] create_connection
+- [ ] create_direct_connect_gateway
+- [ ] create_direct_connect_gateway_association
+- [ ] create_interconnect
+- [ ] create_lag
+- [ ] create_private_virtual_interface
+- [ ] create_public_virtual_interface
+- [ ] delete_bgp_peer
+- [ ] delete_connection
+- [ ] delete_direct_connect_gateway
+- [ ] delete_direct_connect_gateway_association
+- [ ] delete_interconnect
+- [ ] delete_lag
+- [ ] delete_virtual_interface
+- [ ] describe_connection_loa
+- [ ] describe_connections
+- [ ] describe_connections_on_interconnect
+- [ ] describe_direct_connect_gateway_associations
+- [ ] describe_direct_connect_gateway_attachments
+- [ ] describe_direct_connect_gateways
+- [ ] describe_hosted_connections
+- [ ] describe_interconnect_loa
+- [ ] describe_interconnects
+- [ ] describe_lags
+- [ ] describe_loa
+- [ ] describe_locations
+- [ ] describe_tags
+- [ ] describe_virtual_gateways
+- [ ] describe_virtual_interfaces
+- [ ] disassociate_connection_from_lag
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_lag
+
+## discovery - 0% implemented
+- [ ] associate_configuration_items_to_application
+- [ ] create_application
+- [ ] create_tags
+- [ ] delete_applications
+- [ ] delete_tags
+- [ ] describe_agents
+- [ ] describe_configurations
+- [ ] describe_export_configurations
+- [ ] describe_export_tasks
+- [ ] describe_tags
+- [ ] disassociate_configuration_items_from_application
+- [ ] export_configurations
+- [ ] get_discovery_summary
+- [ ] list_configurations
+- [ ] list_server_neighbors
+- [ ] start_data_collection_by_agent_ids
+- [ ] start_export_task
+- [ ] stop_data_collection_by_agent_ids
+- [ ] update_application
+
+## dms - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] create_endpoint
+- [ ] create_event_subscription
+- [ ] create_replication_instance
+- [ ] create_replication_subnet_group
+- [ ] create_replication_task
+- [ ] delete_certificate
+- [ ] delete_endpoint
+- [ ] delete_event_subscription
+- [ ] delete_replication_instance
+- [ ] delete_replication_subnet_group
+- [ ] delete_replication_task
+- [ ] describe_account_attributes
+- [ ] describe_certificates
+- [ ] describe_connections
+- [ ] describe_endpoint_types
+- [ ] describe_endpoints
+- [ ] describe_event_categories
+- [ ] describe_event_subscriptions
+- [ ] describe_events
+- [ ] describe_orderable_replication_instances
+- [ ] describe_refresh_schemas_status
+- [ ] describe_replication_instance_task_logs
+- [ ] describe_replication_instances
+- [ ] describe_replication_subnet_groups
+- [ ] describe_replication_task_assessment_results
+- [ ] describe_replication_tasks
+- [ ] describe_schemas
+- [ ] describe_table_statistics
+- [ ] import_certificate
+- [ ] list_tags_for_resource
+- [ ] modify_endpoint
+- [ ] modify_event_subscription
+- [ ] modify_replication_instance
+- [ ] modify_replication_subnet_group
+- [ ] modify_replication_task
+- [ ] reboot_replication_instance
+- [ ] refresh_schemas
+- [ ] reload_tables
+- [ ] remove_tags_from_resource
+- [ ] start_replication_task
+- [ ] start_replication_task_assessment
+- [ ] stop_replication_task
+- [ ] test_connection
+
+## ds - 0% implemented
+- [ ] add_ip_routes
+- [ ] add_tags_to_resource
+- [ ] cancel_schema_extension
+- [ ] connect_directory
+- [ ] create_alias
+- [ ] create_computer
+- [ ] create_conditional_forwarder
+- [ ] create_directory
+- [ ] create_microsoft_ad
+- [ ] create_snapshot
+- [ ] create_trust
+- [ ] delete_conditional_forwarder
+- [ ] delete_directory
+- [ ] delete_snapshot
+- [ ] delete_trust
+- [ ] deregister_event_topic
+- [ ] describe_conditional_forwarders
+- [ ] describe_directories
+- [ ] describe_domain_controllers
+- [ ] describe_event_topics
+- [ ] describe_snapshots
+- [ ] describe_trusts
+- [ ] disable_radius
+- [ ] disable_sso
+- [ ] enable_radius
+- [ ] enable_sso
+- [ ] get_directory_limits
+- [ ] get_snapshot_limits
+- [ ] list_ip_routes
+- [ ] list_schema_extensions
+- [ ] list_tags_for_resource
+- [ ] register_event_topic
+- [ ] remove_ip_routes
+- [ ] remove_tags_from_resource
+- [ ] restore_from_snapshot
+- [ ] start_schema_extension
+- [ ] update_conditional_forwarder
+- [ ] update_number_of_domain_controllers
+- [ ] update_radius
+- [ ] verify_trust
+
+## dynamodb - 22% implemented
+- [ ] batch_get_item
+- [ ] batch_write_item
+- [ ] create_backup
+- [ ] create_global_table
+- [X] create_table
+- [ ] delete_backup
+- [X] delete_item
+- [X] delete_table
+- [ ] describe_backup
+- [ ] describe_continuous_backups
+- [ ] describe_global_table
+- [ ] describe_limits
+- [ ] describe_table
+- [ ] describe_time_to_live
+- [X] get_item
+- [ ] list_backups
+- [ ] list_global_tables
+- [ ] list_tables
+- [ ] list_tags_of_resource
+- [X] put_item
+- [X] query
+- [ ] restore_table_from_backup
+- [ ] restore_table_to_point_in_time
+- [X] scan
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_continuous_backups
+- [ ] update_global_table
+- [ ] update_item
+- [ ] update_table
+- [ ] update_time_to_live
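The dynamodb list above has the basic item operations checked while most table-management calls are not; the checked subset is still enough for a create/put/get cycle. A minimal sketch, assuming moto's `mock_dynamodb2` decorator (the boto3-compatible DynamoDB backend in moto of this era; table and attribute names are invented):

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_table_roundtrip():
    # create_table, put_item and get_item are [X] above; describe_table is not.
    ddb = boto3.client("dynamodb", region_name="us-east-1")
    ddb.create_table(
        TableName="example",
        KeySchema=[{"AttributeName": "pk", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "pk", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )
    ddb.put_item(TableName="example", Item={"pk": {"S": "a"}, "v": {"N": "1"}})
    item = ddb.get_item(TableName="example", Key={"pk": {"S": "a"}})["Item"]
    assert item["v"]["N"] == "1"
```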
+
+## dynamodbstreams - 0% implemented
+- [ ] describe_stream
+- [ ] get_records
+- [ ] get_shard_iterator
+- [ ] list_streams
+
+## ec2 - 37% implemented
+- [ ] accept_reserved_instances_exchange_quote
+- [ ] accept_vpc_endpoint_connections
+- [X] accept_vpc_peering_connection
+- [X] allocate_address
+- [ ] allocate_hosts
+- [ ] assign_ipv6_addresses
+- [ ] assign_private_ip_addresses
+- [X] associate_address
+- [X] associate_dhcp_options
+- [ ] associate_iam_instance_profile
+- [X] associate_route_table
+- [ ] associate_subnet_cidr_block
+- [X] associate_vpc_cidr_block
+- [ ] attach_classic_link_vpc
+- [X] attach_internet_gateway
+- [X] attach_network_interface
+- [X] attach_volume
+- [X] attach_vpn_gateway
+- [X] authorize_security_group_egress
+- [X] authorize_security_group_ingress
+- [ ] bundle_instance
+- [ ] cancel_bundle_task
+- [ ] cancel_conversion_task
+- [ ] cancel_export_task
+- [ ] cancel_import_task
+- [ ] cancel_reserved_instances_listing
+- [X] cancel_spot_fleet_requests
+- [X] cancel_spot_instance_requests
+- [ ] confirm_product_instance
+- [ ] copy_fpga_image
+- [X] copy_image
+- [X] copy_snapshot
+- [X] create_customer_gateway
+- [ ] create_default_subnet
+- [ ] create_default_vpc
+- [X] create_dhcp_options
+- [ ] create_egress_only_internet_gateway
+- [ ] create_flow_logs
+- [ ] create_fpga_image
+- [X] create_image
+- [ ] create_instance_export_task
+- [X] create_internet_gateway
+- [X] create_key_pair
+- [ ] create_launch_template
+- [ ] create_launch_template_version
+- [X] create_nat_gateway
+- [X] create_network_acl
+- [X] create_network_acl_entry
+- [X] create_network_interface
+- [ ] create_network_interface_permission
+- [ ] create_placement_group
+- [ ] create_reserved_instances_listing
+- [X] create_route
+- [X] create_route_table
+- [X] create_security_group
+- [X] create_snapshot
+- [ ] create_spot_datafeed_subscription
+- [X] create_subnet
+- [X] create_tags
+- [X] create_volume
+- [X] create_vpc
+- [ ] create_vpc_endpoint
+- [ ] create_vpc_endpoint_connection_notification
+- [ ] create_vpc_endpoint_service_configuration
+- [X] create_vpc_peering_connection
+- [X] create_vpn_connection
+- [ ] create_vpn_connection_route
+- [X] create_vpn_gateway
+- [X] delete_customer_gateway
+- [ ] delete_dhcp_options
+- [ ] delete_egress_only_internet_gateway
+- [ ] delete_flow_logs
+- [ ] delete_fpga_image
+- [X] delete_internet_gateway
+- [X] delete_key_pair
+- [ ] delete_launch_template
+- [ ] delete_launch_template_versions
+- [X] delete_nat_gateway
+- [X] delete_network_acl
+- [X] delete_network_acl_entry
+- [X] delete_network_interface
+- [ ] delete_network_interface_permission
+- [ ] delete_placement_group
+- [X] delete_route
+- [X] delete_route_table
+- [X] delete_security_group
+- [X] delete_snapshot
+- [ ] delete_spot_datafeed_subscription
+- [X] delete_subnet
+- [X] delete_tags
+- [X] delete_volume
+- [X] delete_vpc
+- [ ] delete_vpc_endpoint_connection_notifications
+- [ ] delete_vpc_endpoint_service_configurations
+- [ ] delete_vpc_endpoints
+- [X] delete_vpc_peering_connection
+- [X] delete_vpn_connection
+- [ ] delete_vpn_connection_route
+- [X] delete_vpn_gateway
+- [X] deregister_image
+- [ ] describe_account_attributes
+- [X] describe_addresses
+- [ ] describe_aggregate_id_format
+- [X] describe_availability_zones
+- [ ] describe_bundle_tasks
+- [ ] describe_classic_link_instances
+- [ ] describe_conversion_tasks
+- [ ] describe_customer_gateways
+- [X] describe_dhcp_options
+- [ ] describe_egress_only_internet_gateways
+- [ ] describe_elastic_gpus
+- [ ] describe_export_tasks
+- [ ] describe_flow_logs
+- [ ] describe_fpga_image_attribute
+- [ ] describe_fpga_images
+- [ ] describe_host_reservation_offerings
+- [ ] describe_host_reservations
+- [ ] describe_hosts
+- [ ] describe_iam_instance_profile_associations
+- [ ] describe_id_format
+- [ ] describe_identity_id_format
+- [ ] describe_image_attribute
+- [X] describe_images
+- [ ] describe_import_image_tasks
+- [ ] describe_import_snapshot_tasks
+- [X] describe_instance_attribute
+- [ ] describe_instance_credit_specifications
+- [ ] describe_instance_status
+- [ ] describe_instances
+- [X] describe_internet_gateways
+- [X] describe_key_pairs
+- [ ] describe_launch_template_versions
+- [ ] describe_launch_templates
+- [ ] describe_moving_addresses
+- [ ] describe_nat_gateways
+- [ ] describe_network_acls
+- [ ] describe_network_interface_attribute
+- [ ] describe_network_interface_permissions
+- [X] describe_network_interfaces
+- [ ] describe_placement_groups
+- [ ] describe_prefix_lists
+- [ ] describe_principal_id_format
+- [X] describe_regions
+- [ ] describe_reserved_instances
+- [ ] describe_reserved_instances_listings
+- [ ] describe_reserved_instances_modifications
+- [ ] describe_reserved_instances_offerings
+- [ ] describe_route_tables
+- [ ] describe_scheduled_instance_availability
+- [ ] describe_scheduled_instances
+- [ ] describe_security_group_references
+- [X] describe_security_groups
+- [ ] describe_snapshot_attribute
+- [X] describe_snapshots
+- [ ] describe_spot_datafeed_subscription
+- [X] describe_spot_fleet_instances
+- [ ] describe_spot_fleet_request_history
+- [X] describe_spot_fleet_requests
+- [X] describe_spot_instance_requests
+- [ ] describe_spot_price_history
+- [ ] describe_stale_security_groups
+- [ ] describe_subnets
+- [X] describe_tags
+- [ ] describe_volume_attribute
+- [ ] describe_volume_status
+- [X] describe_volumes
+- [ ] describe_volumes_modifications
+- [X] describe_vpc_attribute
+- [ ] describe_vpc_classic_link
+- [ ] describe_vpc_classic_link_dns_support
+- [ ] describe_vpc_endpoint_connection_notifications
+- [ ] describe_vpc_endpoint_connections
+- [ ] describe_vpc_endpoint_service_configurations
+- [ ] describe_vpc_endpoint_service_permissions
+- [ ] describe_vpc_endpoint_services
+- [ ] describe_vpc_endpoints
+- [ ] describe_vpc_peering_connections
+- [ ] describe_vpcs
+- [X] describe_vpn_connections
+- [ ] describe_vpn_gateways
+- [ ] detach_classic_link_vpc
+- [X] detach_internet_gateway
+- [X] detach_network_interface
+- [X] detach_volume
+- [X] detach_vpn_gateway
+- [ ] disable_vgw_route_propagation
+- [ ] disable_vpc_classic_link
+- [ ] disable_vpc_classic_link_dns_support
+- [X] disassociate_address
+- [ ] disassociate_iam_instance_profile
+- [X] disassociate_route_table
+- [ ] disassociate_subnet_cidr_block
+- [X] disassociate_vpc_cidr_block
+- [ ] enable_vgw_route_propagation
+- [ ] enable_volume_io
+- [ ] enable_vpc_classic_link
+- [ ] enable_vpc_classic_link_dns_support
+- [ ] get_console_output
+- [ ] get_console_screenshot
+- [ ] get_host_reservation_purchase_preview
+- [ ] get_launch_template_data
+- [ ] get_password_data
+- [ ] get_reserved_instances_exchange_quote
+- [ ] import_image
+- [ ] import_instance
+- [X] import_key_pair
+- [ ] import_snapshot
+- [ ] import_volume
+- [ ] modify_fpga_image_attribute
+- [ ] modify_hosts
+- [ ] modify_id_format
+- [ ] modify_identity_id_format
+- [ ] modify_image_attribute
+- [X] modify_instance_attribute
+- [ ] modify_instance_credit_specification
+- [ ] modify_instance_placement
+- [ ] modify_launch_template
+- [X] modify_network_interface_attribute
+- [ ] modify_reserved_instances
+- [ ] modify_snapshot_attribute
+- [X] modify_spot_fleet_request
+- [X] modify_subnet_attribute
+- [ ] modify_volume
+- [ ] modify_volume_attribute
+- [X] modify_vpc_attribute
+- [ ] modify_vpc_endpoint
+- [ ] modify_vpc_endpoint_connection_notification
+- [ ] modify_vpc_endpoint_service_configuration
+- [ ] modify_vpc_endpoint_service_permissions
+- [ ] modify_vpc_peering_connection_options
+- [ ] modify_vpc_tenancy
+- [ ] monitor_instances
+- [ ] move_address_to_vpc
+- [ ] purchase_host_reservation
+- [ ] purchase_reserved_instances_offering
+- [ ] purchase_scheduled_instances
+- [X] reboot_instances
+- [ ] register_image
+- [ ] reject_vpc_endpoint_connections
+- [X] reject_vpc_peering_connection
+- [X] release_address
+- [ ] release_hosts
+- [ ] replace_iam_instance_profile_association
+- [X] replace_network_acl_association
+- [X] replace_network_acl_entry
+- [X] replace_route
+- [X] replace_route_table_association
+- [ ] report_instance_status
+- [X] request_spot_fleet
+- [X] request_spot_instances
+- [ ] reset_fpga_image_attribute
+- [ ] reset_image_attribute
+- [ ] reset_instance_attribute
+- [ ] reset_network_interface_attribute
+- [ ] reset_snapshot_attribute
+- [ ] restore_address_to_classic
+- [X] revoke_security_group_egress
+- [X] revoke_security_group_ingress
+- [ ] run_instances
+- [ ] run_scheduled_instances
+- [X] start_instances
+- [X] stop_instances
+- [X] terminate_instances
+- [ ] unassign_ipv6_addresses
+- [ ] unassign_private_ip_addresses
+- [ ] unmonitor_instances
+- [ ] update_security_group_rule_descriptions_egress
+- [ ] update_security_group_rule_descriptions_ingress
+
+## ecr - 31% implemented
+- [ ] batch_check_layer_availability
+- [ ] batch_delete_image
+- [X] batch_get_image
+- [ ] complete_layer_upload
+- [X] create_repository
+- [ ] delete_lifecycle_policy
+- [X] delete_repository
+- [ ] delete_repository_policy
+- [X] describe_images
+- [X] describe_repositories
+- [ ] get_authorization_token
+- [ ] get_download_url_for_layer
+- [ ] get_lifecycle_policy
+- [ ] get_lifecycle_policy_preview
+- [ ] get_repository_policy
+- [ ] initiate_layer_upload
+- [X] list_images
+- [X] put_image
+- [ ] put_lifecycle_policy
+- [ ] set_repository_policy
+- [ ] start_lifecycle_policy_preview
+- [ ] upload_layer_part
+
+## ecs - 87% implemented
+- [X] create_cluster
+- [X] create_service
+- [X] delete_attributes
+- [X] delete_cluster
+- [X] delete_service
+- [X] deregister_container_instance
+- [X] deregister_task_definition
+- [X] describe_clusters
+- [X] describe_container_instances
+- [X] describe_services
+- [X] describe_task_definition
+- [X] describe_tasks
+- [ ] discover_poll_endpoint
+- [X] list_attributes
+- [X] list_clusters
+- [X] list_container_instances
+- [X] list_services
+- [X] list_task_definition_families
+- [X] list_task_definitions
+- [X] list_tasks
+- [X] put_attributes
+- [X] register_container_instance
+- [X] register_task_definition
+- [X] run_task
+- [X] start_task
+- [X] stop_task
+- [ ] submit_container_state_change
+- [ ] submit_task_state_change
+- [ ] update_container_agent
+- [X] update_container_instances_state
+- [X] update_service
+
+## efs - 0% implemented
+- [ ] create_file_system
+- [ ] create_mount_target
+- [ ] create_tags
+- [ ] delete_file_system
+- [ ] delete_mount_target
+- [ ] delete_tags
+- [ ] describe_file_systems
+- [ ] describe_mount_target_security_groups
+- [ ] describe_mount_targets
+- [ ] describe_tags
+- [ ] modify_mount_target_security_groups
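At 87%, ecs above is one of the most complete services in this file, so most cluster workflows run unmodified under the mock. A minimal sketch, assuming moto's `mock_ecs` decorator (the cluster name is made up):

```python
import boto3
from moto import mock_ecs


@mock_ecs
def test_cluster_roundtrip():
    # create_cluster and list_clusters are both [X] in the ecs section above.
    ecs = boto3.client("ecs", region_name="us-east-1")
    ecs.create_cluster(clusterName="example")
    arns = ecs.list_clusters()["clusterArns"]
    assert any("example" in arn for arn in arns)
```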
+
+## elasticache - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] authorize_cache_security_group_ingress
+- [ ] copy_snapshot
+- [ ] create_cache_cluster
+- [ ] create_cache_parameter_group
+- [ ] create_cache_security_group
+- [ ] create_cache_subnet_group
+- [ ] create_replication_group
+- [ ] create_snapshot
+- [ ] delete_cache_cluster
+- [ ] delete_cache_parameter_group
+- [ ] delete_cache_security_group
+- [ ] delete_cache_subnet_group
+- [ ] delete_replication_group
+- [ ] delete_snapshot
+- [ ] describe_cache_clusters
+- [ ] describe_cache_engine_versions
+- [ ] describe_cache_parameter_groups
+- [ ] describe_cache_parameters
+- [ ] describe_cache_security_groups
+- [ ] describe_cache_subnet_groups
+- [ ] describe_engine_default_parameters
+- [ ] describe_events
+- [ ] describe_replication_groups
+- [ ] describe_reserved_cache_nodes
+- [ ] describe_reserved_cache_nodes_offerings
+- [ ] describe_snapshots
+- [ ] list_allowed_node_type_modifications
+- [ ] list_tags_for_resource
+- [ ] modify_cache_cluster
+- [ ] modify_cache_parameter_group
+- [ ] modify_cache_subnet_group
+- [ ] modify_replication_group
+- [ ] modify_replication_group_shard_configuration
+- [ ] purchase_reserved_cache_nodes_offering
+- [ ] reboot_cache_cluster
+- [ ] remove_tags_from_resource
+- [ ] reset_cache_parameter_group
+- [ ] revoke_cache_security_group_ingress
+- [ ] test_failover
+
+## elasticbeanstalk - 0% implemented
+- [ ] abort_environment_update
+- [ ] apply_environment_managed_action
+- [ ] check_dns_availability
+- [ ] compose_environments
+- [ ] create_application
+- [ ] create_application_version
+- [ ] create_configuration_template
+- [ ] create_environment
+- [ ] create_platform_version
+- [ ] create_storage_location
+- [ ] delete_application
+- [ ] delete_application_version
+- [ ] delete_configuration_template
+- [ ] delete_environment_configuration
+- [ ] delete_platform_version
+- [ ] describe_account_attributes
+- [ ] describe_application_versions
+- [ ] describe_applications
+- [ ] describe_configuration_options
+- [ ] describe_configuration_settings
+- [ ] describe_environment_health
+- [ ] describe_environment_managed_action_history
+- [ ] describe_environment_managed_actions
+- [ ] describe_environment_resources
+- [ ] describe_environments
+- [ ] describe_events
+- [ ] describe_instances_health
+- [ ] describe_platform_version
+- [ ] list_available_solution_stacks
+- [ ] list_platform_versions
+- [ ] list_tags_for_resource
+- [ ] rebuild_environment
+- [ ] request_environment_info
+- [ ] restart_app_server
+- [ ] retrieve_environment_info
+- [ ] swap_environment_cnames
+- [ ] terminate_environment
+- [ ] update_application
+- [ ] update_application_resource_lifecycle
+- [ ] update_application_version
+- [ ] update_configuration_template
+- [ ] update_environment
+- [ ] update_tags_for_resource
+- [ ] validate_configuration_settings
+
+## elastictranscoder - 0% implemented
+- [ ] cancel_job
+- [ ] create_job
+- [ ] create_pipeline
+- [ ] create_preset
+- [ ] delete_pipeline
+- [ ] delete_preset
+- [ ] list_jobs_by_pipeline
+- [ ] list_jobs_by_status
+- [ ] list_pipelines
+- [ ] list_presets
+- [ ] read_job
+- [ ] read_pipeline
+- [ ] read_preset
+- [ ] test_role
+- [ ] update_pipeline
+- [ ] update_pipeline_notifications
+- [ ] update_pipeline_status
+
+## elb - 34% implemented
+- [ ] add_tags
+- [X] apply_security_groups_to_load_balancer
+- [ ] attach_load_balancer_to_subnets
+- [X] configure_health_check
+- [X] create_app_cookie_stickiness_policy
+- [X] create_lb_cookie_stickiness_policy
+- [X] create_load_balancer
+- [X] create_load_balancer_listeners
+- [ ] create_load_balancer_policy
+- [X] delete_load_balancer
+- [X] delete_load_balancer_listeners
+- [ ] delete_load_balancer_policy
+- [ ] deregister_instances_from_load_balancer
+- [ ] describe_account_limits
+- [ ] describe_instance_health
+- [ ] describe_load_balancer_attributes
+- [ ] describe_load_balancer_policies
+- [ ] describe_load_balancer_policy_types
+- [X] describe_load_balancers
+- [ ] describe_tags
+- [ ] detach_load_balancer_from_subnets
+- [ ] disable_availability_zones_for_load_balancer
+- [ ] enable_availability_zones_for_load_balancer
+- [ ] modify_load_balancer_attributes
+- [ ] register_instances_with_load_balancer
+- [ ] remove_tags
+- [ ] set_load_balancer_listener_ssl_certificate
+- [ ] set_load_balancer_policies_for_backend_server
+- [X] set_load_balancer_policies_of_listener
+
+## elbv2 - 70% implemented
+- [ ] add_listener_certificates
+- [ ] add_tags
+- [X] create_listener
+- [X] create_load_balancer
+- [X] create_rule
+- [X] create_target_group
+- [X] delete_listener
+- [X] delete_load_balancer
+- [X] delete_rule
+- [X] delete_target_group
+- [X] deregister_targets
+- [ ] describe_account_limits
+- [ ] describe_listener_certificates
+- [X] describe_listeners
+- [X] describe_load_balancer_attributes
+- [X] describe_load_balancers
+- [X] describe_rules
+- [ ] describe_ssl_policies
+- [ ] describe_tags
+- [ ] describe_target_group_attributes
+- [X] describe_target_groups
+- [X] describe_target_health
+- [X] modify_listener
+- [X] modify_load_balancer_attributes
+- [X] modify_rule
+- [X] modify_target_group
+- [ ] modify_target_group_attributes
+- [X] register_targets
+- [ ] remove_listener_certificates
+- [ ] remove_tags
+- [X] set_ip_address_type
+- [X] set_rule_priorities
+- [X] set_security_groups
+- [X] set_subnets
+
+## emr - 55% implemented
+- [ ] add_instance_fleet
+- [X] add_instance_groups
+- [X] add_job_flow_steps
+- [X] add_tags
+- [ ] cancel_steps
+- [ ] create_security_configuration
+- [ ] delete_security_configuration
+- [ ] describe_cluster
+- [X] describe_job_flows
+- [ ] describe_security_configuration
+- [X] describe_step
+- [X] list_bootstrap_actions
+- [X] list_clusters
+- [ ] list_instance_fleets
+- [X] list_instance_groups
+- [ ] list_instances
+- [ ] list_security_configurations
+- [X] list_steps
+- [ ] modify_instance_fleet
+- [X] modify_instance_groups
+- [ ] put_auto_scaling_policy
+- [ ] remove_auto_scaling_policy
+- [X] remove_tags
+- [X] run_job_flow
+- [X] set_termination_protection
+- [X] set_visible_to_all_users
+- [X] terminate_job_flows
+
+## es - 0% implemented
+- [ ] add_tags
+- [ ] create_elasticsearch_domain
+- [ ] delete_elasticsearch_domain
+- [ ] delete_elasticsearch_service_role
+- [ ] describe_elasticsearch_domain
+- [ ] describe_elasticsearch_domain_config
+- [ ] describe_elasticsearch_domains
+- [ ] describe_elasticsearch_instance_type_limits
+- [ ] list_domain_names
+- [ ] list_elasticsearch_instance_types
+- [ ] list_elasticsearch_versions
+- [ ] list_tags
+- [ ] remove_tags
+- [ ] update_elasticsearch_domain_config
+
+## events - 100% implemented
+- [X] delete_rule
+- [X] describe_event_bus
+- [X] describe_rule
+- [X] disable_rule
+- [X] enable_rule
+- [X] list_rule_names_by_target
+- [X] list_rules
+- [X] list_targets_by_rule
+- [X] put_events
+- [X] put_permission
+- [X] put_rule
+- [X] put_targets
+- [X] remove_permission
+- [X] remove_targets
+- [X] test_event_pattern
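events is the first service in this file to reach 100%, so every call in its section is backed by moto. A minimal sketch, assuming moto's `mock_events` decorator (the rule name and schedule are invented):

```python
import boto3
from moto import mock_events


@mock_events
def test_rule_roundtrip():
    # With events at 100%, each of these calls is listed [X] above.
    ev = boto3.client("events", region_name="us-east-1")
    ev.put_rule(Name="every-5-min", ScheduleExpression="rate(5 minutes)")
    assert ev.list_rules()["Rules"][0]["Name"] == "every-5-min"
    ev.disable_rule(Name="every-5-min")
    ev.delete_rule(Name="every-5-min")
```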
+
+## firehose - 0% implemented
+- [ ] create_delivery_stream
+- [ ] delete_delivery_stream
+- [ ] describe_delivery_stream
+- [ ] list_delivery_streams
+- [ ] put_record
+- [ ] put_record_batch
+- [ ] update_destination
+
+## fms - 0% implemented
+- [ ] associate_admin_account
+- [ ] delete_notification_channel
+- [ ] delete_policy
+- [ ] disassociate_admin_account
+- [ ] get_admin_account
+- [ ] get_compliance_detail
+- [ ] get_notification_channel
+- [ ] get_policy
+- [ ] list_compliance_status
+- [ ] list_policies
+- [ ] put_notification_channel
+- [ ] put_policy
+
+## gamelift - 0% implemented
+- [ ] accept_match
+- [ ] create_alias
+- [ ] create_build
+- [ ] create_fleet
+- [ ] create_game_session
+- [ ] create_game_session_queue
+- [ ] create_matchmaking_configuration
+- [ ] create_matchmaking_rule_set
+- [ ] create_player_session
+- [ ] create_player_sessions
+- [ ] create_vpc_peering_authorization
+- [ ] create_vpc_peering_connection
+- [ ] delete_alias
+- [ ] delete_build
+- [ ] delete_fleet
+- [ ] delete_game_session_queue
+- [ ] delete_matchmaking_configuration
+- [ ] delete_scaling_policy
+- [ ] delete_vpc_peering_authorization
+- [ ] delete_vpc_peering_connection
+- [ ] describe_alias
+- [ ] describe_build
+- [ ] describe_ec2_instance_limits
+- [ ] describe_fleet_attributes
+- [ ] describe_fleet_capacity
+- [ ] describe_fleet_events
+- [ ] describe_fleet_port_settings
+- [ ] describe_fleet_utilization
+- [ ] describe_game_session_details
+- [ ] describe_game_session_placement
+- [ ] describe_game_session_queues
+- [ ] describe_game_sessions
+- [ ] describe_instances
+- [ ] describe_matchmaking
+- [ ] describe_matchmaking_configurations
+- [ ] describe_matchmaking_rule_sets
+- [ ] describe_player_sessions
+- [ ] describe_runtime_configuration
+- [ ] describe_scaling_policies
+- [ ] describe_vpc_peering_authorizations
+- [ ] describe_vpc_peering_connections
+- [ ] get_game_session_log_url
+- [ ] get_instance_access
+- [ ] list_aliases
+- [ ] list_builds
+- [ ] list_fleets
+- [ ] put_scaling_policy
+- [ ] request_upload_credentials
+- [ ] resolve_alias
+- [ ] search_game_sessions
+- [ ] start_game_session_placement
+- [ ] start_match_backfill
+- [ ] start_matchmaking
+- [ ] stop_game_session_placement
+- [ ] stop_matchmaking
+- [ ] update_alias
+- [ ] update_build
+- [ ] update_fleet_attributes
+- [ ] update_fleet_capacity
+- [ ] update_fleet_port_settings
+- [ ] update_game_session
+- [ ] update_game_session_queue
+- [ ] update_matchmaking_configuration
+- [ ] update_runtime_configuration
+- [ ] validate_matchmaking_rule_set
+
+## glacier - 12% implemented
+- [ ] abort_multipart_upload
+- [ ] abort_vault_lock
+- [ ] add_tags_to_vault
+- [ ] complete_multipart_upload
+- [ ] complete_vault_lock
+- [X] create_vault
+- [ ] delete_archive
+- [X] delete_vault
+- [ ] delete_vault_access_policy
+- [ ] delete_vault_notifications
+- [ ] describe_job
+- [ ] describe_vault
+- [ ] get_data_retrieval_policy
+- [ ] get_job_output
+- [ ] get_vault_access_policy
+- [ ] get_vault_lock
+- [ ] get_vault_notifications
+- [X] initiate_job
+- [ ] initiate_multipart_upload
+- [ ] initiate_vault_lock
+- [X] list_jobs
+- [ ] list_multipart_uploads
+- [ ] list_parts
+- [ ] list_provisioned_capacity
+- [ ] list_tags_for_vault
+- [ ] list_vaults
+- [ ] purchase_provisioned_capacity
+- [ ] remove_tags_from_vault
+- [ ] set_data_retrieval_policy
+- [ ] set_vault_access_policy
+- [ ] set_vault_notifications
+- [ ] upload_archive
+- [ ] upload_multipart_part
+
+## glue - 0% implemented
+- [ ] batch_create_partition
+- [ ] batch_delete_connection
+- [ ] batch_delete_partition
+- [ ] batch_delete_table
+- [ ] batch_delete_table_version
+- [ ] batch_get_partition
+- [ ] batch_stop_job_run
+- [ ] create_classifier
+- [ ] create_connection
+- [ ] create_crawler
+- [ ] create_database
+- [ ] create_dev_endpoint
+- [ ] create_job
+- [ ] create_partition
+- [ ] create_script
+- [ ] create_table
+- [ ] create_trigger
+- [ ] create_user_defined_function
+- [ ] delete_classifier
+- [ ] delete_connection
+- [ ] delete_crawler
+- [ ] delete_database
+- [ ] delete_dev_endpoint
+- [ ] delete_job
+- [ ] delete_partition
+- [ ] delete_table
+- [ ] delete_table_version
+- [ ] delete_trigger
+- [ ] delete_user_defined_function
+- [ ] get_catalog_import_status
+- [ ] get_classifier
+- [ ] get_classifiers
+- [ ] get_connection
+- [ ] get_connections
+- [ ] get_crawler
+- [ ] get_crawler_metrics
+- [ ] get_crawlers
+- [ ] get_database
+- [ ] get_databases
+- [ ] get_dataflow_graph
+- [ ] get_dev_endpoint
+- [ ] get_dev_endpoints
+- [ ] get_job
+- [ ] get_job_run
+- [ ] get_job_runs
+- [ ] get_jobs
+- [ ] get_mapping
+- [ ] get_partition
+- [ ] get_partitions
+- [ ] get_plan
+- [ ] get_table
+- [ ] get_table_version
+- [ ] get_table_versions
+- [ ] get_tables
+- [ ] get_trigger
+- [ ] get_triggers
+- [ ] get_user_defined_function
+- [ ] get_user_defined_functions
+- [ ] import_catalog_to_glue
+- [ ] reset_job_bookmark
+- [ ] start_crawler
+- [ ] start_crawler_schedule
+- [ ] start_job_run
+- [ ] start_trigger
+- [ ] stop_crawler
+- [ ] stop_crawler_schedule
+- [ ] stop_trigger
+- [ ] update_classifier
+- [ ] update_connection
+- [ ] update_crawler
+- [ ] update_crawler_schedule
+- [ ] update_database
+- [ ] update_dev_endpoint
+- [ ] update_job
+- [ ] update_partition
+- [ ] update_table
+- [ ] update_trigger
+- [ ] update_user_defined_function
+
+## greengrass - 0% implemented
+- [ ] associate_role_to_group
+- [ ] associate_service_role_to_account
+- [ ] create_core_definition
+- [ ] create_core_definition_version
+- [ ] create_deployment
+- [ ] create_device_definition
+- [ ] create_device_definition_version
+- [ ] create_function_definition
+- [ ] create_function_definition_version
+- [ ] create_group
+- [ ] create_group_certificate_authority
+- [ ] create_group_version
+- [ ] create_logger_definition
+- [ ] create_logger_definition_version
+- [ ] create_resource_definition
+- [ ] create_resource_definition_version
+- [ ] create_software_update_job
+- [ ] create_subscription_definition
+- [ ] create_subscription_definition_version
+- [ ] delete_core_definition
+- [ ] delete_device_definition
+- [ ] delete_function_definition
+- [ ] delete_group
+- [ ] delete_logger_definition
+- [ ] delete_resource_definition
+- [ ] delete_subscription_definition
+- [ ] disassociate_role_from_group
+- [ ] disassociate_service_role_from_account
+- [ ] get_associated_role
+- [ ] get_connectivity_info
+- [ ] get_core_definition
+- [ ] get_core_definition_version
+- [ ] get_deployment_status
+- [ ] get_device_definition
+- [ ] get_device_definition_version
+- [ ] get_function_definition
+- [ ] get_function_definition_version
+- [ ] get_group
+- [ ] get_group_certificate_authority
+- [ ] get_group_certificate_configuration
+- [ ] get_group_version
+- [ ] get_logger_definition
+- [ ] get_logger_definition_version
+- [ ] get_resource_definition
+- [ ] get_resource_definition_version
+- [ ] get_service_role_for_account
+- [ ] get_subscription_definition
+- [ ] get_subscription_definition_version
+- [ ] list_core_definition_versions
+- [ ] list_core_definitions
+- [ ] list_deployments
+- [ ] list_device_definition_versions
+- [ ] list_device_definitions
+- [ ] list_function_definition_versions
+- [ ] list_function_definitions
+- [ ] list_group_certificate_authorities
+- [ ] list_group_versions
+- [ ] list_groups
+- [ ] list_logger_definition_versions
+- [ ] list_logger_definitions
+- [ ] list_resource_definition_versions
+- [ ] list_resource_definitions
+- [ ] list_subscription_definition_versions
+- [ ] list_subscription_definitions
+- [ ] reset_deployments
+- [ ] update_connectivity_info
+- [ ] update_core_definition
+- [ ] update_device_definition
+- [ ] update_function_definition
+- [ ] update_group
+- [ ] update_group_certificate_configuration
+- [ ] update_logger_definition
+- [ ] update_resource_definition
+- [ ] update_subscription_definition
+
+## guardduty - 0% implemented
+- [ ] accept_invitation
+- [ ] archive_findings
+- [ ] create_detector
+- [ ] create_ip_set
+- [ ] create_members
+- [ ] create_sample_findings
+- [ ] create_threat_intel_set
+- [ ] decline_invitations
+- [ ] delete_detector
+- [ ] delete_invitations
+- [ ] delete_ip_set
+- [ ] delete_members
+- [ ] delete_threat_intel_set
+- [ ] disassociate_from_master_account
+- [ ] disassociate_members
+- [ ] get_detector
+- [ ] get_findings
+- [ ] get_findings_statistics
+- [ ] get_invitations_count
+- [ ] get_ip_set
+- [ ] get_master_account
+- [ ] get_members
+- [ ] get_threat_intel_set
+- [ ] invite_members
+- [ ] list_detectors
+- [ ] list_findings
+- [ ] list_invitations
+- [ ] list_ip_sets
+- [ ] list_members
+- [ ] list_threat_intel_sets
+- [ ] start_monitoring_members
+- [ ] stop_monitoring_members
+- [ ] unarchive_findings
+- [ ] update_detector
+- [ ] update_findings_feedback
+- [ ] update_ip_set
+- [ ] update_threat_intel_set
+
+## health - 0% implemented
+- [ ] describe_affected_entities
+- [ ] describe_entity_aggregates
+- [ ] describe_event_aggregates
+- [ ] describe_event_details
+- [ ] describe_event_types
+- [ ] describe_events
+
+## iam - 62% implemented
+- [ ] add_client_id_to_open_id_connect_provider
+- [X] add_role_to_instance_profile
+- [X] add_user_to_group
+- [X] attach_group_policy
+- [X] attach_role_policy
+- [X] attach_user_policy
+- [ ] change_password
+- [X] create_access_key
+- [X] create_account_alias
+- [X] create_group
+- [X] create_instance_profile
+- [X] create_login_profile
+- [ ] create_open_id_connect_provider
+- [X] create_policy
+- [X] create_policy_version
+- [X] create_role
+- [X] create_saml_provider
+- [ ] create_service_linked_role
+- [ ] create_service_specific_credential
+- [X] create_user
+- [ ] create_virtual_mfa_device
+- [X] deactivate_mfa_device
+- [X] delete_access_key
+- [X] delete_account_alias
+- [ ] delete_account_password_policy
+- [ ] delete_group
+- [ ] delete_group_policy
+- [ ] delete_instance_profile
+- [X] delete_login_profile
+- [ ] delete_open_id_connect_provider
+- [ ] delete_policy
+- [X] delete_policy_version
+- [X] delete_role
+- [X] delete_role_policy
+- [X] delete_saml_provider
+- [X] delete_server_certificate
+- [ ] delete_service_linked_role
+- [ ] delete_service_specific_credential
+- [X] delete_signing_certificate
+- [ ] delete_ssh_public_key
+- [X] delete_user
+- [X] delete_user_policy
+- [ ] delete_virtual_mfa_device
+- [X] detach_group_policy
+- [X] detach_role_policy
+- [X] detach_user_policy
+- [X] enable_mfa_device
+- [ ] generate_credential_report
+- [X] get_access_key_last_used
+- [X] get_account_authorization_details
+- [ ] get_account_password_policy
+- [ ] get_account_summary
+- [ ] get_context_keys_for_custom_policy
+- [ ] get_context_keys_for_principal_policy
+- [X] get_credential_report
+- [X] get_group
+- [X] get_group_policy
+- [X] get_instance_profile
+- [X] get_login_profile
+- [ ] get_open_id_connect_provider
+- [X] get_policy
+- [X] get_policy_version
+- [X] get_role
+- [X] get_role_policy
+- [X] get_saml_provider
+- [X] get_server_certificate
+- [ ] get_service_linked_role_deletion_status
+- [ ] get_ssh_public_key
+- [X] get_user
+- [X] get_user_policy
+- [X] list_access_keys
+- [X] list_account_aliases
+- [X] list_attached_group_policies
+- [X] list_attached_role_policies
+- [X] list_attached_user_policies
+- [ ] list_entities_for_policy
+- [X] list_group_policies
+- [X] list_groups
+- [X] list_groups_for_user
+- [X] list_instance_profiles
+- [X] list_instance_profiles_for_role
+- [X] list_mfa_devices
+- [ ] list_open_id_connect_providers
+- [X] list_policies
+- [X] list_policy_versions
+- [X] list_role_policies
+- [X] list_roles
+- [X] list_role_tags
+- [ ] list_user_tags
+- [X] list_saml_providers
+- [X] list_server_certificates
+- [ ] list_service_specific_credentials
+- [X] list_signing_certificates
+- [ ] list_ssh_public_keys
+- [X] list_user_policies
+- [X] list_users
+- [ ] list_virtual_mfa_devices
+- [X] put_group_policy
+- [X] put_role_policy
+- [X] put_user_policy
+- [ ] remove_client_id_from_open_id_connect_provider
+- [X] remove_role_from_instance_profile
+- [X] remove_user_from_group
+- [ ] reset_service_specific_credential
+- [ ] resync_mfa_device
+- [ ] set_default_policy_version
+- [ ] simulate_custom_policy
+- [ ] simulate_principal_policy
+- [X] tag_role
+- [ ] tag_user
+- [X] untag_role
+- [ ] untag_user
+- [X] update_access_key
+- [ ] update_account_password_policy
+- [ ] update_assume_role_policy
+- [ ] update_group
+- [X] update_login_profile
+- [ ] update_open_id_connect_provider_thumbprint
+- [ ] update_role
+- [ ] update_role_description
+- [X] update_saml_provider
+- [ ] update_server_certificate
+- [ ] update_service_specific_credential
+- [X] update_signing_certificate
+- [ ] update_ssh_public_key
+- [X] update_user
+- [X] upload_server_certificate
+- [X] upload_signing_certificate
+- [ ] upload_ssh_public_key
+
+## importexport - 0% implemented
+- [ ] cancel_job
+- [ ] create_job
+- [ ] get_shipping_label
+- [ ] get_status
+- [ ] list_jobs
+- [ ] update_job
+
+## inspector - 0% implemented
+- [ ] add_attributes_to_findings
+- [ ] create_assessment_target
+- [ ] create_assessment_template
+- [ ] create_resource_group
+- [ ] delete_assessment_run
+- [ ] delete_assessment_target
+- [ ] delete_assessment_template
+- [ ] describe_assessment_runs
+- [ ] describe_assessment_targets
+- [ ] describe_assessment_templates
+- [ ] describe_cross_account_access_role
+- [ ] describe_findings
+- [ ] describe_resource_groups
+- [ ] describe_rules_packages
+- [ ] get_assessment_report
+- [ ] get_telemetry_metadata
+- [ ] list_assessment_run_agents
+- [ ] list_assessment_runs
+- [ ] list_assessment_targets
+- [ ] list_assessment_templates
+- [ ] list_event_subscriptions
+- [ ] list_findings
+- [ ] list_rules_packages
+- [ ] list_tags_for_resource
+- [ ] preview_agents
+- [ ] register_cross_account_access_role
+- [ ] remove_attributes_from_findings
+- [ ] set_tags_for_resource
+- [ ] start_assessment_run
+- [ ] stop_assessment_run
+- [ ] subscribe_to_event
+- [ ] unsubscribe_from_event
+- [ ] update_assessment_target
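Looking back at the iam section above (62%), the checked calls cover the common user and credential lifecycle. A minimal sketch, assuming moto's `mock_iam` decorator (the user name is made up):

```python
import boto3
from moto import mock_iam


@mock_iam
def test_user_roundtrip():
    # create_user, create_access_key and list_users are [X] in the iam section above.
    iam = boto3.client("iam", region_name="us-east-1")
    iam.create_user(UserName="alice")
    key = iam.create_access_key(UserName="alice")["AccessKey"]
    assert key["AccessKeyId"]
    assert iam.list_users()["Users"][0]["UserName"] == "alice"
```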
+
+## iot - 33% implemented
+- [ ] accept_certificate_transfer
+- [X] add_thing_to_thing_group
+- [ ] associate_targets_with_job
+- [X] attach_policy
+- [X] attach_principal_policy
+- [X] attach_thing_principal
+- [ ] cancel_certificate_transfer
+- [ ] cancel_job
+- [ ] clear_default_authorizer
+- [ ] create_authorizer
+- [ ] create_certificate_from_csr
+- [X] create_job
+- [X] create_keys_and_certificate
+- [ ] create_ota_update
+- [X] create_policy
+- [ ] create_policy_version
+- [ ] create_role_alias
+- [ ] create_stream
+- [X] create_thing
+- [X] create_thing_group
+- [X] create_thing_type
+- [ ] create_topic_rule
+- [ ] delete_authorizer
+- [ ] delete_ca_certificate
+- [X] delete_certificate
+- [ ] delete_ota_update
+- [X] delete_policy
+- [ ] delete_policy_version
+- [ ] delete_registration_code
+- [ ] delete_role_alias
+- [ ] delete_stream
+- [X] delete_thing
+- [X] delete_thing_group
+- [X] delete_thing_type
+- [ ] delete_topic_rule
+- [ ] delete_v2_logging_level
+- [ ] deprecate_thing_type
+- [ ] describe_authorizer
+- [ ] describe_ca_certificate
+- [X] describe_certificate
+- [ ] describe_default_authorizer
+- [ ] describe_endpoint
+- [ ] describe_event_configurations
+- [ ] describe_index
+- [X] describe_job
+- [ ] describe_job_execution
+- [ ] describe_role_alias
+- [ ] describe_stream
+- [X] describe_thing
+- [X] describe_thing_group
+- [ ] describe_thing_registration_task
+- [X] describe_thing_type
+- [X] detach_policy
+- [X] detach_principal_policy
+- [X] detach_thing_principal
+- [ ] disable_topic_rule
+- [ ] enable_topic_rule
+- [ ] get_effective_policies
+- [ ] get_indexing_configuration
+- [ ] get_job_document
+- [ ] get_logging_options
+- [ ] get_ota_update
+- [X] get_policy
+- [ ] get_policy_version
+- [ ] get_registration_code
+- [ ] get_topic_rule
+- [ ] get_v2_logging_options
+- [ ] list_attached_policies
+- [ ] list_authorizers
+- [ ] list_ca_certificates
+- [X] list_certificates
+- [ ] list_certificates_by_ca
+- [ ] list_indices
+- [ ] list_job_executions_for_job
+- [ ] list_job_executions_for_thing
+- [ ] list_jobs
+- [ ] list_ota_updates
+- [ ] list_outgoing_certificates
+- [X] list_policies
+- [X] list_policy_principals
+- [ ] list_policy_versions
+- [X] list_principal_policies
+- [X] list_principal_things
+- [ ] list_role_aliases
+- [ ] list_streams
+- [ ] list_targets_for_policy
+- [X] list_thing_groups
+- [X] list_thing_groups_for_thing
+- [X] list_thing_principals
+- [ ] list_thing_registration_task_reports
+- [ ] list_thing_registration_tasks
+- [X] list_thing_types
+- [X] list_things
+- [X] list_things_in_thing_group
+- [ ] list_topic_rules
+- [ ] list_v2_logging_levels
+- [ ] register_ca_certificate
+- [X] register_certificate
+- [ ] register_thing
+- [ ] reject_certificate_transfer
+- [X] remove_thing_from_thing_group
+- [ ] replace_topic_rule
+- [ ] search_index
+- [ ] set_default_authorizer
+- [ ] set_default_policy_version
+- [ ] set_logging_options
+- [ ] set_v2_logging_level
+- [ ] set_v2_logging_options
+- [ ] start_thing_registration_task
+- [ ] stop_thing_registration_task
+- [ ] test_authorization
+- [ ] test_invoke_authorizer
+- [ ] transfer_certificate
+- [ ] update_authorizer
+- [ ] update_ca_certificate
+- [X] update_certificate
+- [ ] update_event_configurations
+- [ ] update_indexing_configuration
+- [ ] update_role_alias
+- [ ] update_stream
+- [X] update_thing
+- [X] update_thing_group
+- [X] update_thing_groups_for_thing
+
+## iot-data - 0% implemented
+- [ ] delete_thing_shadow
+- [ ] get_thing_shadow
+- [ ] publish
+- [ ] update_thing_shadow
+
+## iot-jobs-data - 0% implemented
+- [ ] describe_job_execution
+- [ ] get_pending_job_executions
+- [ ] start_next_pending_job_execution
+- [ ] update_job_execution
+
+## kinesis - 61% implemented
+- [X] add_tags_to_stream
+- [X] create_stream
+- [ ] decrease_stream_retention_period
+- [X] delete_stream
+- [ ] describe_limits
+- [X] describe_stream
+- [X] describe_stream_summary
+- [ ] disable_enhanced_monitoring
+- [ ] enable_enhanced_monitoring
+- [X] get_records
+- [X] get_shard_iterator
+- [ ] increase_stream_retention_period
+- [ ] list_shards
+- [X] list_streams
+- [X] list_tags_for_stream
+- [X] merge_shards
+- [X] put_record
+- [X] put_records
+- [X] remove_tags_from_stream
+- [X] split_shard
+- [ ] start_stream_encryption
+- [ ] stop_stream_encryption
+- [ ] update_shard_count
+
+## kinesis-video-archived-media - 0% implemented
+- [ ] get_media_for_fragment_list
+- [ ] list_fragments
+
+## kinesis-video-media - 0% implemented
+- [ ] get_media
+
+## kinesisanalytics - 0% implemented
+- [ ] add_application_cloud_watch_logging_option
+- [ ] add_application_input
+- [ ] add_application_input_processing_configuration
+- [ ] add_application_output
+- [ ] add_application_reference_data_source
+- [ ] create_application
+- [ ] delete_application
+- [ ] delete_application_cloud_watch_logging_option
+- [ ] delete_application_input_processing_configuration
+- [ ] delete_application_output
+- [ ] delete_application_reference_data_source
+- [ ] describe_application
+- [ ] discover_input_schema
+- [ ] list_applications
+- [ ] start_application
+- [ ] stop_application
+- [ ] update_application
+
+## kinesisvideo - 0% implemented
+- [ ] create_stream
+- [ ] delete_stream
+- [ ] describe_stream
+- [ ] get_data_endpoint
+- [ ] list_streams
+- [ ] list_tags_for_stream
+- [ ] tag_stream
+- [ ] untag_stream
+- [ ] update_data_retention
+- [ ] update_stream
+
+## kms - 25% implemented
+- [ ] cancel_key_deletion
+- [ ] create_alias
+- [ ] create_grant
+- [X] create_key
+- [ ] decrypt
+- [X] delete_alias
+- [ ] delete_imported_key_material
+- [X] describe_key
+- [ ] disable_key
+- [X] disable_key_rotation
+- [ ] enable_key
+- [X] enable_key_rotation
+- [ ] encrypt
+- [ ] generate_data_key
+- [ ] generate_data_key_without_plaintext
+- [ ] generate_random
+- [X] get_key_policy
+- [X] get_key_rotation_status
+- [ ] get_parameters_for_import
+- [ ] import_key_material
+- [ ] list_aliases
+- [ ] list_grants
+- [ ] list_key_policies
+- [X] list_keys
+- [ ] list_resource_tags
+- [ ] list_retirable_grants
+- [X] put_key_policy
+- [ ] re_encrypt
+- [ ] retire_grant
+- [ ] revoke_grant
+- [ ] schedule_key_deletion
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_alias
+- [ ] update_key_description
+
+## lambda - 0% implemented
+- [ ] add_permission
+- [ ] create_alias
+- [ ] create_event_source_mapping
+- [ ] create_function
+- [ ] delete_alias
+- [ ] delete_event_source_mapping
+- [ ] delete_function
+- [ ] delete_function_concurrency
+- [ ] get_account_settings
+- [ ] get_alias
+- [ ] get_event_source_mapping
+- [ ] get_function
+- [ ] get_function_configuration
+- [ ] get_policy
+- [ ] invoke
+- [ ] invoke_async
+- [ ] list_aliases
+- [ ] list_event_source_mappings
+- [ ] list_functions
+- [ ] list_tags
+- [ ] list_versions_by_function
+- [ ] publish_version
+- [ ] put_function_concurrency
+- [ ] remove_permission
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_alias
+- [ ] update_event_source_mapping
+- [ ] update_function_code
+- [ ] update_function_configuration
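The kinesis section above (61%) has the full produce/consume path checked. A minimal sketch, assuming moto's `mock_kinesis` decorator (the stream name, payload and partition key are invented):

```python
import boto3
from moto import mock_kinesis


@mock_kinesis
def test_stream_roundtrip():
    # create_stream, put_record, describe_stream, get_shard_iterator and
    # get_records are all [X] in the kinesis section above.
    k = boto3.client("kinesis", region_name="us-east-1")
    k.create_stream(StreamName="example", ShardCount=1)
    k.put_record(StreamName="example", Data=b"hello", PartitionKey="pk")
    shard = k.describe_stream(StreamName="example")["StreamDescription"]["Shards"][0]
    it = k.get_shard_iterator(
        StreamName="example",
        ShardId=shard["ShardId"],
        ShardIteratorType="TRIM_HORIZON",
    )["ShardIterator"]
    assert len(k.get_records(ShardIterator=it)["Records"]) == 1
```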
+
+## lex-models - 0% implemented
+- [ ] create_bot_version
+- [ ] create_intent_version
+- [ ] create_slot_type_version
+- [ ] delete_bot
+- [ ] delete_bot_alias
+- [ ] delete_bot_channel_association
+- [ ] delete_bot_version
+- [ ] delete_intent
+- [ ] delete_intent_version
+- [ ] delete_slot_type
+- [ ] delete_slot_type_version
+- [ ] delete_utterances
+- [ ] get_bot
+- [ ] get_bot_alias
+- [ ] get_bot_aliases
+- [ ] get_bot_channel_association
+- [ ] get_bot_channel_associations
+- [ ] get_bot_versions
+- [ ] get_bots
+- [ ] get_builtin_intent
+- [ ] get_builtin_intents
+- [ ] get_builtin_slot_types
+- [ ] get_export
+- [ ] get_import
+- [ ] get_intent
+- [ ] get_intent_versions
+- [ ] get_intents
+- [ ] get_slot_type
+- [ ] get_slot_type_versions
+- [ ] get_slot_types
+- [ ] get_utterances_view
+- [ ] put_bot
+- [ ] put_bot_alias
+- [ ] put_intent
+- [ ] put_slot_type
+- [ ] start_import
+
+## lex-runtime - 0% implemented
+- [ ] post_content
+- [ ] post_text
+
+## lightsail - 0% implemented
+- [ ] allocate_static_ip
+- [ ] attach_disk
+- [ ] attach_instances_to_load_balancer
+- [ ] attach_load_balancer_tls_certificate
+- [ ] attach_static_ip
+- [ ] close_instance_public_ports
+- [ ] create_disk
+- [ ] create_disk_from_snapshot
+- [ ] create_disk_snapshot
+- [ ] create_domain
+- [ ] create_domain_entry
+- [ ] create_instance_snapshot
+- [ ] create_instances
+- [ ] create_instances_from_snapshot
+- [ ] create_key_pair
+- [ ] create_load_balancer
+- [ ] create_load_balancer_tls_certificate
+- [ ] delete_disk
+- [ ] delete_disk_snapshot
+- [ ] delete_domain
+- [ ] delete_domain_entry
+- [ ] delete_instance
+- [ ] delete_instance_snapshot
+- [ ] delete_key_pair
+- [ ] delete_load_balancer
+- [ ] delete_load_balancer_tls_certificate
+- [ ] detach_disk
+- [ ] detach_instances_from_load_balancer
+- [ ] detach_static_ip
+- [ ] download_default_key_pair
+- [ ] get_active_names
+- [ ] get_blueprints
+- [ ] get_bundles
+- [ ] get_disk
+- [ ] get_disk_snapshot
+- [ ] get_disk_snapshots
+- [ ] get_disks
+- [ ] get_domain
+- [ ] get_domains
+- [ ] get_instance
+- [ ] get_instance_access_details
+- [ ] get_instance_metric_data
+- [ ] get_instance_port_states
+- [ ] get_instance_snapshot
+- [ ] get_instance_snapshots
+- [ ] get_instance_state
+- [ ] get_instances
+- [ ] get_key_pair
+- [ ] get_key_pairs
+- [ ] get_load_balancer
+- [ ] get_load_balancer_metric_data
+- [ ] get_load_balancer_tls_certificates
+- [ ] get_load_balancers
+- [ ] get_operation
+- [ ] get_operations
+- [ ] get_operations_for_resource
+- [ ] get_regions
+- [ ] get_static_ip
+- [ ] get_static_ips
+- [ ] import_key_pair
+- [ ] is_vpc_peered
+- [ ] open_instance_public_ports
+- [ ] peer_vpc
+- [ ] put_instance_public_ports
+- [ ] reboot_instance
+- [ ] release_static_ip
+- [ ] start_instance
+- [ ] stop_instance
+- [ ] unpeer_vpc
+- [ ] update_domain_entry
+- [ ] update_load_balancer_attribute
+
+## logs - 27% implemented
+- [ ] associate_kms_key
+- [ ] cancel_export_task
+- [ ] create_export_task
+- [X] create_log_group
+- [X] create_log_stream
+- [ ] delete_destination
+- [X] delete_log_group
+- [X] delete_log_stream
+- [ ] delete_metric_filter
+- [ ] delete_resource_policy
+- [ ] delete_retention_policy
+- [ ] delete_subscription_filter
+- [ ] describe_destinations
+- [ ] describe_export_tasks
+- [X] describe_log_groups
+- [X] describe_log_streams
+- [ ] describe_metric_filters
+- [ ] describe_resource_policies
+- [ ] describe_subscription_filters
+- [ ] disassociate_kms_key
+- [X] filter_log_events
+- [X] get_log_events
+- [ ] list_tags_log_group
+- [ ] put_destination
+- [ ] put_destination_policy
+- [X] put_log_events
+- [ ] put_metric_filter
+- [ ] put_resource_policy
+- [ ] put_retention_policy
+- [ ] put_subscription_filter
+- [ ] tag_log_group
+- [ ] test_metric_filter
+- [ ] untag_log_group
+
+## machinelearning - 0% implemented
+- [ ] add_tags
+- [ ] create_batch_prediction
+- [ ] create_data_source_from_rds
+- [ ] create_data_source_from_redshift
+- [ ] create_data_source_from_s3
+- [ ] create_evaluation
+- [ ] create_ml_model
+- [ ] create_realtime_endpoint
+- [ ] delete_batch_prediction
+- [ ] delete_data_source
+- [ ] delete_evaluation
+- [ ] delete_ml_model
+- [ ] delete_realtime_endpoint
+- [ ] delete_tags
+- [ ] describe_batch_predictions
+- [ ] describe_data_sources
+- [ ] describe_evaluations
+- [ ] describe_ml_models
+- [ ] describe_tags
+- [ ] get_batch_prediction
+- [ ] get_data_source
+- [ ] get_evaluation
+- [ ] get_ml_model
+- [ ] predict
+- [ ] update_batch_prediction
+- [ ] update_data_source
+- [ ] update_evaluation
+- [ ] update_ml_model
+
+## marketplace-entitlement - 0% implemented
+- [ ] get_entitlements
+
+## marketplacecommerceanalytics - 0% implemented
+- [ ] generate_data_set
+- [ ] start_support_data_export
+
+## mediaconvert - 0% implemented
+- [ ] cancel_job
+- [ ] create_job
+- [ ] create_job_template
+- [ ] create_preset
+- [ ] create_queue
+- [ ] delete_job_template
+- [ ] delete_preset
+- [ ] delete_queue
+- [ ] describe_endpoints
+- [ ] get_job
+- [ ] get_job_template
+- [ ] get_preset
+- [ ] get_queue
+- [ ] list_job_templates
+- [ ] list_jobs
+- [ ] list_presets
+- [ ] list_queues
+- [ ] update_job_template
+- [ ] update_preset
+- [ ] update_queue
+
+## medialive - 0% implemented
+- [ ] create_channel
+- [ ] create_input
+- [ ] create_input_security_group
+- [ ] delete_channel
+- [ ] delete_input
+- [ ] delete_input_security_group
+- [ ] describe_channel
+- [ ] describe_input
+- [ ] describe_input_security_group
+- [ ] list_channels
+- [ ] list_input_security_groups
+- [ ] list_inputs
+- [ ] start_channel
+- [ ] stop_channel
+- [ ] update_channel
+- [ ] update_input
+- [ ] update_input_security_group
+
+## mediapackage - 0% implemented
+- [ ] create_channel
+- [ ] create_origin_endpoint
+- [ ] delete_channel
+- [ ] delete_origin_endpoint
+- [ ] describe_channel
+- [ ] describe_origin_endpoint
+- [ ] list_channels
+- [ ] list_origin_endpoints
+- [ ] rotate_channel_credentials
+- [ ] update_channel
+- [ ] update_origin_endpoint
+
+## mediastore - 0% implemented
+- [ ] create_container
+- [ ] delete_container
+- [ ] delete_container_policy
+- [ ] delete_cors_policy
+- [ ] describe_container
+- [ ] get_container_policy
+- [ ] get_cors_policy
+- [ ] list_containers
+- [ ] put_container_policy
+- [ ] put_cors_policy
+
+## mediastore-data - 0% implemented
+- [ ] delete_object
+- [ ] describe_object
+- [ ] get_object
+- [ ] list_items
+- [ ] put_object
+
+## meteringmarketplace - 0% implemented
+- [ ] batch_meter_usage
+- [ ] meter_usage
+- [ ] resolve_customer
+
+## mgh - 0% implemented
+- [ ] associate_created_artifact
+- [ ] associate_discovered_resource
+- [ ] create_progress_update_stream
+- [ ] delete_progress_update_stream
+- [ ] describe_application_state
+- [ ] describe_migration_task
+- [ ] disassociate_created_artifact
+- [ ] disassociate_discovered_resource
+- [ ] import_migration_task
+- [ ] list_created_artifacts
+- [ ] list_discovered_resources
+- [ ] list_migration_tasks
+- [ ] list_progress_update_streams
+- [ ] notify_application_state
+- [ ] notify_migration_task_state
+- [ ] put_resource_attributes
+
+## mobile - 0% implemented
+- [ ] create_project
+- [ ] delete_project
+- [ ] describe_bundle
+- [ ] describe_project
+- [ ] export_bundle
+- [ ] export_project
+- [ ] list_bundles
+- [
] list_projects +- [ ] update_project + +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## opsworks - 12% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [X] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [X] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_operating_systems +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] 
update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 47% implemented +- [ ] accept_handshake +- [X] attach_policy +- [ ] cancel_handshake +- [X] create_account +- [X] create_organization +- [X] create_organizational_unit +- [X] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [X] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [X] describe_organization +- [X] describe_organizational_unit +- [X] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [X] list_accounts +- [X] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [X] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [X] list_organizational_units_for_parent +- [X] list_parents +- [X] list_policies +- [X] list_policies_for_target +- [X] list_roots +- [X] list_targets_for_policy +- [X] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] create_export_job +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_endpoint +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_export_jobs +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] put_event_stream +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 83% implemented +- [X] delete_lexicon +- 
[X] describe_voices +- [X] get_lexicon +- [X] list_lexicons +- [X] put_lexicon +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 41% implemented +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] 
create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [X] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [X] delete_snapshot_copy_grant +- [X] delete_tags +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [X] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [X] disable_snapshot_copy +- [ ] enable_logging +- [X] enable_snapshot_copy +- [ ] get_cluster_credentials +- [X] modify_cluster +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [X] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] create_stream_processor +- [ ] delete_collection +- [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] detect_text +- [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] list_stream_processors +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 62% implemented +- [X] create_group +- [X] delete_group +- [X] get_group +- [X] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [X] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [X] update_group +- [X] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- 
[ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 15% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [X] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [X] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- 
[ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [X] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [X] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [X] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] select_object_content +- [ ] upload_part +- [ ] upload_part_copy + +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] start_notebook_instance +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## secretsmanager - 33% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [X] delete_secret +- [X] describe_secret +- [X] get_random_password +- [X] get_secret_value +- [X] list_secret_version_ids +- [X] list_secrets +- [X] put_secret_value +- [X] restore_secret +- [X] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] 
create_portfolio_share +- [ ] create_product +- [ ] create_provisioned_product_plan +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioned_product_plan +- [ ] delete_provisioning_artifact +- [ ] delete_tag_option +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] 
list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_protection +- [ ] describe_subscription +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [X] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 11% implemented 
+- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [X] send_command +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] 
get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- 
[X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] 
get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_mailbox_permissions +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] put_mailbox_permissions +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + +## workspaces - 0% implemented +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_tags +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] modify_workspace_properties +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] start_workspaces +- [ ] stop_workspaces +- [ ] terminate_workspaces + +## xray - 0% implemented +- [ ] 
batch_get_traces +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_telemetry_records +- [ ] put_trace_segments diff --git a/README.md b/README.md index 56f73e28e..70faee2c8 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ def test_my_model_save(): body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") - assert body == b'is awesome' + assert body == 'is awesome' ``` With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys. diff --git a/moto/__init__.py b/moto/__init__.py index 5eeac8471..8c51bab27 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -36,6 +36,7 @@ from .polly import mock_polly # flake8: noqa from .rds import mock_rds, mock_rds_deprecated # flake8: noqa from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa +from .resourcegroups import mock_resourcegroups # flake8: noqa from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa from .ses import mock_ses, mock_ses_deprecated # flake8: noqa from .secretsmanager import mock_secretsmanager # flake8: noqa diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 27e81a87c..24811be73 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -1,4 +1,7 @@ from __future__ import unicode_literals + +import random + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel @@ -159,13 +162,7 @@ class FakeAutoScalingGroup(BaseModel): self.autoscaling_backend = autoscaling_backend self.name = name - if not availability_zones and not vpc_zone_identifier: - raise AutoscalingClientError( - "ValidationError", - "At least one Availability Zone or VPC Subnet is required." - ) - self.availability_zones = availability_zones - self.vpc_zone_identifier = vpc_zone_identifier + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier) self.max_size = max_size self.min_size = min_size @@ -188,6 +185,35 @@ class FakeAutoScalingGroup(BaseModel): self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) + def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False): + # for updates, if only AZs are provided, they must not clash with + # the AZs of existing VPCs + if update and availability_zones and not vpc_zone_identifier: + vpc_zone_identifier = self.vpc_zone_identifier + + if vpc_zone_identifier: + # extract azs for vpcs + subnet_ids = vpc_zone_identifier.split(',') + subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids) + vpc_zones = [subnet.availability_zone for subnet in subnets] + + if availability_zones and set(availability_zones) != set(vpc_zones): + raise AutoscalingClientError( + "ValidationError", + "The availability zones of the specified subnets and the Auto Scaling group do not match", + ) + availability_zones = vpc_zones + elif not availability_zones: + if not update: + raise AutoscalingClientError( + "ValidationError", + "At least one Availability Zone or VPC Subnet is required." 
+ ) + return + + self.availability_zones = availability_zones + self.vpc_zone_identifier = vpc_zone_identifier + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -246,8 +272,8 @@ class FakeAutoScalingGroup(BaseModel): health_check_period, health_check_type, placement_group, termination_policies, new_instances_protected_from_scale_in=None): - if availability_zones: - self.availability_zones = availability_zones + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True) + if max_size is not None: self.max_size = max_size if min_size is not None: @@ -257,8 +283,6 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - if vpc_zone_identifier is not None: - self.vpc_zone_identifier = vpc_zone_identifier if health_check_period is not None: self.health_check_period = health_check_period if health_check_type is not None: @@ -319,7 +343,8 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config.user_data, self.launch_config.security_groups, instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} + tags={'instance': propagated_tags}, + placement=random.choice(self.availability_zones), ) for instance in reservation.instances: instance.autoscaling_group = self diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 6a7913021..985c6f852 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -404,7 +404,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """ -{{ requestid }} + """ @@ -412,7 +412,7 @@ ATTACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -454,7 +454,7 @@ DETACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -499,7 +499,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {{ instance_state.health_status }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ group.launch_config_name }} {{ instance_state.lifecycle_state }} @@ -585,7 +585,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ {{ instance_state.health_status }} {{ instance_state.instance.autoscaling_group.name }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ instance_state.instance.autoscaling_group.launch_config_name }} {{ instance_state.lifecycle_state }} @@ -654,7 +654,7 @@ DELETE_POLICY_TEMPLATE = """ -{{ requestid }} + """ @@ -670,14 +670,14 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ -{{ requestid }} + """ @@ -690,13 +690,13 @@ SUSPEND_PROCESSES_TEMPLATE = """ -{{ requestid }} + """ SET_INSTANCE_PROTECTION_TEMPLATE = """ -{{ requestid }} + """ diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 9fc41c11e..8dfa4724a 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -30,7 +30,7 @@ from moto.s3.models import s3_backend from moto.logs.models import logs_backends from moto.s3.exceptions import MissingBucket, MissingKey from moto import settings -from .utils import make_function_arn +from .utils import make_function_arn, make_function_ver_arn logger = logging.getLogger(__name__) @@ -45,7 +45,7 @@ except ImportError: _stderr_regex = re.compile(r'START|END|REPORT RequestId: .*') _orig_adapter_send = requests.adapters.HTTPAdapter.send -docker_3 = docker.__version__.startswith("3") +docker_3 = docker.__version__[0] >= '3' def zip2tar(zip_bytes): @@ -215,12 +215,12 @@ class 
LambdaFunction(BaseModel): self.code_size = key.size self.code_sha_256 = hashlib.sha256(key.value).hexdigest() - self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version) + self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name) self.tags = dict() def set_version(self, version): - self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version) + self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, self.function_name, version) self.version = version self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') @@ -503,7 +503,10 @@ class LambdaStorage(object): def list_versions_by_function(self, name): if name not in self._functions: return None - return [self._functions[name]['latest']] + + latest = copy.copy(self._functions[name]['latest']) + latest.function_arn += ':$LATEST' + return [latest] + self._functions[name]['versions'] def get_arn(self, arn): return self._arns.get(arn, None) @@ -535,6 +538,7 @@ class LambdaStorage(object): fn.set_version(new_version) self._functions[name]['versions'].append(fn) + self._arns[fn.function_arn] = fn return fn def del_function(self, name, qualifier=None): @@ -604,6 +608,9 @@ class LambdaBackend(BaseBackend): self._lambdas.put_function(fn) + if spec.get('Publish'): + ver = self.publish_function(function_name) + fn.version = ver.version return fn def publish_function(self, function_name): diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 1c43ef84b..c29c9acd9 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -150,7 +150,7 @@ class LambdaResponse(BaseResponse): for fn in self.lambda_backend.list_functions(): json_data = fn.get_configuration() - + json_data['Version'] = '$LATEST' result['Functions'].append(json_data) return 200, {}, json.dumps(result) @@ -204,7 +204,10 @@ class LambdaResponse(BaseResponse): if fn: code = fn.get_code() - + if qualifier is None or qualifier == '$LATEST': + code['Configuration']['Version'] = '$LATEST' + if qualifier == '$LATEST': + code['Configuration']['FunctionArn'] += ':$LATEST' return 200, {}, json.dumps(code) else: return 404, {}, "{}" diff --git a/moto/awslambda/utils.py b/moto/awslambda/utils.py index 88146d34f..82027cb2f 100644 --- a/moto/awslambda/utils.py +++ b/moto/awslambda/utils.py @@ -3,8 +3,13 @@ from collections import namedtuple ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version']) -def make_function_arn(region, account, name, version='1'): - return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version) +def make_function_arn(region, account, name): + return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name) + + +def make_function_ver_arn(region, account, name, version='1'): + arn = make_function_arn(region, account, name) + return '{0}:{1}'.format(arn, version) def split_function_arn(arn): diff --git a/moto/backends.py b/moto/backends.py index 90cc803a7..6ea85093d 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -32,6 +32,7 @@ from moto.organizations import organizations_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends +from moto.resourcegroups import resourcegroups_backends from moto.route53 import route53_backends from moto.s3 import s3_backends from moto.ses import ses_backends @@ -81,6 +82,7 @@ BACKENDS = { 'organizations': organizations_backends, 'polly': polly_backends, 'redshift': 
redshift_backends, + 'resource-groups': resourcegroups_backends, 'rds': rds2_backends, 's3': s3_backends, 's3bucket_path': s3_backends, diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 0be68944b..3bf994bed 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -12,7 +12,7 @@ from moto.batch import models as batch_models from moto.cloudwatch import models as cloudwatch_models from moto.cognitoidentity import models as cognitoidentity_models from moto.datapipeline import models as datapipeline_models -from moto.dynamodb import models as dynamodb_models +from moto.dynamodb2 import models as dynamodb2_models from moto.ec2 import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models @@ -37,7 +37,7 @@ MODEL_MAP = { "AWS::Batch::JobDefinition": batch_models.JobDefinition, "AWS::Batch::JobQueue": batch_models.JobQueue, "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, - "AWS::DynamoDB::Table": dynamodb_models.Table, + "AWS::DynamoDB::Table": dynamodb2_models.Table, "AWS::Kinesis::Stream": kinesis_models.Stream, "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, "AWS::Lambda::Function": lambda_models.LambdaFunction, @@ -425,11 +425,18 @@ class ResourceMap(collections.Mapping): self.resolved_parameters[parameter_name] = parameter.get('Default') # Set any input parameters that were passed + self.no_echo_parameter_keys = [] for key, value in self.input_parameters.items(): if key in self.resolved_parameters: - value_type = parameter_slots[key].get('Type', 'String') + parameter_slot = parameter_slots[key] + + value_type = parameter_slot.get('Type', 'String') if value_type == 'CommaDelimitedList' or value_type.startswith("List"): value = value.split(',') + + if parameter_slot.get('NoEcho'): + self.no_echo_parameter_keys.append(key) + self.resolved_parameters[key] = value # Check if there are any non-default params that were not passed input diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index d1ef5ba8a..80970262f 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -654,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """ {% for param_name, param_value in stack.stack_parameters.items() %} {{ param_name }} - {{ param_value }} + {% if param_name in stack.resource_map.no_echo_parameter_keys %} + **** + {% else %} + {{ param_value }} + {% endif %} {% endfor %} diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index bdd279ba6..ef1377789 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -287,6 +287,18 @@ class CognitoIdpUser(BaseModel): return user_json + def update_attributes(self, new_attributes): + + def flatten_attrs(attrs): + return {attr['Name']: attr['Value'] for attr in attrs} + + def expand_attrs(attrs): + return [{'Name': k, 'Value': v} for k, v in attrs.items()] + + flat_attributes = flatten_attrs(self.attributes) + flat_attributes.update(flatten_attrs(new_attributes)) + self.attributes = expand_attrs(flat_attributes) + class CognitoIdpBackend(BaseBackend): @@ -673,6 +685,17 @@ class CognitoIdpBackend(BaseBackend): else: raise NotAuthorizedError(access_token) + def admin_update_user_attributes(self, user_pool_id, username, attributes): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + user = 
user_pool.users[username] + user.update_attributes(attributes) + cognitoidp_backends = {} for region in boto.cognito.identity.regions(): diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index 264910739..e9e83695a 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -352,6 +352,13 @@ class CognitoIdpResponse(BaseResponse): cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password) return "" + def admin_update_user_attributes(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + attributes = self._get_param("UserAttributes") + cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes) + return "" + class CognitoIdpJsonWebKeyResponse(BaseResponse): diff --git a/moto/core/responses.py b/moto/core/responses.py index 8fb247f75..9da36b865 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -152,11 +152,18 @@ class BaseResponse(_TemplateEnvironmentMixin): for key, value in flat.items(): querystring[key] = [value] elif self.body: - querystring.update(parse_qs(raw_body, keep_blank_values=True)) + try: + querystring.update(parse_qs(raw_body, keep_blank_values=True)) + except UnicodeEncodeError: + pass # ignore encoding errors, as the body may not contain a legitimate querystring if not querystring: querystring.update(headers) - querystring = _decode_dict(querystring) + try: + querystring = _decode_dict(querystring) + except UnicodeDecodeError: + pass # ignore decoding errors, as the body may not contain a legitimate querystring + self.uri = full_url self.path = urlparse(full_url).path self.querystring = querystring diff --git a/moto/core/utils.py b/moto/core/utils.py index 777a03752..ca670e871 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -280,7 +280,7 @@ def amzn_request_id(f): # Update request ID in XML try: - body = body.replace('{{ requestid }}', request_id) + body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body) except Exception: # Will just ignore if it cant work on bytes (which are str's on python2) pass diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py new file mode 100644 index 000000000..9df973292 --- /dev/null +++ b/moto/dynamodb2/exceptions.py @@ -0,0 +1,2 @@ +class InvalidIndexNameError(ValueError): + pass diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 0f4594aa4..6bcde41b2 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -13,6 +13,7 @@ from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError from .comparisons import get_comparison_func, get_filter_expression, Op +from .exceptions import InvalidIndexNameError class DynamoJsonEncoder(json.JSONEncoder): @@ -293,6 +294,19 @@ class Item(BaseModel): # TODO: implement other data types raise NotImplementedError( 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) + elif action == 'DELETE': + if set(update_action['Value'].keys()) == set(['SS']): + existing = self.attrs.get(attribute_name, DynamoType({"SS": {}})) + new_set = set(existing.value).difference(set(new_value)) + self.attrs[attribute_name] = DynamoType({ + "SS": list(new_set) + }) + else: + raise NotImplementedError( + 'DELETE not supported for %s' % ', '.join(update_action['Value'].keys())) + else: + raise NotImplementedError( + '%s action not supported for update_with_attribute_updates' % action) class StreamRecord(BaseModel):
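# --- Editor's illustrative sketch (not part of the patch) ---------------------
# Exercises the new DELETE branch of update_with_attribute_updates above via
# boto3 against moto's mock; the table and attribute names are hypothetical.
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def demo_delete_from_string_set():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='example',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    client.put_item(TableName='example',
                    Item={'id': {'S': 'a'}, 'tags': {'SS': ['x', 'y', 'z']}})
    # A DELETE AttributeUpdate removes the given members from the string set.
    client.update_item(
        TableName='example',
        Key={'id': {'S': 'a'}},
        AttributeUpdates={'tags': {'Action': 'DELETE', 'Value': {'SS': ['y']}}},
    )
    item = client.get_item(TableName='example', Key={'id': {'S': 'a'}})['Item']
    assert set(item['tags']['SS']) == {'x', 'z'}
# ------------------------------------------------------------------------------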
@@ -403,6 +417,25 @@ class Table(BaseModel): } self.set_stream_specification(streams) + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + params = {} + + if 'KeySchema' in properties: + params['schema'] = properties['KeySchema'] + if 'AttributeDefinitions' in properties: + params['attr'] = properties['AttributeDefinitions'] + if 'GlobalSecondaryIndexes' in properties: + params['global_indexes'] = properties['GlobalSecondaryIndexes'] + if 'ProvisionedThroughput' in properties: + params['throughput'] = properties['ProvisionedThroughput'] + if 'LocalSecondaryIndexes' in properties: + params['indexes'] = properties['LocalSecondaryIndexes'] + + table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params) + return table + def _generate_arn(self, name): return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name @@ -572,7 +605,7 @@ class Table(BaseModel): results = [] if index_name: - all_indexes = (self.global_indexes or []) + (self.indexes or []) + all_indexes = self.all_indexes() indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) if index_name not in indexes_by_name: raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % ( @@ -672,11 +705,39 @@ class Table(BaseModel): else: yield hash_set - def scan(self, filters, limit, exclusive_start_key, filter_expression=None): + def all_indexes(self): + return (self.global_indexes or []) + (self.indexes or []) + + def has_idx_items(self, index_name): + + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[index_name] + idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']]) + + for hash_set in self.items.values(): + if self.range_key_attr: + for item in hash_set.values(): + if idx_col_set.issubset(set(item.attrs)): + yield item + else: + if idx_col_set.issubset(set(hash_set.attrs)): + yield hash_set + + def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None): results = [] scanned_count = 0 + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) - for item in self.all_items(): + if index_name: + if index_name not in indexes_by_name: + raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name) + items = self.has_idx_items(index_name) + else: + items = self.all_items() + + for item in items: scanned_count += 1 passes_all_conditions = True for attribute_name, (comparison_operator, comparison_objs) in filters.items(): @@ -703,10 +764,10 @@ class Table(BaseModel): results.append(item) results, last_evaluated_key = self._trim_results(results, limit, - exclusive_start_key) + exclusive_start_key, index_name) return results, scanned_count, last_evaluated_key - def _trim_results(self, results, limit, exclusive_start_key): + def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None): if exclusive_start_key is not None: hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr)) range_key = exclusive_start_key.get(self.range_key_attr) @@ -726,6 +787,14 @@ class Table(BaseModel): if results[-1].range_key is not None: last_evaluated_key[self.range_key_attr] = results[-1].range_key + if scanned_index: + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[scanned_index] + idx_col_list =
[i['AttributeName'] for i in idx['KeySchema']] + for col in idx_col_list: + last_evaluated_key[col] = results[-1].attrs[col] + return results, last_evaluated_key def lookup(self, *args, **kwargs): @@ -893,7 +962,7 @@ class DynamoDBBackend(BaseBackend): return table.query(hash_key, range_comparison, range_values, limit, exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs) - def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values): + def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name): table = self.tables.get(table_name) if not table: return None, None, None @@ -908,7 +977,7 @@ class DynamoDBBackend(BaseBackend): else: filter_expression = Op(None, None) # Will always eval to true - return table.scan(scan_filters, limit, exclusive_start_key, filter_expression) + return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name) def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, expected=None): diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 49095f09c..7eb565747 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -5,6 +5,7 @@ import re from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id +from .exceptions import InvalidIndexNameError from .models import dynamodb_backends, dynamo_json_dump @@ -156,8 +157,16 @@ class DynamoHandler(BaseResponse): body = self.body # get the table name table_name = body['TableName'] - # get the throughput - throughput = body["ProvisionedThroughput"] + # check billing mode and get the throughput + if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST": + if "ProvisionedThroughput" in body.keys(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, + 'ProvisionedThroughput cannot be specified \ + when BillingMode is PAY_PER_REQUEST') + throughput = None + else: # Provisioned (default billing mode) + throughput = body["ProvisionedThroughput"] # getting the schema key_schema = body['KeySchema'] # getting attribute definition @@ -552,6 +561,7 @@ class DynamoHandler(BaseResponse): exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") + index_name = self.body.get('IndexName') try: items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters, @@ -559,7 +569,11 @@ class DynamoHandler(BaseResponse): exclusive_start_key, filter_expression, expression_attribute_names, - expression_attribute_values) + expression_attribute_values, + index_name) + except InvalidIndexNameError as err: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, str(err)) except ValueError as err: er = 'com.amazonaws.dynamodb.v20111205#ValidationError' return self.error(er, 'Bad Filter Expression: {0}'.format(err)) diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index f747c9cd5..1357d49e2 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError): .format(key)) +class InvalidKeyPairFormatError(EC2ClientError): + + def __init__(self): + super(InvalidKeyPairFormatError, self).__init__( + "InvalidKeyPair.Format", + "Key is not in valid OpenSSH public key format") + + class 
InvalidVPCIdError(EC2ClientError): def __init__(self, vpc_id): @@ -420,3 +428,79 @@ class OperationNotPermitted(EC2ClientError): "The vpc CIDR block with association ID {} may not be disassociated. " "It is the primary IPv4 CIDR block of the VPC".format(association_id) ) + + +class NetworkAclEntryAlreadyExistsError(EC2ClientError): + + def __init__(self, rule_number): + super(NetworkAclEntryAlreadyExistsError, self).__init__( + "NetworkAclEntryAlreadyExists", + "The network acl entry identified by {} already exists.".format(rule_number) + ) + + +class InvalidSubnetRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetRangeError, self).__init__( + "InvalidSubnet.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +class InvalidCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidDestinationCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidDestinationCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidSubnetConflictError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetConflictError, self).__init__( + "InvalidSubnet.Conflict", + "The CIDR '{}' conflicts with another subnet".format(cidr_block) + ) + + +class InvalidVPCRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidVPCRangeError, self).__init__( + "InvalidVpc.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +# accept exception +class OperationNotPermitted2(EC2ClientError): + def __init__(self, client_region, pcx_id, acceptor_region): + super(OperationNotPermitted2, self).__init__( + "OperationNotPermitted", + "Incorrect region ({0}) specified for this request. " + "VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region) + ) + + +# reject exception +class OperationNotPermitted3(EC2ClientError): + def __init__(self, client_region, pcx_id, acceptor_region): + super(OperationNotPermitted3, self).__init__( + "OperationNotPermitted", + "Incorrect region ({0}) specified for this request. "
+ "VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region, + pcx_id, + acceptor_region) + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 0936d2be9..b894853d2 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -20,6 +20,7 @@ from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest from boto.ec2.launchspecification import LaunchSpecification + from moto.compat import OrderedDict from moto.core import BaseBackend from moto.core.models import Model, BaseModel @@ -35,14 +36,17 @@ from .exceptions import ( InvalidAMIIdError, InvalidAMIAttributeItemValueError, InvalidAssociationIdError, + InvalidCIDRBlockParameterError, InvalidCIDRSubnetError, InvalidCustomerGatewayIdError, + InvalidDestinationCIDRBlockParameterError, InvalidDHCPOptionsIdError, InvalidDomainError, InvalidID, InvalidInstanceIdError, InvalidInternetGatewayIdError, InvalidKeyPairDuplicateError, + InvalidKeyPairFormatError, InvalidKeyPairNameError, InvalidNetworkAclIdError, InvalidNetworkAttachmentIdError, @@ -56,20 +60,26 @@ from .exceptions import ( InvalidSecurityGroupDuplicateError, InvalidSecurityGroupNotFoundError, InvalidSnapshotIdError, + InvalidSubnetConflictError, InvalidSubnetIdError, + InvalidSubnetRangeError, InvalidVolumeIdError, InvalidVolumeAttachmentError, InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, InvalidVPCIdError, + InvalidVPCRangeError, InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, MalformedAMIIdError, MalformedDHCPOptionsIdError, MissingParameterError, MotoNotImplementedError, + NetworkAclEntryAlreadyExistsError, OperationNotPermitted, + OperationNotPermitted2, + OperationNotPermitted3, ResourceAlreadyAssociatedError, RulesPerSecurityGroupLimitExceededError, TagLimitExceeded) @@ -118,6 +128,8 @@ from .utils import ( random_customer_gateway_id, is_tag_filter, tag_filter_matches, + rsa_public_key_parse, + rsa_public_key_fingerprint ) INSTANCE_TYPES = json.load( @@ -404,7 +416,7 @@ class Instance(TaggedEC2Resource, BotoInstance): warnings.warn('Could not find AMI with image-id:{0}, ' 'in the near future this will ' 'cause an error.\n' - 'Use ec2_backend.describe_images() to' + 'Use ec2_backend.describe_images() to ' 'find suitable image for your test'.format(image_id), PendingDeprecationWarning) @@ -908,7 +920,14 @@ class KeyPairBackend(object): def import_key_pair(self, key_name, public_key_material): if key_name in self.keypairs: raise InvalidKeyPairDuplicateError(key_name) - keypair = KeyPair(key_name, **random_key_pair()) + + try: + rsa_public_key = rsa_public_key_parse(public_key_material) + except ValueError: + raise InvalidKeyPairFormatError() + + fingerprint = rsa_public_key_fingerprint(rsa_public_key) + keypair = KeyPair(key_name, material=public_key_material, fingerprint=fingerprint) self.keypairs[key_name] = keypair return keypair @@ -1879,6 +1898,8 @@ class Snapshot(TaggedEC2Resource): return str(self.encrypted).lower() elif filter_name == 'status': return self.status + elif filter_name == 'owner-id': + return self.owner_id else: return super(Snapshot, self).get_filter_value( filter_name, 'DescribeSnapshots') @@ -2120,22 +2141,28 @@ class VPC(TaggedEC2Resource): class VPCBackend(object): - __refs__ = defaultdict(list) + vpc_refs = defaultdict(set) def __init__(self): self.vpcs = {} - self.__refs__[self.__class__].append(weakref.ref(self)) + 
self.vpc_refs[self.__class__].add(weakref.ref(self)) super(VPCBackend, self).__init__() @classmethod - def get_instances(cls): - for inst_ref in cls.__refs__[cls]: + def get_vpc_refs(cls): + for inst_ref in cls.vpc_refs[cls]: inst = inst_ref() if inst is not None: yield inst def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() + try: + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28: + raise InvalidVPCRangeError(cidr_block) vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) self.vpcs[vpc_id] = vpc @@ -2159,7 +2186,7 @@ class VPCBackend(object): # get vpc by vpc id and aws region def get_cross_vpc(self, vpc_id, peer_region): - for vpcs in self.get_instances(): + for vpcs in self.get_vpc_refs(): if vpcs.region_name == peer_region: match_vpc = vpcs.get_vpc(vpc_id) return match_vpc @@ -2280,15 +2307,31 @@ class VPCPeeringConnection(TaggedEC2Resource): class VPCPeeringConnectionBackend(object): + # for cross region vpc reference + vpc_pcx_refs = defaultdict(set) + def __init__(self): self.vpc_pcxs = {} + self.vpc_pcx_refs[self.__class__].add(weakref.ref(self)) super(VPCPeeringConnectionBackend, self).__init__() + @classmethod + def get_vpc_pcx_refs(cls): + for inst_ref in cls.vpc_pcx_refs[cls]: + inst = inst_ref() + if inst is not None: + yield inst + def create_vpc_peering_connection(self, vpc, peer_vpc): vpc_pcx_id = random_vpc_peering_connection_id() vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc) vpc_pcx._status.pending() self.vpc_pcxs[vpc_pcx_id] = vpc_pcx + # register cross-region peering info with the accepter region's backend + if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name: + for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs(): + if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name: + vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx return vpc_pcx def get_all_vpc_peering_connections(self): @@ -2306,6 +2349,11 @@ class VPCPeeringConnectionBackend(object): def accept_vpc_peering_connection(self, vpc_pcx_id): vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id) + # a cross-region connection must be accepted from the accepter region + pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.accept() @@ -2313,6 +2361,11 @@ class VPCPeeringConnectionBackend(object): def reject_vpc_peering_connection(self, vpc_pcx_id): vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id) + # a cross-region connection must be rejected from the accepter region + pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.reject() @@ -2326,7 +2379,7 @@ class Subnet(TaggedEC2Resource): self.id = subnet_id self.vpc_id = vpc_id self.cidr_block = cidr_block - self.cidr =
ipaddress.ip_network(six.text_type(self.cidr_block)) + self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False) self._availability_zone = availability_zone self.default_for_az = default_for_az self.map_public_ip_on_launch = map_public_ip_on_launch @@ -2458,7 +2511,19 @@ class SubnetBackend(object): def create_subnet(self, vpc_id, cidr_block, availability_zone): subnet_id = random_subnet_id() - self.get_vpc(vpc_id) # Validate VPC exists + vpc = self.get_vpc(vpc_id) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False) + try: + subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and + vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address): + raise InvalidSubnetRangeError(cidr_block) + + for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}): + if subnet.cidr.overlaps(subnet_cidr_block): + raise InvalidSubnetConflictError(cidr_block) # if this is the first subnet for an availability zone, # consider it the default @@ -2718,6 +2783,11 @@ class RouteBackend(object): elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id: gateway = self.get_internet_gateway(gateway_id) + try: + ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + route = Route(route_table, destination_cidr_block, local=local, gateway=gateway, instance=self.get_instance( @@ -3595,10 +3665,10 @@ class NetworkAclBackend(object): def add_default_entries(self, network_acl_id): default_acl_entries = [ - {'rule_number': 100, 'rule_action': 'allow', 'egress': 'true'}, - {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'true'}, - {'rule_number': 100, 'rule_action': 'allow', 'egress': 'false'}, - {'rule_number': 32767, 'rule_action': 'deny', 'egress': 'false'} + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'}, + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'} ] for entry in default_acl_entries: self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1', @@ -3629,12 +3699,14 @@ class NetworkAclBackend(object): icmp_code, icmp_type, port_range_from, port_range_to): + network_acl = self.get_network_acl(network_acl_id) + if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries): + raise NetworkAclEntryAlreadyExistsError(rule_number) network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number, protocol, rule_action, egress, cidr_block, icmp_code, icmp_type, port_range_from, port_range_to) - network_acl = self.get_network_acl(network_acl_id) network_acl.network_acl_entries.append(network_acl_entry) return network_acl_entry diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 49d752893..68bae72da 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -74,30 +74,35 @@ CREATE_VPC_PEERING_CONNECTION_RESPONSE = """ """ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """ - - 
7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {% for vpc_pcx in vpc_pcxs %} - - {{ vpc_pcx.id }} - - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} - - - 123456789012 - {{ vpc_pcx.peer_vpc.id }} - - - {{ vpc_pcx._status.code }} - {{ vpc_pcx._status.message }} - - 2014-02-17T16:00:50.000Z - - - {% endfor %} - + +7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {% for vpc_pcx in vpc_pcxs %} + + {{ vpc_pcx.id }} + + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + + 123456789012 + {{ vpc_pcx.peer_vpc.id }} + {{ vpc_pcx.peer_vpc.cidr_block }} + + false + true + false + + + + {{ vpc_pcx._status.code }} + {{ vpc_pcx._status.message }} + + + + {% endfor %} + """ @@ -109,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """ """ ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_pcx.id }} - 123456789012 + 777788889999 {{ vpc_pcx.vpc.id }} {{ vpc_pcx.vpc.cidr_block }} - 777788889999 + 123456789012 {{ vpc_pcx.peer_vpc.id }} {{ vpc_pcx.peer_vpc.cidr_block }} + + false + false + false + {{ vpc_pcx._status.code }} diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index f5c9b8512..a998f18ef 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -1,10 +1,19 @@ from __future__ import unicode_literals +import base64 +import hashlib import fnmatch import random import re import six +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import rsa +import sshpubkeys.exceptions +from sshpubkeys.keys import SSHKey + + EC2_RESOURCE_TO_PREFIX = { 'customer-gateway': 'cgw', 'dhcp-options': 'dopt', @@ -453,23 +462,19 @@ def simple_aws_filter_to_re(filter_string): def random_key_pair(): - def random_hex(): - return chr(random.choice(list(range(48, 58)) + list(range(97, 102)))) + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=default_backend()) + private_key_material = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()) + public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key()) - def random_fingerprint(): - return ':'.join([random_hex() + random_hex() for i in range(20)]) - - def random_material(): - return ''.join([ - chr(random.choice(list(range(65, 91)) + list(range(48, 58)) + - list(range(97, 102)))) - for i in range(1000) - ]) - material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \ - "-----END RSA PRIVATE KEY-----" return { - 'fingerprint': random_fingerprint(), - 'material': material + 'fingerprint': public_key_fingerprint, + 'material': private_key_material.decode('ascii') } @@ -535,3 +540,28 @@ def generate_instance_identity_document(instance): } return document + + +def rsa_public_key_parse(key_material): + try: + if not isinstance(key_material, six.binary_type): + key_material = key_material.encode("ascii") + + decoded_key = base64.b64decode(key_material).decode("ascii") + public_key = SSHKey(decoded_key) + except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError): + raise ValueError('bad key') + + if not public_key.rsa: + raise ValueError('bad key') + + return public_key.rsa + + +def rsa_public_key_fingerprint(rsa_public_key): + key_data = rsa_public_key.public_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PublicFormat.SubjectPublicKeyInfo) + fingerprint_hex = 
hashlib.md5(key_data).hexdigest() + fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex) + return fingerprint diff --git a/moto/ecs/models.py b/moto/ecs/models.py index efa8a6cd0..a314c7776 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -699,12 +699,15 @@ class EC2ContainerServiceBackend(BaseBackend): return service - def list_services(self, cluster_str): + def list_services(self, cluster_str, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] service_arns = [] for key, value in self.services.items(): if cluster_name + ':' in key: - service_arns.append(self.services[key].arn) + service = self.services[key] + if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy: + service_arns.append(service.arn) + return sorted(service_arns) def describe_services(self, cluster_str, service_names_or_arns): diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 964ef59d2..92b769fad 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -163,7 +163,8 @@ class EC2ContainerServiceResponse(BaseResponse): def list_services(self): cluster_str = self._get_param('cluster') - service_arns = self.ecs_backend.list_services(cluster_str) + scheduling_strategy = self._get_param('schedulingStrategy') + service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy) return json.dumps({ 'serviceArns': service_arns # , diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 0bf9649d7..11dcbcb21 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError): def __init__(self, invalid_name, index): super(InvalidActionTypeError, self).__init__( "ValidationError", - "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index) + "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index) ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 3925fa95d..8d98f187d 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -204,8 +204,20 @@ class FakeListener(BaseModel): # transform default actions to confirm with the rest of the code and XML templates if "DefaultActions" in properties: default_actions = [] - for action in properties['DefaultActions']: - default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']}) + for i, action in enumerate(properties['DefaultActions']): + action_type = action['Type'] + if action_type == 'forward': + default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']}) + elif action_type == 'redirect': + redirect_action = {'type': action_type, } + for redirect_config_key, redirect_config_value in action['RedirectConfig'].items(): + # need to match the output of _get_list_prefix + if redirect_config_key == 'StatusCode': + redirect_config_key = 'status_code' + redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value + default_actions.append(redirect_action) + else: + raise InvalidActionTypeError(action_type, i + 1) else: default_actions = None @@ -417,11 +429,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = 
action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -483,10 +499,18 @@ class ELBv2Backend(BaseBackend): arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) balancer.listeners[listener.arn] = listener - for action in default_actions: - if action['target_group_arn'] in self.target_groups.keys(): - target_group = self.target_groups[action['target_group_arn']] - target_group.load_balancer_arns.append(load_balancer_arn) + for i, action in enumerate(default_actions): + action_type = action['type'] + if action_type == 'forward': + if action['target_group_arn'] in self.target_groups.keys(): + target_group = self.target_groups[action['target_group_arn']] + target_group.load_balancer_arns.append(load_balancer_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: + raise InvalidActionTypeError(action_type, i + 1) + return listener def describe_load_balancers(self, arns, names): @@ -649,11 +673,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -873,7 +901,7 @@ class ELBv2Backend(BaseBackend): # Its already validated in responses.py listener.ssl_policy = ssl_policy - if default_actions is not None: + if default_actions is not None and default_actions != []: # Is currently not validated listener.default_actions = default_actions diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 1814f1273..3ca53240b 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + {{ action["redirect_config"] }} + {% endif %} {% endfor %} @@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + 
{% endif %} {% endfor %} @@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """{{ action["target_group_arn"] }}m + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -1399,7 +1427,15 @@ MODIFY_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} diff --git a/moto/emr/models.py b/moto/emr/models.py index 6b7147e3f..4b591acb1 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -97,7 +97,8 @@ class FakeCluster(BaseModel): visible_to_all_users='false', release_label=None, requested_ami_version=None, - running_ami_version=None): + running_ami_version=None, + custom_ami_id=None): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self self.emr_backend = emr_backend @@ -162,6 +163,7 @@ class FakeCluster(BaseModel): self.release_label = release_label self.requested_ami_version = requested_ami_version self.running_ami_version = running_ami_version + self.custom_ami_id = custom_ami_id self.role = job_flow_role or 'EMRJobflowDefault' self.service_role = service_role diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 933e0177b..c807b5f54 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse): else: kwargs['running_ami_version'] = '1.0.0' + custom_ami_id = self._get_param('CustomAmiId') + if custom_ami_id: + kwargs['custom_ami_id'] = custom_ami_id + if release_label and release_label < 'emr-5.7.0': + message = 'Custom AMI is not allowed' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + elif ami_version: + message = 'Custom AMI is not supported in this version of EMR' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix('Applications.member') @@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """ 1 else "" + errors = "; ".join(self.errors) + raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format( + errors_len=errors_len, plural=plural, errors=errors, + )) + + def _validate_description(self, value): + errors = [] + if len(value) > 511: + errors.append(self._format_error( + key="description", + value=value, + constraint="Member must have length less than or equal to 512", + )) + if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value): + errors.append(self._format_error( + key="description", + value=value, + constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_name(self, value): + errors = [] + if len(value) > 128: + errors.append(self._format_error( + key="name", + value=value, + constraint="Member must have length less than or equal to 128", + )) + # Note: the backslash is a literal character to match, not an escape.
+ if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value): + errors.append(self._format_error( + key="name", + value=value, + constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_resource_query(self, value): + errors = [] + if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}: + errors.append(self._format_error( + key="resourceQuery.type", + value=value, + constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]", + )) + if len(value["Query"]) > 2048: + errors.append(self._format_error( + key="resourceQuery.query", + value=value, + constraint="Member must have length less than or equal to 2048", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_tags(self, value): + errors = [] + # AWS only outputs one error for all keys and one for all values. + error_keys = None + error_values = None + # Approximates AWS's \p{L}\p{Z}\p{N} classes, which Python's re module does not support. + regex = re.compile(r"^([\w\s.:/=+\-@]*)$", re.UNICODE) + for tag_key, tag_value in value.items(): + # Validation for len(tag_key) >= 1 is done by botocore. + if len(tag_key) > 128 or not re.match(regex, tag_key): + error_keys = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 128, " + "Member must have length greater than or equal to 1, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + # Validation for len(tag_value) >= 0 is nonsensical. + if len(tag_value) > 256 or not re.match(regex, tag_value): + error_values = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 256, " + "Member must have length greater than or equal to 0, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + if error_keys: + errors.append(error_keys) + if error_values: + errors.append(error_values) + if errors: + self.errors += errors + return False + return True + + @property + def description(self): + return self._description + + @description.setter + def description(self, value): + if not self._validate_description(value=value): + self._raise_errors() + self._description = value + + @property + def name(self): + return self._name + + @name.setter + def name(self, value): + if not self._validate_name(value=value): + self._raise_errors() + self._name = value + + @property + def resource_query(self): + return self._resource_query + + @resource_query.setter + def resource_query(self, value): + if not self._validate_resource_query(value=value): + self._raise_errors() + self._resource_query = value + + @property + def tags(self): + return self._tags + + @tags.setter + def tags(self, value): + if not self._validate_tags(value=value): + self._raise_errors() + self._tags = value + + +class ResourceGroups(): + def __init__(self): + self.by_name = {} + self.by_arn = {} + + def __contains__(self, item): + return item in self.by_name + + def append(self, resource_group): + self.by_name[resource_group.name] = resource_group + self.by_arn[resource_group.arn] = resource_group + + def delete(self, name): + group = self.by_name[name] + del self.by_name[name] + del self.by_arn[group.arn] + return group + + +class ResourceGroupsBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceGroupsBackend, self).__init__() + self.region_name =
region_name + self.groups = ResourceGroups() + + @staticmethod + def _validate_resource_query(resource_query): + type = resource_query["Type"] + query = json.loads(resource_query["Query"]) + query_keys = set(query.keys()) + invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax") + if not isinstance(query["ResourceTypeFilters"], list): + raise invalid_json_exception + if type == "CLOUDFORMATION_STACK_1_0": + if query_keys != {"ResourceTypeFilters", "StackIdentifier"}: + raise invalid_json_exception + stack_identifier = query["StackIdentifier"] + if not isinstance(stack_identifier, str): + raise invalid_json_exception + if not re.match( + r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$", + stack_identifier, + ): + raise BadRequestException( + "Invalid query: Verify that the specified ARN is formatted correctly." + ) + # Once checking other resources is implemented. + # if stack_identifier not in self.cloudformation_backend.stacks: + # raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.") + if type == "TAG_FILTERS_1_0": + if query_keys != {"ResourceTypeFilters", "TagFilters"}: + raise invalid_json_exception + tag_filters = query["TagFilters"] + if not isinstance(tag_filters, list): + raise invalid_json_exception + if not tag_filters or len(tag_filters) > 50: + raise BadRequestException( + "Invalid query: The TagFilters list must contain between 1 and 50 elements" + ) + for tag_filter in tag_filters: + if not isinstance(tag_filter, dict): + raise invalid_json_exception + if set(tag_filter.keys()) != {"Key", "Values"}: + raise invalid_json_exception + key = tag_filter["Key"] + if not isinstance(key, str): + raise invalid_json_exception + if not key: + raise BadRequestException( + "Invalid query: The TagFilter element cannot have empty or null Key field" + ) + if len(key) > 128: + raise BadRequestException("Invalid query: The maximum length for a tag Key is 128") + values = tag_filter["Values"] + if not isinstance(values, list): + raise invalid_json_exception + if len(values) > 20: + raise BadRequestException( + "Invalid query: The TagFilter Values list must contain between 0 and 20 elements" + ) + for value in values: + if not isinstance(value, str): + raise invalid_json_exception + if len(value) > 256: + raise BadRequestException( + "Invalid query: The maximum length for a tag Value is 256" + ) + + @staticmethod + def _validate_tags(tags): + for tag in tags: + if tag.lower().startswith('aws:'): + raise BadRequestException("Tag keys must not start with 'aws:'") + + def create_group(self, name, resource_query, description=None, tags=None): + tags = tags or {} + group = FakeResourceGroup( + name=name, + resource_query=resource_query, + description=description, + tags=tags, + ) + if name in self.groups: + raise BadRequestException("Cannot create group: group already exists") + if name.upper().startswith("AWS"): + raise BadRequestException("Group name must not start with 'AWS'") + self._validate_tags(tags) + self._validate_resource_query(resource_query) + self.groups.append(group) + return group + + def delete_group(self, group_name): + return self.groups.delete(name=group_name) + + def get_group(self, group_name): + return self.groups.by_name[group_name] + + def get_tags(self, arn): + return self.groups.by_arn[arn].tags + + # def list_group_resources(self): + # ... 
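# --- Editor's illustrative sketch (not part of the patch) ---------------------
# Drives the create_group/get_group paths above through boto3, assuming the new
# backend is exposed as moto.mock_resourcegroups; the group name, description,
# and tags are hypothetical.
import json
import boto3
from moto import mock_resourcegroups

@mock_resourcegroups
def demo_create_group():
    client = boto3.client('resource-groups', region_name='us-east-1')
    query = json.dumps({
        'ResourceTypeFilters': ['AWS::EC2::Instance'],
        'TagFilters': [{'Key': 'stage', 'Values': ['prod']}],
    })
    resp = client.create_group(
        Name='example-group',
        Description='EC2 instances tagged stage prod',
        ResourceQuery={'Type': 'TAG_FILTERS_1_0', 'Query': query},
        Tags={'team': 'infra'},
    )
    assert resp['Group']['Name'] == 'example-group'
    # get_group round-trips the stored metadata.
    group = client.get_group(GroupName='example-group')['Group']
    assert group['Description'] == 'EC2 instances tagged stage prod'
# ------------------------------------------------------------------------------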
+ + def list_groups(self, filters=None, max_results=None, next_token=None): + return self.groups.by_name + + # def search_resources(self): + # ... + + def tag(self, arn, tags): + all_tags = self.groups.by_arn[arn].tags + all_tags.update(tags) + self._validate_tags(all_tags) + self.groups.by_arn[arn].tags = all_tags + + def untag(self, arn, keys): + group = self.groups.by_arn[arn] + for key in keys: + del group.tags[key] + + def update_group(self, group_name, description=None): + if description: + self.groups.by_name[group_name].description = description + return self.groups.by_name[group_name] + + def update_group_query(self, group_name, resource_query): + self._validate_resource_query(resource_query) + self.groups.by_name[group_name].resource_query = resource_query + return self.groups.by_name[group_name] + + +available_regions = boto3.session.Session().get_available_regions("resource-groups") +resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions} diff --git a/moto/resourcegroups/responses.py b/moto/resourcegroups/responses.py new file mode 100644 index 000000000..02ea14c1a --- /dev/null +++ b/moto/resourcegroups/responses.py @@ -0,0 +1,162 @@ +from __future__ import unicode_literals +import json + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import resourcegroups_backends + + +class ResourceGroupsResponse(BaseResponse): + SERVICE_NAME = 'resource-groups' + + @property + def resourcegroups_backend(self): + return resourcegroups_backends[self.region] + + def create_group(self): + name = self._get_param("Name") + description = self._get_param("Description") + resource_query = self._get_param("ResourceQuery") + tags = self._get_param("Tags") + group = self.resourcegroups_backend.create_group( + name=name, + description=description, + resource_query=resource_query, + tags=tags, + ) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + "ResourceQuery": group.resource_query, + "Tags": group.tags + }) + + def delete_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.delete_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def get_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } + }) + + def get_group_query(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": group.resource_query, + } + }) + + def get_tags(self): + arn = unquote(self._get_param("Arn")) + return json.dumps({ + "Arn": arn, + "Tags": self.resourcegroups_backend.get_tags(arn=arn) + }) + + def list_group_resources(self): + raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented') + + def list_groups(self): + filters = self._get_param("Filters") + if filters: + raise NotImplementedError( + 'ResourceGroups.list_groups with filter parameter is not yet implemented' + ) + max_results = self._get_int_param("MaxResults", 50) + next_token = self._get_param("NextToken") + groups = 
self.resourcegroups_backend.list_groups( + filters=filters, + max_results=max_results, + next_token=next_token + ) + return json.dumps({ + "GroupIdentifiers": [{ + "GroupName": group.name, + "GroupArn": group.arn, + } for group in groups.values()], + "Groups": [{ + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } for group in groups.values()], + "NextToken": next_token, + }) + + def search_resources(self): + raise NotImplementedError('ResourceGroups.search_resources is not yet implemented') + + def tag(self): + arn = unquote(self._get_param("Arn")) + tags = self._get_param("Tags") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.tag(arn=arn, tags=tags) + return json.dumps({ + "Arn": arn, + "Tags": tags + }) + + def untag(self): + arn = unquote(self._get_param("Arn")) + keys = self._get_param("Keys") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.untag(arn=arn, keys=keys) + return json.dumps({ + "Arn": arn, + "Keys": keys + }) + + def update_group(self): + group_name = self._get_param("GroupName") + description = self._get_param("Description", "") + group = self.resourcegroups_backend.update_group(group_name=group_name, description=description) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def update_group_query(self): + group_name = self._get_param("GroupName") + resource_query = self._get_param("ResourceQuery") + group = self.resourcegroups_backend.update_group_query( + group_name=group_name, + resource_query=resource_query + ) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": resource_query + } + }) diff --git a/moto/resourcegroups/urls.py b/moto/resourcegroups/urls.py new file mode 100644 index 000000000..518dde766 --- /dev/null +++ b/moto/resourcegroups/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import ResourceGroupsResponse + +url_bases = [ + "https?://resource-groups(-fips)?.(.+).amazonaws.com", +] + +url_paths = { + '{0}/groups$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P<group_name>[^/]+)$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P<group_name>[^/]+)/query$': ResourceGroupsResponse.dispatch, + '{0}/groups-list$': ResourceGroupsResponse.dispatch, + '{0}/resources/(?P<resource_arn>[^/]+)/tags$': ResourceGroupsResponse.dispatch, +} diff --git a/moto/s3/models.py b/moto/s3/models.py index 9e4a6a766..7488114e3 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -17,8 +17,11 @@ import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, \ - EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, DuplicateTagKeys +from .exceptions import ( + BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest, + EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, + InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted +) from .utils import clean_key_name, _VersionedKeyStore
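# --- Editor's illustrative sketch (not part of the patch) ---------------------
# The transfer-acceleration behaviour implemented in the changes below, driven
# through boto3; the bucket name is hypothetical.
import boto3
from moto import mock_s3

@mock_s3
def demo_accelerate_configuration():
    s3 = boto3.client('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='example-bucket')
    # Unset until explicitly enabled: the response carries no Status key.
    resp = s3.get_bucket_accelerate_configuration(Bucket='example-bucket')
    assert 'Status' not in resp
    s3.put_bucket_accelerate_configuration(
        Bucket='example-bucket',
        AccelerateConfiguration={'Status': 'Enabled'},
    )
    resp = s3.get_bucket_accelerate_configuration(Bucket='example-bucket')
    assert resp['Status'] == 'Enabled'
# ------------------------------------------------------------------------------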
MAX_BUCKET_NAME_LENGTH = 63 @@ -463,6 +466,7 @@ class FakeBucket(BaseModel): self.cors = [] self.logging = {} self.notification_configuration = None + self.accelerate_configuration = None @property def location(self): @@ -557,7 +561,6 @@ class FakeBucket(BaseModel): self.rules = [] def set_cors(self, rules): - from moto.s3.exceptions import InvalidRequest, MalformedXML self.cors = [] if len(rules) > 100: @@ -607,7 +610,6 @@ class FakeBucket(BaseModel): self.logging = {} return - from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted # Target bucket must exist in the same account (assuming all moto buckets are in the same account): if not bucket_backend.buckets.get(logging_config["TargetBucket"]): raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") @@ -655,6 +657,13 @@ class FakeBucket(BaseModel): if region != self.region_name: raise InvalidNotificationDestination() + def set_accelerate_configuration(self, accelerate_config): + if self.accelerate_configuration is None and accelerate_config == 'Suspended': + # Cannot suspend an acceleration that was never enabled; leave it unset + return + + self.accelerate_configuration = accelerate_config + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -857,6 +866,15 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.set_notification_configuration(notification_config) + def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration): + if accelerate_configuration not in ['Enabled', 'Suspended']: + raise MalformedXML() + + bucket = self.get_bucket(bucket_name) + if bucket.name.find('.') != -1: + raise InvalidRequest('PutBucketAccelerateConfiguration') + bucket.set_accelerate_configuration(accelerate_configuration) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) @@ -894,12 +912,11 @@ class S3Backend(BaseBackend): return multipart.set_part(part_id, value) def copy_part(self, dest_bucket_name, multipart_id, part_id, - src_bucket_name, src_key_name, start_byte, end_byte): - src_key_name = clean_key_name(src_key_name) - src_bucket = self.get_bucket(src_bucket_name) + src_bucket_name, src_key_name, src_version_id, start_byte, end_byte): dest_bucket = self.get_bucket(dest_bucket_name) multipart = dest_bucket.multiparts[multipart_id] - src_value = src_bucket.keys[src_key_name].value + + src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value if start_byte is not None: src_value = src_value[start_byte:end_byte + 1] return multipart.set_part(part_id, src_value) diff --git a/moto/s3/responses.py b/moto/s3/responses.py old mode 100755 new mode 100644 index 856178941..e03666666 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -257,6 +257,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 200, {}, "" template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) return template.render(bucket=bucket) + elif "accelerate" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if bucket.accelerate_configuration is None: + template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET) + return 200, {}, template.render() + template = self.response_template(S3_BUCKET_ACCELERATE) + return template.render(bucket=bucket) elif 'versions' in querystring: delimiter = querystring.get('delimiter', [None])[0] @@ -442,6 +449,15 @@ class
ResponseObject(_TemplateEnvironmentMixin): raise MalformedXML() except Exception as e: raise e + elif "accelerate" in querystring: + try: + accelerate_status = self._accelerate_config_from_xml(body) + self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e else: if body: @@ -691,6 +707,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) + + src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None) src_range = request.headers.get( 'x-amz-copy-source-range', '').split("bytes=")[-1] @@ -700,9 +718,13 @@ class ResponseObject(_TemplateEnvironmentMixin): except ValueError: start_byte, end_byte = None, None - key = self.backend.copy_part( - bucket_name, upload_id, part_number, src_bucket, - src_key, start_byte, end_byte) + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + key = self.backend.copy_part( + bucket_name, upload_id, part_number, src_bucket, + src_key, src_version_id, start_byte, end_byte) + else: + return 404, response_headers, "" + template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE) response = template.render(part=key) else: @@ -741,8 +763,13 @@ class ResponseObject(_TemplateEnvironmentMixin): lstrip("/").split("/", 1) src_version_id = parse_qs(src_key_parsed.query).get( 'versionId', [None])[0] - self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, - storage=storage_class, acl=acl, src_version_id=src_version_id) + + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, + storage=storage_class, acl=acl, src_version_id=src_version_id) + else: + return 404, response_headers, "" + new_key = self.backend.get_key(bucket_name, key_name) mdirective = request.headers.get('x-amz-metadata-directive') if mdirective is not None and mdirective == 'REPLACE': @@ -1034,6 +1061,11 @@ class ResponseObject(_TemplateEnvironmentMixin): return parsed_xml["NotificationConfiguration"] + def _accelerate_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + config = parsed_xml['AccelerateConfiguration'] + return config['Status'] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -1686,3 +1718,13 @@ S3_GET_BUCKET_NOTIFICATION_CONFIG = """ {% endfor %} </NotificationConfiguration> """ + +S3_BUCKET_ACCELERATE = """ +<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <Status>{{ bucket.accelerate_configuration }}</Status> +</AccelerateConfiguration> +""" + +S3_BUCKET_ACCELERATE_NOT_SET = """ +<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/> +""" diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py index 06010c411..fa81b6d8b 100644 --- a/moto/secretsmanager/exceptions.py +++ b/moto/secretsmanager/exceptions.py @@ -29,6 +29,14 @@ class InvalidParameterException(SecretsManagerClientError): message) +class ResourceExistsException(SecretsManagerClientError): + def __init__(self, message): + super(ResourceExistsException, self).__init__( + 'ResourceExistsException', + message + ) + + class InvalidRequestException(SecretsManagerClientError): def __init__(self, message): super(InvalidRequestException, self).__init__( diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index 44ac1ef47..ec90c3e19 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -11,6 +11,7 @@ from
moto.core import BaseBackend, BaseModel from .exceptions import ( ResourceNotFoundException, InvalidParameterException, + ResourceExistsException, InvalidRequestException, ClientError ) @@ -47,6 +48,17 @@ class SecretsManagerBackend(BaseBackend): if not self._is_valid_identifier(secret_id): raise ResourceNotFoundException() + if not version_id and version_stage: + # set version_id to match version_stage + versions_dict = self.secrets[secret_id]['versions'] + for ver_id, ver_val in versions_dict.items(): + if version_stage in ver_val['version_stages']: + version_id = ver_id + break + if not version_id: + raise ResourceNotFoundException() + + # TODO check this part if 'deleted_date' in self.secrets[secret_id]: raise InvalidRequestException( "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \ ) secret = self.secrets[secret_id] + version_id = version_id or secret['default_version_id'] + + secret_version = secret['versions'][version_id] response = json.dumps({ "ARN": secret_arn(self.region, secret['secret_id']), "Name": secret['name'], - "VersionId": secret['version_id'], - "SecretString": secret['secret_string'], - "VersionStages": [ - "AWSCURRENT", - ], - "CreatedDate": secret['createdate'] + "VersionId": secret_version['version_id'], + "SecretString": secret_version['secret_string'], + "VersionStages": secret_version['version_stages'], + "CreatedDate": secret_version['createdate'], }) return response def create_secret(self, name, secret_string, tags, **kwargs): - generated_version_id = str(uuid.uuid4()) + # error if secret exists + if name in self.secrets: + raise ResourceExistsException('A resource with the ID you requested already exists.') - secret = { - 'secret_string': secret_string, - 'secret_id': name, - 'name': name, - 'createdate': int(time.time()), - 'rotation_enabled': False, - 'rotation_lambda_arn': '', - 'auto_rotate_after_days': 0, - 'version_id': generated_version_id, - 'tags': tags - } - - self.secrets[name] = secret + version_id = self._add_secret(name, secret_string, tags=tags) response = json.dumps({ "ARN": secret_arn(self.region, name), "Name": name, - "VersionId": generated_version_id, + "VersionId": version_id, }) + + return response + + def _add_secret(self, secret_id, secret_string, tags=[], version_id=None, version_stages=None): + + if version_stages is None: + version_stages = ['AWSCURRENT'] + + if not version_id: + version_id = str(uuid.uuid4()) + + secret_version = { + 'secret_string': secret_string, + 'createdate': int(time.time()), + 'version_id': version_id, + 'version_stages': version_stages, + } + + if secret_id in self.secrets: + # remove all old AWSPREVIOUS stages + for secret_version_to_look_at in self.secrets[secret_id]['versions'].values(): + if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']: + secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS') + + # set old AWSCURRENT secret to AWSPREVIOUS + previous_current_version_id = self.secrets[secret_id]['default_version_id'] + self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS'] + + self.secrets[secret_id]['versions'][version_id] = secret_version + self.secrets[secret_id]['default_version_id'] = version_id + else: + self.secrets[secret_id] = { + 'versions': { + version_id: secret_version + }, + 'default_version_id': version_id, + } + + secret = self.secrets[secret_id] + secret['secret_id'] = secret_id + secret['name'] =
secret_id + secret['rotation_enabled'] = False + secret['rotation_lambda_arn'] = '' + secret['auto_rotate_after_days'] = 0 + secret['tags'] = tags + + return version_id + + def put_secret_value(self, secret_id, secret_string, version_stages): + + version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages) + + response = json.dumps({ + 'ARN': secret_arn(self.region, secret_id), + 'Name': secret_id, + 'VersionId': version_id, + 'VersionStages': version_stages }) return response @@ -162,17 +223,24 @@ class SecretsManagerBackend(BaseBackend): secret = self.secrets[secret_id] - secret['version_id'] = client_request_token or '' + old_secret_version = secret['versions'][secret['default_version_id']] + new_version_id = client_request_token or str(uuid.uuid4()) + + self._add_secret(secret_id, old_secret_version['secret_string'], secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT']) + secret['rotation_lambda_arn'] = rotation_lambda_arn or '' if rotation_rules: secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0) if secret['auto_rotate_after_days'] > 0: secret['rotation_enabled'] = True + if 'AWSCURRENT' in old_secret_version['version_stages']: + old_secret_version['version_stages'].remove('AWSCURRENT') + response = json.dumps({ "ARN": secret_arn(self.region, secret['secret_id']), "Name": secret['name'], - "VersionId": secret['version_id'] + "VersionId": new_version_id }) return response @@ -206,28 +274,54 @@ class SecretsManagerBackend(BaseBackend): return response + def list_secret_version_ids(self, secret_id): + secret = self.secrets[secret_id] + + version_list = [] + for version_id, version in secret['versions'].items(): + version_list.append({ + 'CreatedDate': int(time.time()), + 'LastAccessedDate': int(time.time()), + 'VersionId': version_id, + 'VersionStages': version['version_stages'], + }) + + response = json.dumps({ + 'ARN': secret['secret_id'], + 'Name': secret['name'], + 'NextToken': '', + 'Versions': version_list, + }) + + return response + def list_secrets(self, max_results, next_token): # TODO implement pagination and limits - secret_list = [{ - "ARN": secret_arn(self.region, secret['secret_id']), - "DeletedDate": secret.get('deleted_date', None), - "Description": "", - "KmsKeyId": "", - "LastAccessedDate": None, - "LastChangedDate": None, - "LastRotatedDate": None, - "Name": secret['name'], - "RotationEnabled": secret['rotation_enabled'], - "RotationLambdaARN": secret['rotation_lambda_arn'], - "RotationRules": { - "AutomaticallyAfterDays": secret['auto_rotate_after_days'] - }, - "SecretVersionsToStages": { - secret['version_id']: ["AWSCURRENT"] - }, - "Tags": secret['tags'] - } for secret in self.secrets.values()] + secret_list = [] + for secret in self.secrets.values(): + + versions_to_stages = {} + for version_id, version in secret['versions'].items(): + versions_to_stages[version_id] = version['version_stages'] + + secret_list.append({ + "ARN": secret_arn(self.region, secret['secret_id']), + "DeletedDate": secret.get('deleted_date', None), + "Description": "", + "KmsKeyId": "", + "LastAccessedDate": None, + "LastChangedDate": None, + "LastRotatedDate": None, + "Name": secret['name'], + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], + "RotationRules": { + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] + }, + "SecretVersionsToStages": versions_to_stages, + "Tags": secret['tags'] + }) return secret_list, None diff --git a/moto/secretsmanager/responses.py 
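The _add_secret logic above keeps exactly one AWSCURRENT and one AWSPREVIOUS label per secret; a sketch of the behaviour it is meant to produce through boto3 (names and values arbitrary):

import boto3
from moto import mock_secretsmanager

@mock_secretsmanager
def staging_labels_move():
    sm = boto3.client('secretsmanager', region_name='us-west-2')
    sm.create_secret(Name='test-secret', SecretString='first')
    sm.put_secret_value(SecretId='test-secret', SecretString='second',
                        VersionStages=['AWSCURRENT'])

    # The previous AWSCURRENT version has been relabelled AWSPREVIOUS.
    current = sm.get_secret_value(SecretId='test-secret')
    previous = sm.get_secret_value(SecretId='test-secret',
                                   VersionStage='AWSPREVIOUS')
    assert current['SecretString'] == 'second'
    assert previous['SecretString'] == 'first'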
b/moto/secretsmanager/responses.py index 0eb02e39b..fe51d8c1b 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -67,6 +67,22 @@ class SecretsManagerResponse(BaseResponse): rotation_rules=rotation_rules ) + def put_secret_value(self): + secret_id = self._get_param('SecretId', if_none='') + secret_string = self._get_param('SecretString', if_none='') + version_stages = self._get_param('VersionStages', if_none=['AWSCURRENT']) + return secretsmanager_backends[self.region].put_secret_value( + secret_id=secret_id, + secret_string=secret_string, + version_stages=version_stages, + ) + + def list_secret_version_ids(self): + secret_id = self._get_param('SecretId', if_none='') + return secretsmanager_backends[self.region].list_secret_version_ids( + secret_id=secret_id + ) + def list_secrets(self): max_results = self._get_int_param("MaxResults") next_token = self._get_param("NextToken") diff --git a/moto/sns/models.py b/moto/sns/models.py index 41e83aba4..c764cb25f 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -255,7 +255,7 @@ class SNSBackend(BaseBackend): return candidate_topic def _get_values_nexttoken(self, values_map, next_token=None): - if next_token is None: + if next_token is None or not next_token: next_token = 0 next_token = int(next_token) values = list(values_map.values())[ diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b4f64b14e..5ddaf8849 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -420,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - {{ requestid }} + """ @@ -429,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - {{ requestid }} + """ @@ -440,13 +440,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_QUEUE_RESPONSE = """ - {{ requestid }} + """ @@ -460,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - {{ requestid }} + """ @@ -483,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -543,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - {{ requestid }} + """ @@ -561,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -580,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - {{ requestid }} + """ @@ -613,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ - {{ requestid }} + """ diff --git a/setup.py b/setup.py index 2ef3227d4..e3fea2828 100755 --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ install_requires = [ "xmltodict", "six>1.9", "werkzeug", - "PyYAML", + "PyYAML==3.13", "pytz", "python-dateutil<3.0.0,>=2.1", "python-jose<4.0.0", @@ -39,6 +39,7 @@ install_requires = [ "responses>=0.9.0", "idna<2.9,>=2.5", "cfn-lint", + "sshpubkeys>=3.1.0,<4.0" ] extras_require = { diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b1a65fb7e..750605c07 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -32,7 +32,7 @@ def test_create_autoscaling_group(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a', 'us-east-1b'], default_cooldown=60, desired_capacity=2, health_check_period=100, @@ -42,7 +42,10 @@ def test_create_autoscaling_group(): launch_config=config, 
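The one-line SNS change above coerces an empty-string NextToken to 0 instead of failing in int(''); a sketch of the kind of call this tolerates (topic name arbitrary):

import boto3
from moto import mock_sns

@mock_sns
def empty_next_token():
    sns = boto3.client('sns', region_name='us-east-1')
    sns.create_topic(Name='some-topic')
    # Some callers send NextToken='' on the first page; this no longer raises.
    topics = sns.list_topics(NextToken='')
    assert len(topics['Topics']) == 1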
load_balancers=["test_lb"], placement_group="test_placement", - vpc_zone_identifier=mocked_networking['subnet1'], + vpc_zone_identifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), termination_policies=["OldestInstance", "NewestInstance"], tags=[Tag( resource_id='tester_group', @@ -57,12 +60,15 @@ def test_create_autoscaling_group(): group = conn.get_all_groups()[0] group.name.should.equal('tester_group') set(group.availability_zones).should.equal( - set(['us-east-1c', 'us-east-1b'])) + set(['us-east-1a', 'us-east-1b'])) group.desired_capacity.should.equal(2) group.max_size.should.equal(2) group.min_size.should.equal(2) group.instances.should.have.length_of(2) - group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) + group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + )) group.launch_config_name.should.equal('tester') group.default_cooldown.should.equal(60) group.health_check_period.should.equal(100) @@ -109,7 +115,7 @@ def test_create_autoscaling_groups_defaults(): group.launch_config_name.should.equal('tester') # Defaults - list(group.availability_zones).should.equal([]) + list(group.availability_zones).should.equal(['us-east-1a']) # subnet1 group.desired_capacity.should.equal(2) group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.default_cooldown.should.equal(300) @@ -217,7 +223,6 @@ def test_autoscaling_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], desired_capacity=2, max_size=2, min_size=2, @@ -227,13 +232,16 @@ def test_autoscaling_update(): conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] + group.availability_zones.should.equal(['us-east-1a']) group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) - group.vpc_zone_identifier = 'subnet-5678efgh' + group.availability_zones = ['us-east-1b'] + group.vpc_zone_identifier = mocked_networking['subnet2'] group.update() group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-5678efgh') + group.availability_zones.should.equal(['us-east-1b']) + group.vpc_zone_identifier.should.equal(mocked_networking['subnet2']) @mock_autoscaling_deprecated @@ -249,7 +257,7 @@ def test_autoscaling_tags_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -309,7 +317,7 @@ def test_autoscaling_group_delete(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): mocked_networking = setup_networking_deprecated() - conn = boto.connect_autoscale() + conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = LaunchConfiguration( name='tester', image_id='ami-abcd1234', @@ -332,7 +340,7 @@ def test_autoscaling_group_describe_instances(): instances[0].health_status.should.equal('Healthy') autoscale_instance_ids = [instance.instance_id for instance in instances] - ec2_conn = boto.connect_ec2() + ec2_conn = boto.ec2.connect_to_region('us-east-1') reservations = ec2_conn.get_all_instances() instances = reservations[0].instances instances.should.have.length_of(2) @@ -355,7 +363,7 @@ def test_set_desired_capacity_up(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, 
min_size=2, @@ -391,7 +399,7 @@ def test_set_desired_capacity_down(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -427,7 +435,7 @@ def test_set_desired_capacity_the_same(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, @@ -543,6 +551,7 @@ def test_describe_load_balancers(): ) response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + assert response['ResponseMetadata']['RequestId'] list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') @@ -738,8 +747,12 @@ def test_describe_autoscaling_groups_boto3(): response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) group = response['AutoScalingGroups'][0] group['AutoScalingGroupName'].should.equal('test_asg') + group['AvailabilityZones'].should.equal(['us-east-1a']) + group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1']) group['NewInstancesProtectedFromScaleIn'].should.equal(True) - group['Instances'][0]['ProtectedFromScaleIn'].should.equal(True) + for instance in group['Instances']: + instance['AvailabilityZone'].should.equal('us-east-1a') + instance['ProtectedFromScaleIn'].should.equal(True) @mock_autoscaling @@ -770,6 +783,7 @@ def test_describe_autoscaling_instances_boto3(): response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) for instance in response['AutoScalingInstances']: instance['AutoScalingGroupName'].should.equal('test_asg') + instance['AvailabilityZone'].should.equal('us-east-1a') instance['ProtectedFromScaleIn'].should.equal(True) @@ -793,6 +807,10 @@ def test_update_autoscaling_group_boto3(): _ = client.update_auto_scaling_group( AutoScalingGroupName='test_asg', MinSize=1, + VPCZoneIdentifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), NewInstancesProtectedFromScaleIn=False, ) @@ -801,6 +819,7 @@ def test_update_autoscaling_group_boto3(): ) group = response['AutoScalingGroups'][0] group['MinSize'].should.equal(1) + set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'}) group['NewInstancesProtectedFromScaleIn'].should.equal(False) diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 3a50484c1..a142fd133 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -106,7 +106,7 @@ def test_detach_all_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=mocked_networking['vpc']) + VPCZoneIdentifier=mocked_networking['subnet1']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py index b167ba5f5..ebbffbed3 100644 --- a/tests/test_autoscaling/utils.py +++ b/tests/test_autoscaling/utils.py @@ -1,5 +1,6 @@ import boto import boto3 +from boto import vpc as boto_vpc from moto import mock_ec2, mock_ec2_deprecated @@ -19,9 +20,14 @@ def setup_networking(): @mock_ec2_deprecated def setup_networking_deprecated(): - conn = boto.connect_vpc() + conn = boto_vpc.connect_to_region('us-east-1') vpc = conn.create_vpc("10.11.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24") - subnet2 = 
conn.create_subnet(vpc.id, "10.11.2.0/24") + subnet1 = conn.create_subnet( + vpc.id, + "10.11.1.0/24", + availability_zone='us-east-1a') + subnet2 = conn.create_subnet( + vpc.id, + "10.11.2.0/24", + availability_zone='us-east-1b') return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} - diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 479aaaa8a..9ef6fdb0d 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -282,7 +282,7 @@ def test_create_function_from_aws_bucket(): result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -291,7 +291,7 @@ def test_create_function_from_aws_bucket(): 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"], @@ -327,7 +327,7 @@ def test_create_function_from_zipfile(): result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -336,7 +336,7 @@ def test_create_function_from_zipfile(): 'Timeout': 3, 'MemorySize': 128, 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": [], "SubnetIds": [], @@ -398,6 +398,8 @@ def test_get_function(): # Test get function with result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST') + # Test get function when can't find function name with assert_raises(ClientError): @@ -464,7 +466,7 @@ def test_publish(): Description='test lambda function', Timeout=3, MemorySize=128, - Publish=True, + Publish=False, ) function_list = conn.list_functions() @@ -485,7 +487,7 @@ def test_publish(): function_list = conn.list_functions() function_list['Functions'].should.have.length_of(1) - function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') + function_list['Functions'][0]['FunctionArn'].should.contain('testFunction') @mock_lambda @@ -528,7 +530,7 @@ def test_list_create_list_get_delete_list(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.lambda_handler", "MemorySize": 128, @@ -701,7 +703,7 @@ def test_invoke_async_function(): ) success_result = conn.invoke_async( - FunctionName='testFunction', + FunctionName='testFunction', InvokeArgs=json.dumps({'test': 'event'}) ) @@ -741,7 +743,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": 
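setup_networking_deprecated now pins each subnet to a named availability zone, which the AZ assertions in the autoscaling tests depend on; a boto3 sketch of the same fixture (CIDRs and AZ names as in the patch):

import boto3
from moto import mock_ec2

@mock_ec2
def setup_networking_boto3():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.11.0.0/16')
    subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.11.1.0/24',
                                AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.11.2.0/24',
                                AvailabilityZone='us-east-1b')
    return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id}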
hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, @@ -842,7 +844,7 @@ def test_list_versions_by_function(): conn.create_function( FunctionName='testFunction', Runtime='python2.7', - Role='test-iam-role', + Role='arn:aws:iam::123456789012:role/test-iam-role', Handler='lambda_function.lambda_handler', Code={ 'S3Bucket': 'test-bucket', @@ -857,8 +859,28 @@ def test_list_versions_by_function(): res = conn.publish_version(FunctionName='testFunction') assert res['ResponseMetadata']['HTTPStatusCode'] == 201 versions = conn.list_versions_by_function(FunctionName='testFunction') - + assert len(versions['Versions']) == 3 assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST' + assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1' + assert versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2' + + conn.create_function( + FunctionName='testFunction_2', + Runtime='python2.7', + Role='arn:aws:iam::123456789012:role/test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=False, + ) + versions = conn.list_versions_by_function(FunctionName='testFunction_2') + assert len(versions['Versions']) == 1 + assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST' @mock_lambda diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 449fde4ce..42ddd2351 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import json import base64 +from decimal import Decimal + import boto import boto.cloudformation import boto.datapipeline @@ -22,6 +24,7 @@ from moto import ( mock_cloudformation, mock_cloudformation_deprecated, mock_datapipeline_deprecated, + mock_dynamodb2, mock_ec2, mock_ec2_deprecated, mock_elb, @@ -39,6 +42,7 @@ from moto import ( mock_sqs, mock_sqs_deprecated, mock_elbv2) +from moto.dynamodb2.models import Table from .fixtures import ( ec2_classic_eip, @@ -2085,7 +2089,7 @@ def test_stack_kms(): def test_stack_spot_fleet(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -2169,7 +2173,7 @@ def test_stack_spot_fleet(): def test_stack_spot_fleet_should_figure_out_default_price(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -2433,3 +2437,131 @@ 
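The lambda assertions above pin down the new ARN semantics: create_function and list_functions return an unqualified FunctionArn, while qualified ARNs (':$LATEST', ':1', ...) appear per version. A compact sketch of that behaviour (role ARN is the dummy used in the tests):

import io
import zipfile

import boto3
from moto import mock_lambda

def _zip_bytes():
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('lambda_function.py',
                    'def lambda_handler(event, context):\n    return event\n')
    return buf.getvalue()

@mock_lambda
def version_arns():
    conn = boto3.client('lambda', region_name='us-west-2')
    fn = conn.create_function(
        FunctionName='testFunction', Runtime='python2.7',
        Role='arn:aws:iam::123456789012:role/test-iam-role',
        Handler='lambda_function.lambda_handler',
        Code={'ZipFile': _zip_bytes()}, Publish=False)
    # Unqualified ARN on create.
    assert not fn['FunctionArn'].endswith(':$LATEST')

    conn.publish_version(FunctionName='testFunction')
    versions = conn.list_versions_by_function(
        FunctionName='testFunction')['Versions']
    # Qualified ARNs only in the per-version listing.
    assert versions[0]['FunctionArn'].endswith(':$LATEST')
    assert versions[-1]['FunctionArn'].endswith(':1')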
def test_stack_elbv2_resources_integration(): dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) + + +@mock_dynamodb2 +@mock_cloudformation +def test_stack_dynamodb_resources_integration(): + dynamodb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "myDynamoDBTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "Album", + "AttributeType": "S" + }, + { + "AttributeName": "Artist", + "AttributeType": "S" + }, + { + "AttributeName": "Sales", + "AttributeType": "N" + }, + { + "AttributeName": "NumberOfSongs", + "AttributeType": "N" + } + ], + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + }, + "TableName": "myTableName", + "GlobalSecondaryIndexes": [{ + "IndexName": "myGSI", + "KeySchema": [ + { + "AttributeName": "Sales", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","NumberOfSongs"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }, + { + "IndexName": "myGSI2", + "KeySchema": [ + { + "AttributeName": "NumberOfSongs", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","Artist"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }], + "LocalSecondaryIndexes":[{ + "IndexName": "myLSI", + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Artist","NumberOfSongs"], + "ProjectionType": "INCLUDE" + } + }] + } + } + } + } + + dynamodb_template_json = json.dumps(dynamodb_template) + + cfn_conn = boto3.client('cloudformation', 'us-east-1') + cfn_conn.create_stack( + StackName='dynamodb_stack', + TemplateBody=dynamodb_template_json, + ) + + dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1') + table = dynamodb_conn.Table('myTableName') + table.name.should.equal('myTableName') + + table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5}) + + response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"}) + + response['Item']['Album'].should.equal('myAlbum') + response['Item']['Sales'].should.equal(Decimal('10')) + response['Item']['NumberOfSongs'].should.equal(Decimal('5')) + response['Item']['Album'].should.equal('myAlbum') diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index d21db2d48..25242e352 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ b/tests/test_cloudformation/test_stack_parsing.py @@ -83,6 +83,18 @@ get_availability_zones_output = { } } +parameters = { + "Parameters": { + "Param": { + "Type": "String", + }, + "NoEchoParam": { + "Type": "String", + "NoEcho": True + } + } +} + split_select_template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { @@ -157,6 +169,9 @@ get_attribute_outputs_template = dict( get_availability_zones_template = dict( list(dummy_template.items()) + list(get_availability_zones_output.items())) +parameters_template = dict( + 
list(dummy_template.items()) + list(parameters.items())) + dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) output_type_template_json = json.dumps(outputs_template) @@ -165,6 +180,7 @@ get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) get_availability_zones_template_json = json.dumps( get_availability_zones_template) +parameters_template_json = json.dumps(parameters_template) split_select_template_json = json.dumps(split_select_template) sub_template_json = json.dumps(sub_template) export_value_template_json = json.dumps(export_value_template) @@ -290,6 +306,18 @@ def test_parse_stack_with_bad_get_attribute_outputs(): "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) +def test_parse_stack_with_parameters(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=parameters_template_json, + parameters={"Param": "visible value", "NoEchoParam": "hidden value"}, + region_name='us-west-1') + + stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam") + stack.resource_map.no_echo_parameter_keys.should_not.have("Param") + + def test_parse_equals_condition(): parse_condition( condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index e4e38e821..1483fcd0e 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1162,3 +1162,53 @@ def test_confirm_forgot_password(): ConfirmationCode=str(uuid.uuid4()), Password=str(uuid.uuid4()), ) + +@mock_cognitoidp +def test_admin_update_user_attributes(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + { + 'Name': 'family_name', + 'Value': 'Doe', + }, + { + 'Name': 'given_name', + 'Value': 'John', + } + ] + ) + + conn.admin_update_user_attributes( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + { + 'Name': 'family_name', + 'Value': 'Doe', + }, + { + 'Name': 'given_name', + 'Value': 'Jane', + } + ] + ) + + user = conn.admin_get_user( + UserPoolId=user_pool_id, + Username=username + ) + attributes = user['UserAttributes'] + attributes.should.be.a(list) + for attr in attributes: + val = attr['Value'] + if attr['Name'] == 'family_name': + val.should.equal('Doe') + elif attr['Name'] == 'given_name': + val.should.equal('Jane') diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index f3f369ff3..d0f672ab8 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -1,81 +1,89 @@ -from __future__ import unicode_literals - -import sure # noqa - -from moto.core.responses import AWSServiceSpec -from moto.core.responses import flatten_json_request_body - - -def test_flatten_json_request_body(): - spec = AWSServiceSpec( - 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') - - body = { - 'Name': 'cluster', - 'Instances': { - 'Ec2KeyName': 'ec2key', - 'InstanceGroups': [ - {'InstanceRole': 'MASTER', - 'InstanceType': 'm1.small'}, - {'InstanceRole': 'CORE', - 'InstanceType': 'm1.medium'}, - ], - 'Placement': {'AvailabilityZone': 'us-east-1'}, - }, - 'Steps': [ - {'HadoopJarStep': { - 'Properties': [ - {'Key': 'k1', 'Value': 'v1'}, - {'Key': 'k2', 'Value': 'v2'} - 
], - 'Args': ['arg1', 'arg2']}}, - ], - 'Configurations': [ - {'Classification': 'class', - 'Properties': {'propkey1': 'propkey1', - 'propkey2': 'propkey2'}}, - {'Classification': 'anotherclass', - 'Properties': {'propkey3': 'propkey3'}}, - ] - } - - flat = flatten_json_request_body('', body, spec) - flat['Name'].should.equal(body['Name']) - flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) - for idx in range(2): - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( - body['Instances']['InstanceGroups'][idx]['InstanceRole']) - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( - body['Instances']['InstanceGroups'][idx]['InstanceType']) - flat['Instances.Placement.AvailabilityZone'].should.equal( - body['Instances']['Placement']['AvailabilityZone']) - - for idx in range(1): - prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' - step = body['Steps'][idx]['HadoopJarStep'] - i = 0 - while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: - flat[prefix + '.Properties.member.' + - str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) - flat[prefix + '.Properties.member.' + - str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) - i += 1 - i = 0 - while prefix + '.Args.member.' + str(i + 1) in flat: - flat[prefix + '.Args.member.' + - str(i + 1)].should.equal(step['Args'][i]) - i += 1 - - for idx in range(2): - flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal( - body['Configurations'][idx]['Classification']) - - props = {} - i = 1 - keyfmt = 'Configurations.member.{0}.Properties.entry.{1}' - key = keyfmt.format(idx + 1, i) - while key + '.key' in flat: - props[flat[key + '.key']] = flat[key + '.value'] - i += 1 - key = keyfmt.format(idx + 1, i) - props.should.equal(body['Configurations'][idx]['Properties']) +from __future__ import unicode_literals + +import sure # noqa + +from botocore.awsrequest import AWSPreparedRequest + +from moto.core.responses import AWSServiceSpec, BaseResponse +from moto.core.responses import flatten_json_request_body + + +def test_flatten_json_request_body(): + spec = AWSServiceSpec( + 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') + + body = { + 'Name': 'cluster', + 'Instances': { + 'Ec2KeyName': 'ec2key', + 'InstanceGroups': [ + {'InstanceRole': 'MASTER', + 'InstanceType': 'm1.small'}, + {'InstanceRole': 'CORE', + 'InstanceType': 'm1.medium'}, + ], + 'Placement': {'AvailabilityZone': 'us-east-1'}, + }, + 'Steps': [ + {'HadoopJarStep': { + 'Properties': [ + {'Key': 'k1', 'Value': 'v1'}, + {'Key': 'k2', 'Value': 'v2'} + ], + 'Args': ['arg1', 'arg2']}}, + ], + 'Configurations': [ + {'Classification': 'class', + 'Properties': {'propkey1': 'propkey1', + 'propkey2': 'propkey2'}}, + {'Classification': 'anotherclass', + 'Properties': {'propkey3': 'propkey3'}}, + ] + } + + flat = flatten_json_request_body('', body, spec) + flat['Name'].should.equal(body['Name']) + flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) + for idx in range(2): + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceRole']) + flat['Instances.InstanceGroups.member.' 
+ str(idx + 1) + '.InstanceType'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceType']) + flat['Instances.Placement.AvailabilityZone'].should.equal( + body['Instances']['Placement']['AvailabilityZone']) + + for idx in range(1): + prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' + step = body['Steps'][idx]['HadoopJarStep'] + i = 0 + while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) + i += 1 + i = 0 + while prefix + '.Args.member.' + str(i + 1) in flat: + flat[prefix + '.Args.member.' + + str(i + 1)].should.equal(step['Args'][i]) + i += 1 + + for idx in range(2): + flat['Configurations.member.' + str(idx + 1) + '.Classification'].should.equal( + body['Configurations'][idx]['Classification']) + + props = {} + i = 1 + keyfmt = 'Configurations.member.{0}.Properties.entry.{1}' + key = keyfmt.format(idx + 1, i) + while key + '.key' in flat: + props[flat[key + '.key']] = flat[key + '.value'] + i += 1 + key = keyfmt.format(idx + 1, i) + props.should.equal(body['Configurations'][idx]['Properties']) + + +def test_parse_qs_unicode_decode_error(): + body = b'{"key": "%D0"}, "C": "#0 = :0"}' + request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False) + BaseResponse().setup_class(request, request.url, request.headers) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 32fd61d16..77846de04 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -949,6 +949,33 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') +@mock_dynamodb2 +def test_create_table_pay_per_request(): + client = boto3.client('dynamodb', region_name='us-east-1') + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + BillingMode="PAY_PER_REQUEST" + ) + + +@mock_dynamodb2 +def test_create_table_error_pay_per_request_with_provisioned_param(): + client = boto3.client('dynamodb', region_name='us-east-1') + + try: + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}, + BillingMode="PAY_PER_REQUEST" + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ValidationException') + + @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -1504,6 +1531,7 @@ def test_dynamodb_streams_2(): } assert 'LatestStreamLabel' in resp['TableDescription'] assert 'LatestStreamArn' in resp['TableDescription'] + @mock_dynamodb2 def test_condition_expressions(): @@ -1669,8 +1697,8 @@ def test_query_gsi_with_range_key(): res = dynamodb.query(TableName='test', IndexName='test_gsi', KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key', ExpressionAttributeValues={ - ':gsi_hash_key': {'S': 'key1'}, - ':gsi_range_key': {'S': 'range1'} + 
':gsi_hash_key': {'S': 'key1'}, + ':gsi_range_key': {'S': 'range1'} }) res.should.have.key("Count").equal(1) res.should.have.key("Items") @@ -1679,3 +1707,45 @@ def test_query_gsi_with_range_key(): 'gsi_hash_key': {'S': 'key1'}, 'gsi_range_key': {'S': 'range1'}, }) + + +@mock_dynamodb2 +def test_scan_by_non_exists_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + with assert_raises(ClientError) as ex: + dynamodb.scan(TableName='test', IndexName='non_exists_index') + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'The table does not have the specified index: non_exists_index' + ) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index cc7fca11e..e64d7d196 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1,1963 +1,2101 @@ -from __future__ import unicode_literals - -from decimal import Decimal - -import boto -import boto3 -from boto3.dynamodb.conditions import Key -from botocore.exceptions import ClientError -import sure # noqa -from freezegun import freeze_time -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from boto.exception import JSONResponseError -from tests.helpers import requires_boto_gte -try: - from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex - from boto.dynamodb2.table import Item, Table - from boto.dynamodb2.types import STRING, NUMBER - from boto.dynamodb2.exceptions import ValidationException - from boto.dynamodb2.exceptions import ConditionalCheckFailedException -except ImportError: - pass - - -def create_table(): - table = Table.create('messages', schema=[ - HashKey('forum_name'), - RangeKey('subject'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - return table - - -def create_table_with_local_indexes(): - table = Table.create( - 'messages', - schema=[ - HashKey('forum_name'), - RangeKey('subject'), - ], - throughput={ - 'read': 10, - 'write': 10, - }, - indexes=[ - AllIndex( - 'threads_index', - parts=[ - HashKey('forum_name', data_type=STRING), - RangeKey('threads', data_type=NUMBER), - ] - ) - ] - ) - return table - - -def iterate_results(res): - for i in res: - pass - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table(): - table = create_table() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'}, - {'AttributeName': 'subject', 'AttributeType': 'S'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 
'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, - {'KeyType': 'RANGE', 'AttributeName': 'subject'} - ], - 'LocalSecondaryIndexes': [], - 'ItemCount': 0, 'CreationDateTime': 1326499200.0, - 'GlobalSecondaryIndexes': [] - } - } - table.describe().should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table_with_local_index(): - table = create_table_with_local_indexes() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'}, - {'AttributeName': 'subject', 'AttributeType': 'S'}, - {'AttributeName': 'threads', 'AttributeType': 'N'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, - 'WriteCapacityUnits': 10, - 'ReadCapacityUnits': 10, - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, - {'KeyType': 'RANGE', 'AttributeName': 'subject'} - ], - 'LocalSecondaryIndexes': [ - { - 'IndexName': 'threads_index', - 'KeySchema': [ - {'AttributeName': 'forum_name', 'KeyType': 'HASH'}, - {'AttributeName': 'threads', 'KeyType': 'RANGE'} - ], - 'Projection': {'ProjectionType': 'ALL'} - } - ], - 'ItemCount': 0, - 'CreationDateTime': 1326499200.0, - 'GlobalSecondaryIndexes': [] - } - } - table.describe().should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - table = create_table() - conn.list_tables()["TableNames"].should.have.length_of(1) - - table.delete() - conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with( - 'messages').should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_update_table_throughput(): - table = create_table() - table.throughput["read"].should.equal(10) - table.throughput["write"].should.equal(10) - table.update(throughput={ - 'read': 5, - 'write': 15, - }) - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(15) - - table.update(throughput={ - 'read': 5, - 'write': 6, - }) - - table.describe() - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(6) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_add_and_describe_and_update(): - table = create_table() - ok = table.put_item(data={ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - ok.should.equal(True) - - table.get_item(forum_name="LOLCat Forum", - subject='Check this out!').should_not.be.none - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='Check this out!' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - returned_item['SentBy'] = 'User B' - returned_item.save(overwrite=True) - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='Check this out!' 
- ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_partial_save(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'subject': 'The LOLz', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item( - forum_name="LOLCat Forum", subject='The LOLz') - - returned_item['SentBy'] = 'User B' - returned_item.partial_save() - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='The LOLz' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'The LOLz', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_put_without_table(): - table = Table('undeclared-table') - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save.when.called_with().should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_missing_item(): - table = create_table() - - table.get_item.when.called_with( - hash_key='tester', - range_key='other', - ).should.throw(ValidationException) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_with_undeclared_table(): - table = Table('undeclared-table') - table.get_item.when.called_with( - test_hash=3241526475).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_without_range_key(): - table = Table.create('messages', schema=[ - HashKey('test_hash'), - RangeKey('test_range'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - - hash_key = 3241526475 - range_key = 1234567890987 - table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) - table.get_item.when.called_with( - test_hash=hash_key).should.throw(ValidationException) - - -@requires_boto_gte("2.30.0") -@mock_dynamodb2_deprecated -def test_delete_item(): - table = create_table() - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item['subject'] = 'Check this out!' - item.save() - table.count().should.equal(1) - - response = item.delete() - response.should.equal(True) - - table.count().should.equal(0) - # Deletes are idempotent - item.delete().should.equal(True) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_item_with_undeclared_table(): - table = Table("undeclared-table") - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.delete.when.called_with().should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query(): - table = create_table() - - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'subject': 'Check this out!' 
- } - item = Item(table, item_data) - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '456' - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '123' - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '789' - item.save(overwrite=True) - - table.count().should.equal(4) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='1', consistent=True) - expected = ["123", "456", "789"] - for index, item in enumerate(results): - item["subject"].should.equal(expected[index]) - - results = table.query_2(forum_name__eq="the-key", - subject__gt='1', reverse=True) - for index, item in enumerate(results): - item["subject"].should.equal(expected[len(expected) - 1 - index]) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='1', consistent=True) - sum(1 for _ in results).should.equal(3) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='234', consistent=True) - sum(1 for _ in results).should.equal(2) - - results = table.query_2(forum_name__eq='the-key', subject__gt='9999') - sum(1 for _ in results).should.equal(0) - - results = table.query_2(forum_name__eq='the-key', subject__beginswith='12') - sum(1 for _ in results).should.equal(1) - - results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') - sum(1 for _ in results).should.equal(1) - - results = table.query_2(forum_name__eq='the-key', - subject__between=['567', '890']) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_with_undeclared_table(): - table = Table('undeclared') - results = table.query( - forum_name__eq='Amazon DynamoDB', - subject__beginswith='DynamoDB', - limit=1 - ) - iterate_results.when.called_with(results).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan(): - table = create_table() - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key' - item_data['subject'] = '456' - - item = Item(table, item_data) - item.save() - - item['forum_name'] = 'the-key' - item['subject'] = '123' - item.save() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:09 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - - item_data['forum_name'] = 'the-key' - item_data['subject'] = '789' - - item = Item(table, item_data) - item.save() - - results = table.scan() - sum(1 for _ in results).should.equal(3) - - results = table.scan(SentBy__eq='User B') - sum(1 for _ in results).should.equal(1) - - results = table.scan(Body__beginswith='http') - sum(1 for _ in results).should.equal(3) - - results = table.scan(Ids__null=False) - sum(1 for _ in results).should.equal(1) - - results = table.scan(Ids__null=True) - sum(1 for _ in results).should.equal(2) - - results = table.scan(PK__between=[8, 9]) - sum(1 for _ in results).should.equal(0) - - results = table.scan(PK__between=[5, 8]) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - conn.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") 
-@mock_dynamodb2_deprecated -def test_write_batch(): - table = create_table() - with table.batch_write() as batch: - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '123', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '789', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - table.count().should.equal(2) - with table.batch_write() as batch: - batch.delete_item( - forum_name='the-key', - subject='789' - ) - - table.count().should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_batch_read(): - table = create_table() - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - - item_data['forum_name'] = 'the-key' - item_data['subject'] = '456' - - item = Item(table, item_data) - item.save() - - item = Item(table, item_data) - item_data['forum_name'] = 'the-key' - item_data['subject'] = '123' - item.save() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = Item(table, item_data) - item_data['forum_name'] = 'another-key' - item_data['subject'] = '789' - item.save() - results = table.batch_get( - keys=[ - {'forum_name': 'the-key', 'subject': '123'}, - {'forum_name': 'another-key', 'subject': '789'}, - ] - ) - - # Iterate through so that batch_item gets called - count = len([x for x in results]) - count.should.equal(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_key_fields(): - table = create_table() - kf = table.get_key_fields() - kf.should.equal(['forum_name', 'subject']) - - -@mock_dynamodb2_deprecated -def test_create_with_global_indexes(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('version'), - ], global_indexes=[ - GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), - ]) - - table_description = conn.describe_table("messages") - table_description['Table']["GlobalSecondaryIndexes"].should.equal([ - { - "IndexName": "topic-created_at-index", - "KeySchema": [ - { - "AttributeName": "topic", - "KeyType": "HASH" - }, - { - "AttributeName": "created_at", - "KeyType": "RANGE" - }, - ], - "Projection": { - "ProjectionType": "ALL" - }, - "ProvisionedThroughput": { - "ReadCapacityUnits": 6, - "WriteCapacityUnits": 1, - } - } - ]) - - -@mock_dynamodb2_deprecated -def test_query_with_global_indexes(): - table = Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('version'), - ], global_indexes=[ - GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), - GlobalAllIndex('status-created_at-index', - parts=[ - HashKey('status'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 2, - 'write': 1 - } - ) - ]) - - item_data = { - 'subject': 'Check this out!', - 'version': '1', - 'created_at': 0, - 'status': 'inactive' - } - item = Item(table, item_data) - item.save(overwrite=True) - - item['version'] = '2' - item.save(overwrite=True) - - results = table.query(status__eq='active') - list(results).should.have.length_of(0) - - 
-@mock_dynamodb2_deprecated -def test_query_with_local_indexes(): - table = create_table_with_local_indexes() - item_data = { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - 'status': 'inactive' - } - item = Item(table, item_data) - item.save(overwrite=True) - - item['version'] = '2' - item.save(overwrite=True) - results = table.query(forum_name__eq='Cool Forum', - index='threads_index', threads__eq=1) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_eq(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - results = table.query_2( - forum_name__eq='Cool Forum', index='threads_index', threads__eq=5 - ) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_lt(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__lt=5 - ) - results = list(results) - results.should.have.length_of(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_gt(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__gt=1 - ) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_lte(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... 
please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__lte=5 - ) - list(results).should.have.length_of(3) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_gte(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__gte=1 - ) - list(results).should.have.length_of(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_non_hash_range_key(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '3', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '2', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', version__gt="2" - ) - results = list(results) - results.should.have.length_of(1) - - results = table.query( - forum_name__eq='Cool Forum', version__lt="3" - ) - results = list(results) - results.should.have.length_of(2) - - -@mock_dynamodb2_deprecated -def test_reverse_query(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - table = Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('created_at', data_type='N') - ]) - - for i in range(10): - table.put_item({ - 'subject': "Hi", - 'created_at': i - }) - - results = table.query_2(subject__eq="Hi", - created_at__lt=6, - limit=4, - reverse=True) - - expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)] - [r['created_at'] for r in results].should.equal(expected) - - -@mock_dynamodb2_deprecated -def test_lookup(): - from decimal import Decimal - table = Table.create('messages', schema=[ - HashKey('test_hash'), - RangeKey('test_range'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - - hash_key = 3241526475 - range_key = 1234567890987 - data = {'test_hash': hash_key, 'test_range': range_key} - table.put_item(data=data) - message = table.lookup(hash_key, range_key) - message.get('test_hash').should.equal(Decimal(hash_key)) - message.get('test_range').should.equal(Decimal(range_key)) - - -@mock_dynamodb2_deprecated -def test_failed_overwrite(): - table = Table.create('messages', schema=[ - HashKey('id'), - RangeKey('range'), - ], throughput={ - 'read': 7, - 'write': 3, - }) - - data1 = {'id': '123', 'range': 'abc', 'data': '678'} - table.put_item(data=data1) - - data2 = {'id': '123', 'range': 'abc', 'data': '345'} - table.put_item(data=data2, overwrite=True) - - data3 = {'id': '123', 'range': 'abc', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw( - ConditionalCheckFailedException) - - returned_item = table.lookup('123', 'abc') - dict(returned_item).should.equal(data2) - - data4 = {'id': '123', 'range': 'ghi', 'data': 812} - 
table.put_item(data=data4) - - returned_item = table.lookup('123', 'ghi') - dict(returned_item).should.equal(data4) - - -@mock_dynamodb2_deprecated -def test_conflicting_writes(): - table = Table.create('messages', schema=[ - HashKey('id'), - RangeKey('range'), - ]) - - item_data = {'id': '123', 'range': 'abc', 'data': '678'} - item1 = Item(table, item_data) - item2 = Item(table, item_data) - item1.save() - - item1['data'] = '579' - item2['data'] = '912' - - item1.save() - item2.save.when.called_with().should.throw(ConditionalCheckFailedException) - -""" -boto3 -""" - - -@mock_dynamodb2 -def test_boto3_conditions(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '456' - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '789' - }) - - # Test a query returning all items - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('1'), - ScanIndexForward=True, - ) - expected = ["123", "456", "789"] - for index, item in enumerate(results['Items']): - item["subject"].should.equal(expected[index]) - - # Return all items again, but in reverse - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('1'), - ScanIndexForward=False, - ) - for index, item in enumerate(reversed(results['Items'])): - item["subject"].should.equal(expected[index]) - - # Filter the subjects to only return some of the results - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('234'), - ConsistentRead=True, - ) - results['Count'].should.equal(2) - - # Filter to return no results - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('9999') - ) - results['Count'].should.equal(0) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").begins_with('12') - ) - results['Count'].should.equal(1) - - results = table.query( - KeyConditionExpression=Key("subject").begins_with( - '7') & Key('forum_name').eq('the-key') - ) - results['Count'].should.equal(1) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").between('567', '890') - ) - results['Count'].should.equal(1) - - -@mock_dynamodb2 -def test_boto3_put_item_with_conditions(): - import botocore - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - table.put_item( - Item={ - 'forum_name': 'the-key-2', - 'subject': '1234', - }, - ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' - ) - - table.put_item.when.called_with( - Item={ - 'forum_name': 'the-key', - 'subject': '123' - }, - ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' - ).should.throw(botocore.exceptions.ClientError) - - table.put_item.when.called_with( - Item={ - 'forum_name': 'bogus-key', - 'subject': 'bogus', - 'test': '123' - }, - ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)' - ).should.throw(botocore.exceptions.ClientError) - - -def _create_table_with_range_key(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'created', - 'AttributeType': 'N' - } - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - return dynamodb.Table('users') - - -@mock_dynamodb2 -def test_update_item_range_key_set(): - table = _create_table_with_range_key() - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'username': 'johndoe', - 'created': Decimal('3'), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'username': { - 'Action': u'PUT', - 'Value': 'johndoe2' - }, - 'created': { - 'Action': u'PUT', - 'Value': Decimal('4'), - }, - 'mapfield': { - 'Action': u'PUT', - 'Value': {'key': 'value'}, - } - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'username': "johndoe2", - 'forum_name': 'the-key', - 'subject': '123', - 'created': '4', - 'mapfield': {'key': 'value'}, - }) - - -@mock_dynamodb2 -def test_update_item_does_not_exist_is_created(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - result = table.update_item( - Key=item_key, - AttributeUpdates={ - 'username': { - 'Action': u'PUT', - 'Value': 'johndoe2' - }, - 'created': { - 'Action': u'PUT', - 'Value': Decimal('4'), - }, - 'mapfield': { - 'Action': u'PUT', - 'Value': 
{'key': 'value'}, - } - }, - ReturnValues='ALL_OLD', - ) - - assert not result.get('Attributes') - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'username': "johndoe2", - 'forum_name': 'the-key', - 'subject': '123', - 'created': '4', - 'mapfield': {'key': 'value'}, - }) - - -@mock_dynamodb2 -def test_update_item_add_value(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'numeric_field': Decimal('-1'), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'numeric_field': { - 'Action': u'ADD', - 'Value': Decimal('2'), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'numeric_field': '1', - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_add_value_string_set(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'string_set': set(['str1', 'str2']), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'string_set': { - 'Action': u'ADD', - 'Value': set(['str3']), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'string_set': set(['str1', 'str2', 'str3']), - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_add_value_does_not_exist_is_created(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'numeric_field': { - 'Action': u'ADD', - 'Value': Decimal('2'), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'numeric_field': '2', - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_with_expression(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'field': '1' - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - - table.update_item( - Key=item_key, - UpdateExpression='SET field=2', - ) - dict(table.get_item(Key=item_key)['Item']).should.equal({ - 'field': '2', - 'forum_name': 'the-key', - 'subject': '123', - }) - - table.update_item( - Key=item_key, - UpdateExpression='SET field = 3', - ) - dict(table.get_item(Key=item_key)['Item']).should.equal({ - 'field': '3', - 'forum_name': 'the-key', - 'subject': '123', - }) - -@mock_dynamodb2 -def test_update_item_add_with_expression(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - current_item = { - 'forum_name': 'the-key', - 'subject': '123', - 'str_set': {'item1', 'item2', 'item3'}, - 'num_set': {1, 2, 3}, - 'num_val': 6 - } - - # Put an entry in the DB to play with - table.put_item(Item=current_item) - - # Update item to add a string value to a string set - table.update_item( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': {'item4'} - } - ) - 
current_item['str_set'] = current_item['str_set'].union({'item4'}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to add a num value to a num set - table.update_item( - Key=item_key, - UpdateExpression='ADD num_set :v', - ExpressionAttributeValues={ - ':v': {6} - } - ) - current_item['num_set'] = current_item['num_set'].union({6}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to add a value to a number value - table.update_item( - Key=item_key, - UpdateExpression='ADD num_val :v', - ExpressionAttributeValues={ - ':v': 20 - } - ) - current_item['num_val'] = current_item['num_val'] + 20 - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to add a number value to a string set, should raise Client Error - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': 20 - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to add a number set to the string set, should raise a ClientError - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': { 20 } - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to update with a bad expression - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set bad_value' - ).should.have.raised(ClientError) - - # Attempt to add a string value instead of a string set - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': 'new_string' - } - ).should.have.raised(ClientError) - - -@mock_dynamodb2 -def test_update_item_delete_with_expression(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - current_item = { - 'forum_name': 'the-key', - 'subject': '123', - 'str_set': {'item1', 'item2', 'item3'}, - 'num_set': {1, 2, 3}, - 'num_val': 6 - } - - # Put an entry in the DB to play with - table.put_item(Item=current_item) - - # Update item to delete a string value from a string set - table.update_item( - Key=item_key, - UpdateExpression='DELETE str_set :v', - ExpressionAttributeValues={ - ':v': {'item2'} - } - ) - current_item['str_set'] = current_item['str_set'].difference({'item2'}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to delete a num value from a num set - table.update_item( - Key=item_key, - UpdateExpression='DELETE num_set :v', - ExpressionAttributeValues={ - ':v': {2} - } - ) - current_item['num_set'] = current_item['num_set'].difference({2}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Try to delete on a number, this should fail - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_val :v', - ExpressionAttributeValues={ - ':v': 20 - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Try to delete a string set from a number set - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_set :v', - ExpressionAttributeValues={ - ':v': {'del_str'} - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to update with a bad expression - 
table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_val badvalue' - ).should.have.raised(ClientError) - - -@mock_dynamodb2 -def test_boto3_query_gsi_range_comparison(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'username': 'johndoe', - 'created': 3, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '456', - 'username': 'johndoe', - 'created': 1, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '789', - 'username': 'johndoe', - 'created': 2, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '159', - 'username': 'janedoe', - 'created': 2, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '601', - 'username': 'janedoe', - 'created': 5, - }) - - # Test a query returning all johndoe items - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - ScanIndexForward=True, - IndexName='TestGSI', - ) - expected = ["456", "789", "123"] - for index, item in enumerate(results['Items']): - item["subject"].should.equal(expected[index]) - - # Return all johndoe items again, but in reverse - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - ScanIndexForward=False, - IndexName='TestGSI', - ) - for index, item in enumerate(reversed(results['Items'])): - item["subject"].should.equal(expected[index]) - - # Filter the creation to only return some of the results - # And reverse order of hash + range key - results = table.query( - KeyConditionExpression=Key("created").gt( - 1) & Key('username').eq('johndoe'), - ConsistentRead=True, - IndexName='TestGSI', - ) - results['Count'].should.equal(2) - - # Filter to return no results - results = table.query( - KeyConditionExpression=Key('username').eq( - 'janedoe') & Key("created").gt(9), - IndexName='TestGSI', - ) - results['Count'].should.equal(0) - - results = table.query( - KeyConditionExpression=Key('username').eq( - 'janedoe') & Key("created").eq(5), - IndexName='TestGSI', - ) - results['Count'].should.equal(1) - - # Test range key sorting - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - IndexName='TestGSI', - ) - expected = [Decimal('1'), Decimal('2'), Decimal('3')] - for index, item in enumerate(results['Items']): - item["created"].should.equal(expected[index]) - - -@mock_dynamodb2 -def test_boto3_update_table_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - table.update(ProvisionedThroughput={ - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - }) - - table = dynamodb.Table('users') - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(10) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(11) - - -@mock_dynamodb2 -def test_boto3_update_table_gsi_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - gsi_throughput['ReadCapacityUnits'].should.equal(3) - gsi_throughput['WriteCapacityUnits'].should.equal(4) - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Update': { - 'IndexName': 'TestGSI', - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - } - }, - }]) - - table = dynamodb.Table('users') - - # Primary throughput has not changed - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - gsi_throughput['ReadCapacityUnits'].should.equal(10) - gsi_throughput['WriteCapacityUnits'].should.equal(11) - - -@mock_dynamodb2 -def test_update_table_gsi_create(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - table.global_secondary_indexes.should.have.length_of(0) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Create': { - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }, - }]) - - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(1) - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - assert gsi_throughput['ReadCapacityUnits'].should.equal(3) - assert gsi_throughput['WriteCapacityUnits'].should.equal(4) - - # Check update works - table.update(GlobalSecondaryIndexUpdates=[{ - 'Update': { - 'IndexName': 'TestGSI', - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - } - }, - }]) - table = dynamodb.Table('users') - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - assert gsi_throughput['ReadCapacityUnits'].should.equal(10) - assert gsi_throughput['WriteCapacityUnits'].should.equal(11) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Delete': { - 'IndexName': 'TestGSI', - }, - }]) - - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(0) - - -@mock_dynamodb2 -def test_update_table_gsi_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(1) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Delete': { - 'IndexName': 'TestGSI', - }, - }]) - - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(0) - - -@mock_dynamodb2 -def test_query_pagination(): - table = _create_table_with_range_key() - for i in range(10): - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '{0}'.format(i), - 'username': 'johndoe', - 'created': Decimal('3'), - }) - - page1 = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key'), - Limit=6 - ) - page1['Count'].should.equal(6) - page1['Items'].should.have.length_of(6) - page1.should.have.key('LastEvaluatedKey') - - page2 = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key'), - Limit=6, - ExclusiveStartKey=page1['LastEvaluatedKey'] - ) - page2['Count'].should.equal(4) - page2['Items'].should.have.length_of(4) - page2.should_not.have.key('LastEvaluatedKey') - - results = page1['Items'] + page2['Items'] - subjects = set([int(r['subject']) for r in results]) - subjects.should.equal(set(range(10))) +from __future__ import unicode_literals + +from decimal import Decimal + +import boto +import boto3 +from boto3.dynamodb.conditions import Key +from botocore.exceptions import ClientError +import sure # noqa +from freezegun import freeze_time +from moto import mock_dynamodb2, mock_dynamodb2_deprecated +from boto.exception import JSONResponseError +from tests.helpers import requires_boto_gte +try: + from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex + from boto.dynamodb2.table import Item, Table + from boto.dynamodb2.types import STRING, NUMBER + from boto.dynamodb2.exceptions import ValidationException + from boto.dynamodb2.exceptions import ConditionalCheckFailedException +except ImportError: + pass + + +def create_table(): + table = Table.create('messages', schema=[ + HashKey('forum_name'), + RangeKey('subject'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + return table + + +def create_table_with_local_indexes(): + table = Table.create( + 'messages', + schema=[ + HashKey('forum_name'), + RangeKey('subject'), + ], + throughput={ + 'read': 10, + 'write': 10, + }, + indexes=[ + AllIndex( + 'threads_index', + parts=[ + HashKey('forum_name', data_type=STRING), + RangeKey('threads', data_type=NUMBER), + ] + ) + ] + ) + return table + + +def iterate_results(res): + for i in res: + pass + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table(): + table = create_table() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + {'AttributeName': 
'subject', 'AttributeType': 'S'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, + {'KeyType': 'RANGE', 'AttributeName': 'subject'} + ], + 'LocalSecondaryIndexes': [], + 'ItemCount': 0, 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [] + } + } + table.describe().should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table_with_local_index(): + table = create_table_with_local_indexes() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + {'AttributeName': 'subject', 'AttributeType': 'S'}, + {'AttributeName': 'threads', 'AttributeType': 'N'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, + 'WriteCapacityUnits': 10, + 'ReadCapacityUnits': 10, + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, + {'KeyType': 'RANGE', 'AttributeName': 'subject'} + ], + 'LocalSecondaryIndexes': [ + { + 'IndexName': 'threads_index', + 'KeySchema': [ + {'AttributeName': 'forum_name', 'KeyType': 'HASH'}, + {'AttributeName': 'threads', 'KeyType': 'RANGE'} + ], + 'Projection': {'ProjectionType': 'ALL'} + } + ], + 'ItemCount': 0, + 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [] + } + } + table.describe().should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + table = create_table() + conn.list_tables()["TableNames"].should.have.length_of(1) + + table.delete() + conn.list_tables()["TableNames"].should.have.length_of(0) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_update_table_throughput(): + table = create_table() + table.throughput["read"].should.equal(10) + table.throughput["write"].should.equal(10) + table.update(throughput={ + 'read': 5, + 'write': 15, + }) + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(15) + + table.update(throughput={ + 'read': 5, + 'write': 6, + }) + + table.describe() + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(6) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_add_and_describe_and_update(): + table = create_table() + ok = table.put_item(data={ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + ok.should.equal(True) + + table.get_item(forum_name="LOLCat Forum", + subject='Check this out!').should_not.be.none + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='Check this out!' 
+ ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + returned_item['SentBy'] = 'User B' + returned_item.save(overwrite=True) + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='Check this out!' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_partial_save(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'subject': 'The LOLz', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item( + forum_name="LOLCat Forum", subject='The LOLz') + + returned_item['SentBy'] = 'User B' + returned_item.partial_save() + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='The LOLz' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'The LOLz', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_put_without_table(): + table = Table('undeclared-table') + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save.when.called_with().should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_missing_item(): + table = create_table() + + table.get_item.when.called_with( + hash_key='tester', + range_key='other', + ).should.throw(ValidationException) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_with_undeclared_table(): + table = Table('undeclared-table') + table.get_item.when.called_with( + test_hash=3241526475).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_without_range_key(): + table = Table.create('messages', schema=[ + HashKey('test_hash'), + RangeKey('test_range'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + + hash_key = 3241526475 + range_key = 1234567890987 + table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) + table.get_item.when.called_with( + test_hash=hash_key).should.throw(ValidationException) + + +@requires_boto_gte("2.30.0") +@mock_dynamodb2_deprecated +def test_delete_item(): + table = create_table() + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item['subject'] = 'Check this out!' 
+ item.save() + table.count().should.equal(1) + + response = item.delete() + response.should.equal(True) + + table.count().should.equal(0) + # Deletes are idempotent + item.delete().should.equal(True) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_item_with_undeclared_table(): + table = Table("undeclared-table") + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.delete.when.called_with().should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query(): + table = create_table() + + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'subject': 'Check this out!' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '456' + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '123' + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '789' + item.save(overwrite=True) + + table.count().should.equal(4) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) + expected = ["123", "456", "789"] + for index, item in enumerate(results): + item["subject"].should.equal(expected[index]) + + results = table.query_2(forum_name__eq="the-key", + subject__gt='1', reverse=True) + for index, item in enumerate(results): + item["subject"].should.equal(expected[len(expected) - 1 - index]) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) + sum(1 for _ in results).should.equal(3) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='234', consistent=True) + sum(1 for _ in results).should.equal(2) + + results = table.query_2(forum_name__eq='the-key', subject__gt='9999') + sum(1 for _ in results).should.equal(0) + + results = table.query_2(forum_name__eq='the-key', subject__beginswith='12') + sum(1 for _ in results).should.equal(1) + + results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') + sum(1 for _ in results).should.equal(1) + + results = table.query_2(forum_name__eq='the-key', + subject__between=['567', '890']) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_with_undeclared_table(): + table = Table('undeclared') + results = table.query( + forum_name__eq='Amazon DynamoDB', + subject__beginswith='DynamoDB', + limit=1 + ) + iterate_results.when.called_with(results).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan(): + table = create_table() + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key' + item_data['subject'] = '456' + + item = Item(table, item_data) + item.save() + + item['forum_name'] = 'the-key' + item['subject'] = '123' + item.save() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:09 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + + item_data['forum_name'] = 'the-key' + item_data['subject'] = '789' + + item = Item(table, item_data) + item.save() + + results = table.scan() + sum(1 for _ in results).should.equal(3) + + results = table.scan(SentBy__eq='User B') + sum(1 for _ 
in results).should.equal(1) + + results = table.scan(Body__beginswith='http') + sum(1 for _ in results).should.equal(3) + + results = table.scan(Ids__null=False) + sum(1 for _ in results).should.equal(1) + + results = table.scan(Ids__null=True) + sum(1 for _ in results).should.equal(2) + + results = table.scan(PK__between=[8, 9]) + sum(1 for _ in results).should.equal(0) + + results = table.scan(PK__between=[5, 8]) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + conn.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_write_batch(): + table = create_table() + with table.batch_write() as batch: + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '123', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '789', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + table.count().should.equal(2) + with table.batch_write() as batch: + batch.delete_item( + forum_name='the-key', + subject='789' + ) + + table.count().should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_batch_read(): + table = create_table() + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + + item_data['forum_name'] = 'the-key' + item_data['subject'] = '456' + + item = Item(table, item_data) + item.save() + + item = Item(table, item_data) + item_data['forum_name'] = 'the-key' + item_data['subject'] = '123' + item.save() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = Item(table, item_data) + item_data['forum_name'] = 'another-key' + item_data['subject'] = '789' + item.save() + results = table.batch_get( + keys=[ + {'forum_name': 'the-key', 'subject': '123'}, + {'forum_name': 'another-key', 'subject': '789'}, + ] + ) + + # Iterate through so that batch_item gets called + count = len([x for x in results]) + count.should.equal(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_key_fields(): + table = create_table() + kf = table.get_key_fields() + kf.should.equal(['forum_name', 'subject']) + + +@mock_dynamodb2_deprecated +def test_create_with_global_indexes(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('version'), + ], global_indexes=[ + GlobalAllIndex('topic-created_at-index', + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), + ]) + + table_description = conn.describe_table("messages") + table_description['Table']["GlobalSecondaryIndexes"].should.equal([ + { + "IndexName": "topic-created_at-index", + "KeySchema": [ + { + "AttributeName": "topic", + "KeyType": "HASH" + }, + { + "AttributeName": "created_at", + "KeyType": "RANGE" + }, + ], + "Projection": { + "ProjectionType": "ALL" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": 6, + "WriteCapacityUnits": 1, + } + } + ]) + 
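Reviewer note: `test_create_with_global_indexes` above exercises GSI creation only through the deprecated `boto.dynamodb2` layer. The same index can be declared through the boto3 client, whose `describe_table` response moto also populates; a sketch under that assumption (test name illustrative, not part of this change):

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_create_with_global_indexes_boto3_sketch():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='messages',
        KeySchema=[
            {'AttributeName': 'subject', 'KeyType': 'HASH'},
            {'AttributeName': 'version', 'KeyType': 'RANGE'},
        ],
        # Every key attribute, including the GSI keys, must be declared here.
        AttributeDefinitions=[
            {'AttributeName': 'subject', 'AttributeType': 'S'},
            {'AttributeName': 'version', 'AttributeType': 'S'},
            {'AttributeName': 'topic', 'AttributeType': 'S'},
            {'AttributeName': 'created_at', 'AttributeType': 'N'},
        ],
        GlobalSecondaryIndexes=[{
            'IndexName': 'topic-created_at-index',
            'KeySchema': [
                {'AttributeName': 'topic', 'KeyType': 'HASH'},
                {'AttributeName': 'created_at', 'KeyType': 'RANGE'},
            ],
            'Projection': {'ProjectionType': 'ALL'},
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 6,
                'WriteCapacityUnits': 1,
            },
        }],
        ProvisionedThroughput={'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5},
    )

    gsis = client.describe_table(TableName='messages')['Table']['GlobalSecondaryIndexes']
    assert gsis[0]['IndexName'] == 'topic-created_at-index'
    assert gsis[0]['ProvisionedThroughput']['ReadCapacityUnits'] == 6
```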
+ +@mock_dynamodb2_deprecated +def test_query_with_global_indexes(): + table = Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('version'), + ], global_indexes=[ + GlobalAllIndex('topic-created_at-index', + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), + GlobalAllIndex('status-created_at-index', + parts=[ + HashKey('status'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 2, + 'write': 1 + } + ) + ]) + + item_data = { + 'subject': 'Check this out!', + 'version': '1', + 'created_at': 0, + 'status': 'inactive' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['version'] = '2' + item.save(overwrite=True) + + results = table.query(status__eq='active') + list(results).should.have.length_of(0) + + +@mock_dynamodb2_deprecated +def test_query_with_local_indexes(): + table = create_table_with_local_indexes() + item_data = { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + 'status': 'inactive' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['version'] = '2' + item.save(overwrite=True) + results = table.query(forum_name__eq='Cool Forum', + index='threads_index', threads__eq=1) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_eq(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + results = table.query_2( + forum_name__eq='Cool Forum', index='threads_index', threads__eq=5 + ) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_lt(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__lt=5 + ) + results = list(results) + results.should.have.length_of(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_gt(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... 
please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__gt=1 + ) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_lte(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__lte=5 + ) + list(results).should.have.length_of(3) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_gte(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__gte=1 + ) + list(results).should.have.length_of(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_non_hash_range_key(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '3', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... 
please', + 'version': '2', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', version__gt="2" + ) + results = list(results) + results.should.have.length_of(1) + + results = table.query( + forum_name__eq='Cool Forum', version__lt="3" + ) + results = list(results) + results.should.have.length_of(2) + + +@mock_dynamodb2_deprecated +def test_reverse_query(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + table = Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('created_at', data_type='N') + ]) + + for i in range(10): + table.put_item({ + 'subject': "Hi", + 'created_at': i + }) + + results = table.query_2(subject__eq="Hi", + created_at__lt=6, + limit=4, + reverse=True) + + expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)] + [r['created_at'] for r in results].should.equal(expected) + + +@mock_dynamodb2_deprecated +def test_lookup(): + from decimal import Decimal + table = Table.create('messages', schema=[ + HashKey('test_hash'), + RangeKey('test_range'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + + hash_key = 3241526475 + range_key = 1234567890987 + data = {'test_hash': hash_key, 'test_range': range_key} + table.put_item(data=data) + message = table.lookup(hash_key, range_key) + message.get('test_hash').should.equal(Decimal(hash_key)) + message.get('test_range').should.equal(Decimal(range_key)) + + +@mock_dynamodb2_deprecated +def test_failed_overwrite(): + table = Table.create('messages', schema=[ + HashKey('id'), + RangeKey('range'), + ], throughput={ + 'read': 7, + 'write': 3, + }) + + data1 = {'id': '123', 'range': 'abc', 'data': '678'} + table.put_item(data=data1) + + data2 = {'id': '123', 'range': 'abc', 'data': '345'} + table.put_item(data=data2, overwrite=True) + + data3 = {'id': '123', 'range': 'abc', 'data': '812'} + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) + + returned_item = table.lookup('123', 'abc') + dict(returned_item).should.equal(data2) + + data4 = {'id': '123', 'range': 'ghi', 'data': 812} + table.put_item(data=data4) + + returned_item = table.lookup('123', 'ghi') + dict(returned_item).should.equal(data4) + + +@mock_dynamodb2_deprecated +def test_conflicting_writes(): + table = Table.create('messages', schema=[ + HashKey('id'), + RangeKey('range'), + ]) + + item_data = {'id': '123', 'range': 'abc', 'data': '678'} + item1 = Item(table, item_data) + item2 = Item(table, item_data) + item1.save() + + item1['data'] = '579' + item2['data'] = '912' + + item1.save() + item2.save.when.called_with().should.throw(ConditionalCheckFailedException) + +""" +boto3 +""" + + +@mock_dynamodb2 +def test_boto3_conditions(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
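+    # (moto serves the create_table call below in-process while
+    # @mock_dynamodb2 is active; no AWS credentials or network access needed)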
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '456' + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '789' + }) + + # Test a query returning all items + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), + ScanIndexForward=True, + ) + expected = ["123", "456", "789"] + for index, item in enumerate(results['Items']): + item["subject"].should.equal(expected[index]) + + # Return all items again, but in reverse + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), + ScanIndexForward=False, + ) + for index, item in enumerate(reversed(results['Items'])): + item["subject"].should.equal(expected[index]) + + # Filter the subjects to only return some of the results + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('234'), + ConsistentRead=True, + ) + results['Count'].should.equal(2) + + # Filter to return no results + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('9999') + ) + results['Count'].should.equal(0) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").begins_with('12') + ) + results['Count'].should.equal(1) + + results = table.query( + KeyConditionExpression=Key("subject").begins_with( + '7') & Key('forum_name').eq('the-key') + ) + results['Count'].should.equal(1) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").between('567', '890') + ) + results['Count'].should.equal(1) + + +@mock_dynamodb2 +def test_boto3_put_item_with_conditions(): + import botocore + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.put_item( + Item={ + 'forum_name': 'the-key-2', + 'subject': '1234', + }, + ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' + ) + + table.put_item.when.called_with( + Item={ + 'forum_name': 'the-key', + 'subject': '123' + }, + ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' + ).should.throw(botocore.exceptions.ClientError) + + table.put_item.when.called_with( + Item={ + 'forum_name': 'bogus-key', + 'subject': 'bogus', + 'test': '123' + }, + ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)' + ).should.throw(botocore.exceptions.ClientError) + + +def _create_table_with_range_key(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'created', + 'AttributeType': 'N' + } + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + return dynamodb.Table('users') + + +@mock_dynamodb2 +def test_update_item_range_key_set(): + table = _create_table_with_range_key() + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'username': 'johndoe', + 'created': Decimal('3'), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'username': { + 'Action': u'PUT', + 'Value': 'johndoe2' + }, + 'created': { + 'Action': u'PUT', + 'Value': Decimal('4'), + }, + 'mapfield': { + 'Action': u'PUT', + 'Value': {'key': 'value'}, + } + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'username': "johndoe2", + 'forum_name': 'the-key', + 'subject': '123', + 'created': '4', + 'mapfield': {'key': 'value'}, + }) + + +@mock_dynamodb2 +def test_update_item_does_not_exist_is_created(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + result = table.update_item( + Key=item_key, + AttributeUpdates={ + 'username': { + 'Action': u'PUT', + 'Value': 'johndoe2' + }, + 'created': { + 'Action': u'PUT', + 'Value': Decimal('4'), + }, + 'mapfield': { + 'Action': u'PUT', + 'Value': 
{'key': 'value'}, + } + }, + ReturnValues='ALL_OLD', + ) + + assert not result.get('Attributes') + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'username': "johndoe2", + 'forum_name': 'the-key', + 'subject': '123', + 'created': '4', + 'mapfield': {'key': 'value'}, + }) + + +@mock_dynamodb2 +def test_update_item_add_value(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'numeric_field': Decimal('-1'), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'numeric_field': { + 'Action': u'ADD', + 'Value': Decimal('2'), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'numeric_field': '1', + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_add_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'ADD', + 'Value': set(['str3']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1', 'str2', 'str3']), + 'forum_name': 'the-key', + 'subject': '123', + }) + +@mock_dynamodb2 +def test_update_item_delete_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'DELETE', + 'Value': set(['str2']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1']), + 'forum_name': 'the-key', + 'subject': '123', + }) + +@mock_dynamodb2 +def test_update_item_add_value_does_not_exist_is_created(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'numeric_field': { + 'Action': u'ADD', + 'Value': Decimal('2'), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'numeric_field': '2', + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_with_expression(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'field': '1' + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + + table.update_item( + Key=item_key, + UpdateExpression='SET field=2', + ) + dict(table.get_item(Key=item_key)['Item']).should.equal({ + 'field': '2', + 'forum_name': 'the-key', + 'subject': '123', + }) + + table.update_item( + Key=item_key, + UpdateExpression='SET field = 3', + ) + 
dict(table.get_item(Key=item_key)['Item']).should.equal({ + 'field': '3', + 'forum_name': 'the-key', + 'subject': '123', + }) + +@mock_dynamodb2 +def test_update_item_add_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to add a string value to a string set + table.update_item( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': {'item4'} + } + ) + current_item['str_set'] = current_item['str_set'].union({'item4'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a num value to a num set + table.update_item( + Key=item_key, + UpdateExpression='ADD num_set :v', + ExpressionAttributeValues={ + ':v': {6} + } + ) + current_item['num_set'] = current_item['num_set'].union({6}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a value to a number value + table.update_item( + Key=item_key, + UpdateExpression='ADD num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ) + current_item['num_val'] = current_item['num_val'] + 20 + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to add a number value to a string set, should raise Client Error + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to add a number set to the string set, should raise a ClientError + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': { 20 } + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set bad_value' + ).should.have.raised(ClientError) + + # Attempt to add a string value instead of a string set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 'new_string' + } + ).should.have.raised(ClientError) + + +@mock_dynamodb2 +def test_update_item_delete_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to delete a string value from a string set + table.update_item( + Key=item_key, + UpdateExpression='DELETE str_set :v', + ExpressionAttributeValues={ + ':v': {'item2'} + } + ) + current_item['str_set'] = current_item['str_set'].difference({'item2'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to delete a num value from a num set + table.update_item( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {2} + } + ) + current_item['num_set'] = current_item['num_set'].difference({2}) + 
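The expression-based counterpart tested above binds operands through ExpressionAttributeValues, and both DynamoDB and moto reject mismatched operand types. A condensed sketch, assuming a table and item shaped like the fixtures in this file:

```python
from botocore.exceptions import ClientError


def demo_add_delete_expressions(table, key):
    # ADD on a numeric attribute increments it.
    table.update_item(Key=key, UpdateExpression='ADD num_val :n',
                      ExpressionAttributeValues={':n': 20})
    # DELETE removes members from a set attribute.
    table.update_item(Key=key, UpdateExpression='DELETE str_set :s',
                      ExpressionAttributeValues={':s': {'item2'}})
    try:
        # Type mismatch: a number operand against a string-set attribute.
        table.update_item(Key=key, UpdateExpression='ADD str_set :bad',
                          ExpressionAttributeValues={':bad': 20})
    except ClientError:
        pass  # the service rejects the mismatched operand types
```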
dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Try to delete on a number, this should fail + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Try to delete a string set from a number set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {'del_str'} + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val badvalue' + ).should.have.raised(ClientError) + + +@mock_dynamodb2 +def test_boto3_query_gsi_range_comparison(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'username': 'johndoe', + 'created': 3, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '456', + 'username': 'johndoe', + 'created': 1, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '789', + 'username': 'johndoe', + 'created': 2, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '159', + 'username': 'janedoe', + 'created': 2, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '601', + 'username': 'janedoe', + 'created': 5, + }) + + # Test a query returning all johndoe items + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + ScanIndexForward=True, + IndexName='TestGSI', + ) + expected = ["456", "789", "123"] + for index, item in enumerate(results['Items']): + item["subject"].should.equal(expected[index]) + + # Return all johndoe items again, but in reverse + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + ScanIndexForward=False, + IndexName='TestGSI', + ) + for index, item in enumerate(reversed(results['Items'])): + item["subject"].should.equal(expected[index]) + + # Filter the creation to only return some of the results + # And reverse order of hash + range key + results = table.query( + KeyConditionExpression=Key("created").gt( + 1) & Key('username').eq('johndoe'), + ConsistentRead=True, + IndexName='TestGSI', + ) + results['Count'].should.equal(2) + + # Filter to return no results + results = table.query( + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").gt(9), + IndexName='TestGSI', + ) + results['Count'].should.equal(0) + + results = table.query( + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").eq(5), + IndexName='TestGSI', + ) + results['Count'].should.equal(1) + + # Test range key sorting + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + IndexName='TestGSI', + ) + expected = [Decimal('1'), Decimal('2'), Decimal('3')] + for index, item in enumerate(results['Items']): + item["created"].should.equal(expected[index]) + + +@mock_dynamodb2 +def test_boto3_update_table_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
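The GSI queries above compose key conditions with boto3's Key helper and control result ordering with ScanIndexForward. A condensed sketch, assuming the TestGSI index and the 'username'/'created' item shape used by these tests:

```python
from boto3.dynamodb.conditions import Key


def demo_gsi_query(table):
    results = table.query(
        IndexName='TestGSI',
        KeyConditionExpression=Key('username').eq('johndoe') & Key('created').gt(0),
        ScanIndexForward=False,  # largest 'created' values first
    )
    return [item['subject'] for item in results['Items']]
```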
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + table.update(ProvisionedThroughput={ + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + }) + + table = dynamodb.Table('users') + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(10) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(11) + + +@mock_dynamodb2 +def test_boto3_update_table_gsi_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(3) + gsi_throughput['WriteCapacityUnits'].should.equal(4) + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Update': { + 'IndexName': 'TestGSI', + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + } + }, + }]) + + table = dynamodb.Table('users') + + # Primary throughput has not changed + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(10) + gsi_throughput['WriteCapacityUnits'].should.equal(11) + + +@mock_dynamodb2 +def test_update_table_gsi_create(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
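The throughput assertions above go through the resource-level table.update(...). The equivalent client-level UpdateTable calls would look like the sketch below; the table and index names come from the tests, the capacities are arbitrary:

```python
import boto3


def demo_update_throughput():
    client = boto3.client('dynamodb', region_name='us-east-1')
    # Raise the table's own provisioned throughput.
    client.update_table(
        TableName='users',
        ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 11},
    )
    # Raise the throughput of one GSI, leaving the table's untouched.
    client.update_table(
        TableName='users',
        GlobalSecondaryIndexUpdates=[{
            'Update': {
                'IndexName': 'TestGSI',
                'ProvisionedThroughput': {'ReadCapacityUnits': 10,
                                          'WriteCapacityUnits': 11},
            },
        }],
    )
```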
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + table.global_secondary_indexes.should.have.length_of(0) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Create': { + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(1) + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + assert gsi_throughput['ReadCapacityUnits'].should.equal(3) + assert gsi_throughput['WriteCapacityUnits'].should.equal(4) + + # Check update works + table.update(GlobalSecondaryIndexUpdates=[{ + 'Update': { + 'IndexName': 'TestGSI', + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + } + }, + }]) + table = dynamodb.Table('users') + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + assert gsi_throughput['ReadCapacityUnits'].should.equal(10) + assert gsi_throughput['WriteCapacityUnits'].should.equal(11) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Delete': { + 'IndexName': 'TestGSI', + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(0) + + +@mock_dynamodb2 +def test_update_table_gsi_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
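test_update_table_gsi_create above adds and later removes an index through GlobalSecondaryIndexUpdates. One caveat: on the real service, UpdateTable also expects AttributeDefinitions covering any attribute named in a new index's key schema, which the resource-level test omits and moto tolerates. A client-level sketch that includes them:

```python
import boto3


def demo_create_gsi():
    client = boto3.client('dynamodb', region_name='us-east-1')
    client.update_table(
        TableName='users',
        # Required on real DynamoDB for the new index's key attributes.
        AttributeDefinitions=[
            {'AttributeName': 'username', 'AttributeType': 'S'},
            {'AttributeName': 'created', 'AttributeType': 'N'},
        ],
        GlobalSecondaryIndexUpdates=[{
            'Create': {
                'IndexName': 'TestGSI',
                'KeySchema': [
                    {'AttributeName': 'username', 'KeyType': 'HASH'},
                    {'AttributeName': 'created', 'KeyType': 'RANGE'},
                ],
                'Projection': {'ProjectionType': 'ALL'},
                'ProvisionedThroughput': {'ReadCapacityUnits': 3,
                                          'WriteCapacityUnits': 4},
            },
        }],
    )
```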
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(1) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Delete': { + 'IndexName': 'TestGSI', + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(0) + + +@mock_dynamodb2 +def test_query_pagination(): + table = _create_table_with_range_key() + for i in range(10): + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '{0}'.format(i), + 'username': 'johndoe', + 'created': Decimal('3'), + }) + + page1 = table.query( + KeyConditionExpression=Key('forum_name').eq('the-key'), + Limit=6 + ) + page1['Count'].should.equal(6) + page1['Items'].should.have.length_of(6) + page1.should.have.key('LastEvaluatedKey') + + page2 = table.query( + KeyConditionExpression=Key('forum_name').eq('the-key'), + Limit=6, + ExclusiveStartKey=page1['LastEvaluatedKey'] + ) + page2['Count'].should.equal(4) + page2['Items'].should.have.length_of(4) + page2.should_not.have.key('LastEvaluatedKey') + + results = page1['Items'] + page2['Items'] + subjects = set([int(r['subject']) for r in results]) + subjects.should.equal(set(range(10))) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'range_key', 'KeyType': 'RANGE'}, + ], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'range_key', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'}, + {'AttributeName': 'lsi_range_key', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + {'AttributeName': 'gsi_col', 'KeyType': 'HASH'}, + {'AttributeName': 'gsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ], + LocalSecondaryIndexes=[ + { + 'IndexName': 'test_lsi', + 'KeySchema': [ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'lsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '1'}, + 'lsi_range_key': {'S': '1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': '1'}, + 
'gsi_range_key': {'S': '2'}, + 'lsi_range_key': {'S': '2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == '1' + assert last_eval_key['gsi_range_key']['S'] == '1' + + res = dynamodb.scan(TableName='test', IndexName='test_lsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_lsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['range_key']['S'] == '1' + assert last_eval_key['lsi_range_key']['S'] == '1' diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 874804db0..1880c7cab 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -829,3 +829,77 @@ def test_scan_pagination(): results = page1['Items'] + page2['Items'] usernames = set([r['username'] for r in results]) usernames.should.equal(set(expected_usernames)) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': 'gsi_val1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': 'gsi_val2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == 'gsi_val1' diff --git a/tests/test_ec2/__init__.py b/tests/test_ec2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_ec2/helpers.py b/tests/test_ec2/helpers.py new file mode 100644 index 000000000..94c9c10cb --- /dev/null +++ b/tests/test_ec2/helpers.py @@ -0,0 +1,15 @@ 
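The helper module added below wraps the private-key validation used by the new key-pair tests. A usage sketch with a locally generated key standing in for moto's CreateKeyPair output (the demo function is illustrative, not part of the patch):

```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa


def demo_check():
    # Generate a throwaway RSA key and serialize it the way EC2 returns
    # private-key material: PEM, traditional OpenSSL format, unencrypted.
    key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend())
    pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode('ascii')
    # rsa_check_private_key(pem) performs exactly this round-trip check.
    loaded = serialization.load_pem_private_key(
        data=pem.encode('ascii'), password=None, backend=default_backend())
    assert isinstance(loaded, rsa.RSAPrivateKey)
```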
+import six + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa + + +def rsa_check_private_key(private_key_material): + assert isinstance(private_key_material, six.string_types) + + private_key = serialization.load_pem_private_key( + data=private_key_material.encode('ascii'), + backend=default_backend(), + password=None) + assert isinstance(private_key, rsa.RSAPrivateKey) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 8f4a00b13..ab5b31ba0 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -16,7 +16,7 @@ from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated def test_create_and_delete_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") all_volumes = conn.get_all_volumes() @@ -52,7 +52,7 @@ def test_create_and_delete_volume(): @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') @@ -63,7 +63,7 @@ def test_create_encrypted_volume_dryrun(): @mock_ec2_deprecated def test_create_encrypted_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) with assert_raises(EC2ResponseError) as ex: @@ -79,7 +79,7 @@ def test_create_encrypted_volume(): @mock_ec2_deprecated def test_filter_volume_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(80, "us-east-1a") volume2 = conn.create_volume(36, "us-east-1b") volume3 = conn.create_volume(20, "us-east-1c") @@ -99,7 +99,7 @@ def test_filter_volume_by_id(): @mock_ec2_deprecated def test_volume_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -196,7 +196,7 @@ def test_volume_filters(): @mock_ec2_deprecated def test_volume_attach_and_detach(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] volume = conn.create_volume(80, "us-east-1a") @@ -252,7 +252,7 @@ def test_volume_attach_and_detach(): @mock_ec2_deprecated def test_create_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") with assert_raises(EC2ResponseError) as ex: @@ -291,7 +291,7 @@ def test_create_snapshot(): @mock_ec2_deprecated def test_create_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -306,7 +306,7 @@ def test_create_encrypted_snapshot(): @mock_ec2_deprecated def test_filter_snapshot_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = 
boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(36, "us-east-1a") snap1 = volume1.create_snapshot('a test snapshot 1') volume2 = conn.create_volume(42, 'us-east-1a') @@ -333,7 +333,7 @@ def test_filter_snapshot_by_id(): @mock_ec2_deprecated def test_snapshot_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) @@ -394,12 +394,17 @@ def test_snapshot_filters(): set([snap.id for snap in snapshots_by_encrypted] ).should.equal({snapshot3.id}) + snapshots_by_owner_id = conn.get_all_snapshots( + filters={'owner-id': '123456789012'}) + set([snap.id for snap in snapshots_by_owner_id] + ).should.equal({snapshot1.id, snapshot2.id, snapshot3.id}) + @mock_ec2_deprecated def test_snapshot_attribute(): import copy - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot() @@ -502,7 +507,7 @@ def test_snapshot_attribute(): @mock_ec2_deprecated def test_create_volume_from_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot('a test snapshot') @@ -524,7 +529,7 @@ def test_create_volume_from_snapshot(): @mock_ec2_deprecated def test_create_volume_from_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') @@ -569,7 +574,7 @@ def test_modify_attribute_blockDeviceMapping(): @mock_ec2_deprecated def test_volume_tag_escaping(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") vol = conn.create_volume(10, 'us-east-1a') snapshot = conn.create_snapshot(vol.id, 'Desc') diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index c0f0eea4d..f14f85721 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -42,7 +42,7 @@ def test_add_servers(): @freeze_time("2014-01-01 05:00:00") @mock_ec2_deprecated def test_instance_launch_and_terminate(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: reservation = conn.run_instances('ami-1234abcd', dry_run=True) @@ -820,7 +820,7 @@ def test_run_instance_with_instance_type(): @mock_ec2_deprecated def test_run_instance_with_default_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 75c1aa73f..dfe6eabdf 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,151 +1,224 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import six -import sure # noqa - -from boto.exception import EC2ResponseError -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_key_pairs_empty(): - conn = boto.connect_ec2('the_key', 
'the_secret') - assert len(conn.get_all_key_pairs()) == 0 - - -@mock_ec2_deprecated -def test_key_pairs_invalid_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_key_pairs('foo') - cm.exception.code.should.equal('InvalidKeyPair.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pairs_create(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - kp = conn.create_key_pair('foo', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') - - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' - - -@mock_ec2_deprecated -def test_key_pairs_create_two(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - kp = conn.create_key_pair('bar') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - kps = conn.get_all_key_pairs() - kps.should.have.length_of(2) - [i.name for i in kps].should.contain('foo') - [i.name for i in kps].should.contain('bar') - kps = conn.get_all_key_pairs('foo') - kps.should.have.length_of(1) - kps[0].name.should.equal('foo') - - -@mock_ec2_deprecated -def test_key_pairs_create_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - assert len(conn.get_all_key_pairs()) == 1 - - with assert_raises(EC2ResponseError) as cm: - conn.create_key_pair('foo') - cm.exception.code.should.equal('InvalidKeyPair.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pairs_delete_no_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - assert len(conn.get_all_key_pairs()) == 0 - r = conn.delete_key_pair('foo') - r.should.be.ok - - -@mock_ec2_deprecated -def test_key_pairs_delete_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.create_key_pair('foo') - - with assert_raises(EC2ResponseError) as ex: - r = conn.delete_key_pair('foo', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') - - r = conn.delete_key_pair('foo') - r.should.be.ok - assert len(conn.get_all_key_pairs()) == 0 - - -@mock_ec2_deprecated -def test_key_pairs_import(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - kp = conn.import_key_pair('foo', b'content', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') - - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' - kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' - - -@mock_ec2_deprecated -def 
test_key_pairs_import_exist():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-    kp = conn.import_key_pair('foo', b'content')
-    assert kp.name == 'foo'
-    assert len(conn.get_all_key_pairs()) == 1
-
-    with assert_raises(EC2ResponseError) as cm:
-        conn.create_key_pair('foo')
-    cm.exception.code.should.equal('InvalidKeyPair.Duplicate')
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
-
-
-@mock_ec2_deprecated
-def test_key_pair_filters():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-
-    _ = conn.create_key_pair('kpfltr1')
-    kp2 = conn.create_key_pair('kpfltr2')
-    kp3 = conn.create_key_pair('kpfltr3')
-
-    kp_by_name = conn.get_all_key_pairs(
-        filters={'key-name': 'kpfltr2'})
-    set([kp.name for kp in kp_by_name]
-        ).should.equal(set([kp2.name]))
-
-    kp_by_name = conn.get_all_key_pairs(
-        filters={'fingerprint': kp3.fingerprint})
-    set([kp.name for kp in kp_by_name]
-        ).should.equal(set([kp3.name]))
+from __future__ import unicode_literals
+# Ensure 'assert_raises' context manager support for Python 2.6
+import tests.backport_assert_raises
+from nose.tools import assert_raises
+
+import boto
+import sure  # noqa
+
+from boto.exception import EC2ResponseError
+from moto import mock_ec2_deprecated
+
+from .helpers import rsa_check_private_key
+
+
+RSA_PUBLIC_KEY_OPENSSH = b"""\
+ssh-rsa \
+AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\
+6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx\
+4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\
+JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\
+A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\
+qusUO07jKuSxzPumXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hx \
+moto@github.com"""
+
+RSA_PUBLIC_KEY_RFC4716 = b"""\
+---- BEGIN SSH2 PUBLIC KEY ----
+AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H6cZANO
+Q+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx4YIAcBi4Zd023
+mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzbZlPN45ZCTk9ck0fS
+VHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r91aM5q6QOQm219lct
+FM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPumXBeU+JEtx0J1tqZ
+wJlpGt2R+0qN7nKnPl2+hx
+---- END SSH2 PUBLIC KEY ----
+"""
+
+RSA_PUBLIC_KEY_FINGERPRINT = "6a:49:07:1c:7e:bd:d2:bd:96:25:fe:b5:74:83:ae:fd"
+
+DSA_PUBLIC_KEY_OPENSSH = b"""ssh-dss \
+AAAAB3NzaC1kc3MAAACBAJ0aXctVwbN6VB81gpo8R7DUk8zXRjZvrkg8Y8vEGt63gklpNJNsLXtEUXkl5D4c0nD2FZO1rJNqFoe\
+OQOCoGSfclHvt9w4yPl/lUEtb3Qtj1j80MInETHr19vaSunRk5R+M+8YH+LLcdYdz7MijuGey02mbi0H9K5nUIcuLMArVAAAAFQ\
+D0RDvsObRWBlnaW8645obZBM86jwAAAIBNZwf3B4krIzAwVfkMHLDSdAvs7lOWE7o8SJLzr9t4a9HhYp9SLbMzJ815KWfidEYV2\
++s4ZaPCfcZ1GENFRbE8rixz5eMAjEUXEPMJkblDZTHzMsH96z2cOCQZ0vfOmgznsf18Uf725pqo9OqAioEsTJjX8jtI2qNPEBU0\
+uhMSZQAAAIBBMGhDu5CWPUlS2QG7vzmzw81XasmHE/s2YPDRbolkriwlunpgwZhCscoQP8HFHY+DLUVvUb+GZwBmFt4l1uHl03b\
+ffsm7UIHtCBYERr9Nx0u20ldfhkgB1lhaJb5o0ZJ3pmJ38KChfyHe5EUcqRdEFo89Mp72VI2Z6UHyL175RA== \
+moto@github.com"""
+
+
+@mock_ec2_deprecated
+def test_key_pairs_empty():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    assert len(conn.get_all_key_pairs()) == 0
+
+
+@mock_ec2_deprecated
+def test_key_pairs_invalid_id():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
+    with assert_raises(EC2ResponseError) as cm:
+        conn.get_all_key_pairs('foo')
+    cm.exception.code.should.equal('InvalidKeyPair.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_key_pairs_create():
+    conn = 
boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + conn.create_key_pair('foo', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') + + kp = conn.create_key_pair('foo') + rsa_check_private_key(kp.material) + + kps = conn.get_all_key_pairs() + assert len(kps) == 1 + assert kps[0].name == 'foo' + + +@mock_ec2_deprecated +def test_key_pairs_create_two(): + conn = boto.connect_ec2('the_key', 'the_secret') + + kp1 = conn.create_key_pair('foo') + rsa_check_private_key(kp1.material) + + kp2 = conn.create_key_pair('bar') + rsa_check_private_key(kp2.material) + + assert kp1.material != kp2.material + + kps = conn.get_all_key_pairs() + kps.should.have.length_of(2) + assert {i.name for i in kps} == {'foo', 'bar'} + + kps = conn.get_all_key_pairs('foo') + kps.should.have.length_of(1) + kps[0].name.should.equal('foo') + + +@mock_ec2_deprecated +def test_key_pairs_create_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.create_key_pair('foo') + assert len(conn.get_all_key_pairs()) == 1 + + with assert_raises(EC2ResponseError) as cm: + conn.create_key_pair('foo') + cm.exception.code.should.equal('InvalidKeyPair.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pairs_delete_no_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + assert len(conn.get_all_key_pairs()) == 0 + r = conn.delete_key_pair('foo') + r.should.be.ok + + +@mock_ec2_deprecated +def test_key_pairs_delete_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.create_key_pair('foo') + + with assert_raises(EC2ResponseError) as ex: + r = conn.delete_key_pair('foo', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') + + r = conn.delete_key_pair('foo') + r.should.be.ok + assert len(conn.get_all_key_pairs()) == 0 + + +@mock_ec2_deprecated +def test_key_pairs_import(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') + + kp1 = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) + assert kp1.name == 'foo' + assert kp1.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + + kp2 = conn.import_key_pair('foo2', RSA_PUBLIC_KEY_RFC4716) + assert kp2.name == 'foo2' + assert kp2.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + + kps = conn.get_all_key_pairs() + assert len(kps) == 2 + assert kps[0].name == kp1.name + assert kps[1].name == kp2.name + + +@mock_ec2_deprecated +def test_key_pairs_import_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) + assert kp.name == 'foo' + assert len(conn.get_all_key_pairs()) == 1 + + with assert_raises(EC2ResponseError) as cm: + conn.create_key_pair('foo') + 
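The import tests above expect the OpenSSH and RFC4716 encodings of the same public key to yield RSA_PUBLIC_KEY_FINGERPRINT. For imported keys, EC2 fingerprints are an MD5 digest of the public key; a sketch of one plausible derivation follows, hashing the DER-encoded SubjectPublicKeyInfo, though the exact byte encoding hashed is an assumption here:

```python
import hashlib
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization


def demo_fingerprint(openssh_public_key_bytes):
    # Parse the one-line 'ssh-rsa AAAA... comment' form.
    public_key = serialization.load_ssh_public_key(
        openssh_public_key_bytes, backend=default_backend())
    # Assumed encoding: DER SubjectPublicKeyInfo.
    der = public_key.public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    digest = hashlib.md5(der).hexdigest()
    # Render as colon-separated byte pairs, e.g. '6a:49:07:...'.
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
```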
cm.exception.code.should.equal('InvalidKeyPair.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pairs_invalid(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'garbage') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', DSA_PUBLIC_KEY_OPENSSH) + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + +@mock_ec2_deprecated +def test_key_pair_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + _ = conn.create_key_pair('kpfltr1') + kp2 = conn.create_key_pair('kpfltr2') + kp3 = conn.create_key_pair('kpfltr3') + + kp_by_name = conn.get_all_key_pairs( + filters={'key-name': 'kpfltr2'}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp2.name])) + + kp_by_name = conn.get_all_key_pairs( + filters={'fingerprint': kp3.fingerprint}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp3.name])) diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index 9c92c949e..d4c330f00 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import boto import boto3 import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError from moto import mock_ec2_deprecated, mock_ec2 @@ -28,7 +30,7 @@ def test_new_subnet_associates_with_default_network_acl(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.get_all_vpcs()[0] - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + subnet = conn.create_subnet(vpc.id, "172.31.48.0/20") all_network_acls = conn.get_all_network_acls() all_network_acls.should.have.length_of(1) @@ -214,3 +216,37 @@ def test_default_network_acl_default_entries(): unique_entries.append(entry) unique_entries.should.have.length_of(4) + + +@mock_ec2 +def test_delete_default_network_acl_default_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + default_network_acl.entries.should.have.length_of(4) + first_default_network_acl_entry = default_network_acl.entries[0] + + default_network_acl.delete_entry(Egress=first_default_network_acl_entry['Egress'], + RuleNumber=first_default_network_acl_entry['RuleNumber']) + + default_network_acl.entries.should.have.length_of(3) + + +@mock_ec2 +def test_duplicate_network_acl_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + rule_number = 200 + egress = True + default_network_acl.create_entry(CidrBlock="0.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="allow", RuleNumber=rule_number) + + with assert_raises(ClientError) as 
ex: + default_network_acl.create_entry(CidrBlock="10.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="deny", RuleNumber=rule_number) + str(ex.exception).should.equal( + "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " + "operation: The network acl entry identified by {} already exists.".format(rule_number)) + + diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 7f0ea2f18..f94c78eaf 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -1,148 +1,150 @@ -from __future__ import unicode_literals -import boto.ec2 -import boto.ec2.autoscale -import boto.ec2.elb -import sure -from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated - -from moto.ec2 import ec2_backends - -def test_use_boto_regions(): - boto_regions = {r.name for r in boto.ec2.regions()} - moto_regions = set(ec2_backends) - - moto_regions.should.equal(boto_regions) - -def add_servers_to_region(ami_id, count, region): - conn = boto.ec2.connect_to_region(region) - for index in range(count): - conn.run_instances(ami_id) - -@mock_ec2_deprecated -def test_add_servers_to_a_single_region(): - region = 'ap-northeast-1' - add_servers_to_region('ami-1234abcd', 1, region) - add_servers_to_region('ami-5678efgh', 1, region) - - conn = boto.ec2.connect_to_region(region) - reservations = conn.get_all_instances() - len(reservations).should.equal(2) - reservations.sort(key=lambda x: x.instances[0].image_id) - - reservations[0].instances[0].image_id.should.equal('ami-1234abcd') - reservations[1].instances[0].image_id.should.equal('ami-5678efgh') - - -@mock_ec2_deprecated -def test_add_servers_to_multiple_regions(): - region1 = 'us-east-1' - region2 = 'ap-northeast-1' - add_servers_to_region('ami-1234abcd', 1, region1) - add_servers_to_region('ami-5678efgh', 1, region2) - - us_conn = boto.ec2.connect_to_region(region1) - ap_conn = boto.ec2.connect_to_region(region2) - us_reservations = us_conn.get_all_instances() - ap_reservations = ap_conn.get_all_instances() - - len(us_reservations).should.equal(1) - len(ap_reservations).should.equal(1) - - us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd') - ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh') - - -@mock_autoscaling_deprecated -@mock_elb_deprecated -def test_create_autoscaling_group(): - elb_conn = boto.ec2.elb.connect_to_region('us-east-1') - elb_conn.create_load_balancer( - 'us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) - elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1') - elb_conn.create_load_balancer( - 'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')]) - - us_conn = boto.ec2.autoscale.connect_to_region('us-east-1') - config = boto.ec2.autoscale.LaunchConfiguration( - name='us_tester', - image_id='ami-abcd1234', - instance_type='m1.small', - ) - us_conn.create_launch_configuration(config) - - group = boto.ec2.autoscale.AutoScalingGroup( - name='us_tester_group', - availability_zones=['us-east-1c'], - default_cooldown=60, - desired_capacity=2, - health_check_period=100, - health_check_type="EC2", - max_size=2, - min_size=2, - launch_config=config, - load_balancers=["us_test_lb"], - placement_group="us_test_placement", - vpc_zone_identifier='subnet-1234abcd', - termination_policies=["OldestInstance", "NewestInstance"], - ) - us_conn.create_auto_scaling_group(group) - - ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1') - config = boto.ec2.autoscale.LaunchConfiguration( - name='ap_tester', 
- image_id='ami-efgh5678', - instance_type='m1.small', - ) - ap_conn.create_launch_configuration(config) - - group = boto.ec2.autoscale.AutoScalingGroup( - name='ap_tester_group', - availability_zones=['ap-northeast-1a'], - default_cooldown=60, - desired_capacity=2, - health_check_period=100, - health_check_type="EC2", - max_size=2, - min_size=2, - launch_config=config, - load_balancers=["ap_test_lb"], - placement_group="ap_test_placement", - vpc_zone_identifier='subnet-5678efgh', - termination_policies=["OldestInstance", "NewestInstance"], - ) - ap_conn.create_auto_scaling_group(group) - - len(us_conn.get_all_groups()).should.equal(1) - len(ap_conn.get_all_groups()).should.equal(1) - - us_group = us_conn.get_all_groups()[0] - us_group.name.should.equal('us_tester_group') - list(us_group.availability_zones).should.equal(['us-east-1c']) - us_group.desired_capacity.should.equal(2) - us_group.max_size.should.equal(2) - us_group.min_size.should.equal(2) - us_group.vpc_zone_identifier.should.equal('subnet-1234abcd') - us_group.launch_config_name.should.equal('us_tester') - us_group.default_cooldown.should.equal(60) - us_group.health_check_period.should.equal(100) - us_group.health_check_type.should.equal("EC2") - list(us_group.load_balancers).should.equal(["us_test_lb"]) - us_group.placement_group.should.equal("us_test_placement") - list(us_group.termination_policies).should.equal( - ["OldestInstance", "NewestInstance"]) - - ap_group = ap_conn.get_all_groups()[0] - ap_group.name.should.equal('ap_tester_group') - list(ap_group.availability_zones).should.equal(['ap-northeast-1a']) - ap_group.desired_capacity.should.equal(2) - ap_group.max_size.should.equal(2) - ap_group.min_size.should.equal(2) - ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh') - ap_group.launch_config_name.should.equal('ap_tester') - ap_group.default_cooldown.should.equal(60) - ap_group.health_check_period.should.equal(100) - ap_group.health_check_type.should.equal("EC2") - list(ap_group.load_balancers).should.equal(["ap_test_lb"]) - ap_group.placement_group.should.equal("ap_test_placement") - list(ap_group.termination_policies).should.equal( - ["OldestInstance", "NewestInstance"]) +from __future__ import unicode_literals +import boto.ec2 +import boto.ec2.autoscale +import boto.ec2.elb +import sure +from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated + +from moto.ec2 import ec2_backends + +def test_use_boto_regions(): + boto_regions = {r.name for r in boto.ec2.regions()} + moto_regions = set(ec2_backends) + + moto_regions.should.equal(boto_regions) + +def add_servers_to_region(ami_id, count, region): + conn = boto.ec2.connect_to_region(region) + for index in range(count): + conn.run_instances(ami_id) + +@mock_ec2_deprecated +def test_add_servers_to_a_single_region(): + region = 'ap-northeast-1' + add_servers_to_region('ami-1234abcd', 1, region) + add_servers_to_region('ami-5678efgh', 1, region) + + conn = boto.ec2.connect_to_region(region) + reservations = conn.get_all_instances() + len(reservations).should.equal(2) + reservations.sort(key=lambda x: x.instances[0].image_id) + + reservations[0].instances[0].image_id.should.equal('ami-1234abcd') + reservations[1].instances[0].image_id.should.equal('ami-5678efgh') + + +@mock_ec2_deprecated +def test_add_servers_to_multiple_regions(): + region1 = 'us-east-1' + region2 = 'ap-northeast-1' + add_servers_to_region('ami-1234abcd', 1, region1) + add_servers_to_region('ami-5678efgh', 1, region2) + + us_conn = 
boto.ec2.connect_to_region(region1) + ap_conn = boto.ec2.connect_to_region(region2) + us_reservations = us_conn.get_all_instances() + ap_reservations = ap_conn.get_all_instances() + + len(us_reservations).should.equal(1) + len(ap_reservations).should.equal(1) + + us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd') + ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh') + + +@mock_autoscaling_deprecated +@mock_elb_deprecated +def test_create_autoscaling_group(): + elb_conn = boto.ec2.elb.connect_to_region('us-east-1') + elb_conn.create_load_balancer( + 'us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) + elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1') + elb_conn.create_load_balancer( + 'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')]) + + us_conn = boto.ec2.autoscale.connect_to_region('us-east-1') + config = boto.ec2.autoscale.LaunchConfiguration( + name='us_tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + x = us_conn.create_launch_configuration(config) + + us_subnet_id = list(ec2_backends['us-east-1'].subnets['us-east-1c'].keys())[0] + ap_subnet_id = list(ec2_backends['ap-northeast-1'].subnets['ap-northeast-1a'].keys())[0] + group = boto.ec2.autoscale.AutoScalingGroup( + name='us_tester_group', + availability_zones=['us-east-1c'], + default_cooldown=60, + desired_capacity=2, + health_check_period=100, + health_check_type="EC2", + max_size=2, + min_size=2, + launch_config=config, + load_balancers=["us_test_lb"], + placement_group="us_test_placement", + vpc_zone_identifier=us_subnet_id, + termination_policies=["OldestInstance", "NewestInstance"], + ) + us_conn.create_auto_scaling_group(group) + + ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1') + config = boto.ec2.autoscale.LaunchConfiguration( + name='ap_tester', + image_id='ami-efgh5678', + instance_type='m1.small', + ) + ap_conn.create_launch_configuration(config) + + group = boto.ec2.autoscale.AutoScalingGroup( + name='ap_tester_group', + availability_zones=['ap-northeast-1a'], + default_cooldown=60, + desired_capacity=2, + health_check_period=100, + health_check_type="EC2", + max_size=2, + min_size=2, + launch_config=config, + load_balancers=["ap_test_lb"], + placement_group="ap_test_placement", + vpc_zone_identifier=ap_subnet_id, + termination_policies=["OldestInstance", "NewestInstance"], + ) + ap_conn.create_auto_scaling_group(group) + + len(us_conn.get_all_groups()).should.equal(1) + len(ap_conn.get_all_groups()).should.equal(1) + + us_group = us_conn.get_all_groups()[0] + us_group.name.should.equal('us_tester_group') + list(us_group.availability_zones).should.equal(['us-east-1c']) + us_group.desired_capacity.should.equal(2) + us_group.max_size.should.equal(2) + us_group.min_size.should.equal(2) + us_group.vpc_zone_identifier.should.equal(us_subnet_id) + us_group.launch_config_name.should.equal('us_tester') + us_group.default_cooldown.should.equal(60) + us_group.health_check_period.should.equal(100) + us_group.health_check_type.should.equal("EC2") + list(us_group.load_balancers).should.equal(["us_test_lb"]) + us_group.placement_group.should.equal("us_test_placement") + list(us_group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) + + ap_group = ap_conn.get_all_groups()[0] + ap_group.name.should.equal('ap_tester_group') + list(ap_group.availability_zones).should.equal(['ap-northeast-1a']) + ap_group.desired_capacity.should.equal(2) + ap_group.max_size.should.equal(2) + ap_group.min_size.should.equal(2) + 
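Rather than hard-coding 'subnet-1234abcd', the rewritten autoscaling test above resolves a real default subnet for each availability zone by reaching into moto's ec2_backends. A sketch of a public-API alternative, assuming moto's describe_subnets supports the availability-zone filter:

```python
import boto3
from moto import mock_ec2


@mock_ec2
def demo_default_subnet_lookup():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    # moto pre-creates a default VPC with one subnet per AZ.
    subnets = ec2.describe_subnets(
        Filters=[{'Name': 'availability-zone', 'Values': ['us-east-1c']}]
    )['Subnets']
    return subnets[0]['SubnetId']
```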
ap_group.vpc_zone_identifier.should.equal(ap_subnet_id) + ap_group.launch_config_name.should.equal('ap_tester') + ap_group.default_cooldown.should.equal(60) + ap_group.health_check_period.should.equal(100) + ap_group.health_check_type.should.equal("EC2") + list(ap_group.load_balancers).should.equal(["ap_test_lb"]) + ap_group.placement_group.should.equal("ap_test_placement") + list(ap_group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index e6f767a0a..de33b3f7a 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -1,530 +1,554 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import boto3 -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated -from tests.helpers import requires_boto_gte - - -@mock_ec2_deprecated -def test_route_tables_defaults(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(1) - - main_route_table = all_route_tables[0] - main_route_table.vpc_id.should.equal(vpc.id) - - routes = main_route_table.routes - routes.should.have.length_of(1) - - local_route = routes[0] - local_route.gateway_id.should.equal('local') - local_route.state.should.equal('active') - local_route.destination_cidr_block.should.equal(vpc.cidr_block) - - vpc.delete() - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_route_tables_additional(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - route_table = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(2) - all_route_tables[0].vpc_id.should.equal(vpc.id) - all_route_tables[1].vpc_id.should.equal(vpc.id) - - all_route_table_ids = [route_table.id for route_table in all_route_tables] - all_route_table_ids.should.contain(route_table.id) - - routes = route_table.routes - routes.should.have.length_of(1) - - local_route = routes[0] - local_route.gateway_id.should.equal('local') - local_route.state.should.equal('active') - local_route.destination_cidr_block.should.equal(vpc.cidr_block) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc(vpc.id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.delete_route_table(route_table.id) - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_route_table("rtb-1234abcd") - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_route_tables_filters_standard(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc1 = conn.create_vpc("10.0.0.0/16") - route_table1 = conn.create_route_table(vpc1.id) - - vpc2 = conn.create_vpc("10.0.0.0/16") - route_table2 = conn.create_route_table(vpc2.id) - - 
all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(5) - - # Filter by main route table - main_route_tables = conn.get_all_route_tables( - filters={'association.main': 'true'}) - main_route_tables.should.have.length_of(3) - main_route_table_ids = [ - route_table.id for route_table in main_route_tables] - main_route_table_ids.should_not.contain(route_table1.id) - main_route_table_ids.should_not.contain(route_table2.id) - - # Filter by VPC - vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) - vpc1_route_tables.should.have.length_of(2) - vpc1_route_table_ids = [ - route_table.id for route_table in vpc1_route_tables] - vpc1_route_table_ids.should.contain(route_table1.id) - vpc1_route_table_ids.should_not.contain(route_table2.id) - - # Filter by VPC and main route table - vpc2_main_route_tables = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc2.id}) - vpc2_main_route_tables.should.have.length_of(1) - vpc2_main_route_table_ids = [ - route_table.id for route_table in vpc2_main_route_tables] - vpc2_main_route_table_ids.should_not.contain(route_table1.id) - vpc2_main_route_table_ids.should_not.contain(route_table2.id) - - # Unsupported filter - conn.get_all_route_tables.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_route_tables_filters_associations(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") - subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") - route_table1 = conn.create_route_table(vpc.id) - route_table2 = conn.create_route_table(vpc.id) - - association_id1 = conn.associate_route_table(route_table1.id, subnet1.id) - association_id2 = conn.associate_route_table(route_table1.id, subnet2.id) - association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) - - # Filter by association ID - association1_route_tables = conn.get_all_route_tables( - filters={'association.route-table-association-id': association_id1}) - association1_route_tables.should.have.length_of(1) - association1_route_tables[0].id.should.equal(route_table1.id) - association1_route_tables[0].associations.should.have.length_of(2) - - # Filter by route table ID - route_table2_route_tables = conn.get_all_route_tables( - filters={'association.route-table-id': route_table2.id}) - route_table2_route_tables.should.have.length_of(1) - route_table2_route_tables[0].id.should.equal(route_table2.id) - route_table2_route_tables[0].associations.should.have.length_of(1) - - # Filter by subnet ID - subnet_route_tables = conn.get_all_route_tables( - filters={'association.subnet-id': subnet1.id}) - subnet_route_tables.should.have.length_of(1) - subnet_route_tables[0].id.should.equal(route_table1.id) - association1_route_tables[0].associations.should.have.length_of(2) - - -@mock_ec2_deprecated -def test_route_table_associations(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - route_table = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(3) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - 
route_table.associations.should.have.length_of(0) - - # Associate - association_id = conn.associate_route_table(route_table.id, subnet.id) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(1) - - route_table.associations[0].id.should.equal(association_id) - route_table.associations[0].main.should.equal(False) - route_table.associations[0].route_table_id.should.equal(route_table.id) - route_table.associations[0].subnet_id.should.equal(subnet.id) - - # Associate is idempotent - association_id_idempotent = conn.associate_route_table( - route_table.id, subnet.id) - association_id_idempotent.should.equal(association_id) - - # Error: Attempt delete associated route table. - with assert_raises(EC2ResponseError) as cm: - conn.delete_route_table(route_table.id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Disassociate - conn.disassociate_route_table(association_id) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(0) - - # Error: Disassociate with invalid association ID - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_route_table(association_id) - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Associate with invalid subnet ID - with assert_raises(EC2ResponseError) as cm: - conn.associate_route_table(route_table.id, "subnet-1234abcd") - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Associate with invalid route table ID - with assert_raises(EC2ResponseError) as cm: - conn.associate_route_table("rtb-1234abcd", subnet.id) - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.16.0") -@mock_ec2_deprecated -def test_route_table_replace_route_table_association(): - """ - Note: Boto has deprecated replace_route_table_assocation (which returns status) - and now uses replace_route_table_assocation_with_assoc (which returns association ID). 
- """ - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - route_table1 = conn.create_route_table(vpc.id) - route_table2 = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table1.associations.should.have.length_of(0) - - # Associate - association_id1 = conn.associate_route_table(route_table1.id, subnet.id) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table2 = conn.get_all_route_tables(route_table2.id)[0] - - # Validate - route_table1.associations.should.have.length_of(1) - route_table2.associations.should.have.length_of(0) - - route_table1.associations[0].id.should.equal(association_id1) - route_table1.associations[0].main.should.equal(False) - route_table1.associations[0].route_table_id.should.equal(route_table1.id) - route_table1.associations[0].subnet_id.should.equal(subnet.id) - - # Replace Association - association_id2 = conn.replace_route_table_association_with_assoc( - association_id1, route_table2.id) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table2 = conn.get_all_route_tables(route_table2.id)[0] - - # Validate - route_table1.associations.should.have.length_of(0) - route_table2.associations.should.have.length_of(1) - - route_table2.associations[0].id.should.equal(association_id2) - route_table2.associations[0].main.should.equal(False) - route_table2.associations[0].route_table_id.should.equal(route_table2.id) - route_table2.associations[0].subnet_id.should.equal(subnet.id) - - # Replace Association is idempotent - association_id_idempotent = conn.replace_route_table_association_with_assoc( - association_id2, route_table2.id) - association_id_idempotent.should.equal(association_id2) - - # Error: Replace association with invalid association ID - with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc( - "rtbassoc-1234abcd", route_table1.id) - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Replace association with invalid route table ID - with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc( - association_id2, "rtb-1234abcd") - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_route_table_get_by_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc('10.0.0.0/16') - - route_table = conn.create_route_table(vpc.id) - route_table.add_tag('Name', 'TestRouteTable') - - route_tables = conn.get_all_route_tables( - filters={'tag:Name': 'TestRouteTable'}) - - route_tables.should.have.length_of(1) - route_tables[0].vpc_id.should.equal(vpc.id) - route_tables[0].id.should.equal(route_table.id) - route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags['Name'].should.equal('TestRouteTable') - - -@mock_ec2 -def test_route_table_get_by_tag_boto3(): - ec2 = boto3.resource('ec2', region_name='eu-central-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - route_table = ec2.create_route_table(VpcId=vpc.id) - route_table.create_tags(Tags=[{'Key': 'Name', 'Value': 'TestRouteTable'}]) - - filters = 
[{'Name': 'tag:Name', 'Values': ['TestRouteTable']}] - route_tables = list(ec2.route_tables.filter(Filters=filters)) - - route_tables.should.have.length_of(1) - route_tables[0].vpc_id.should.equal(vpc.id) - route_tables[0].id.should.equal(route_table.id) - route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags[0].should.equal( - {'Key': 'Name', 'Value': 'TestRouteTable'}) - - -@mock_ec2_deprecated -def test_routes_additional(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - igw = conn.create_internet_gateway() - ROUTE_CIDR = "10.0.0.4/24" - - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - main_route_table = conn.get_all_route_tables( - filters={'vpc-id': vpc.id})[0] # Refresh route table - - main_route_table.routes.should.have.length_of(2) - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.equal(igw.id) - new_route.instance_id.should.be.none - new_route.state.should.equal('active') - new_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - conn.delete_route(main_route_table.id, ROUTE_CIDR) - - main_route_table = conn.get_all_route_tables( - filters={'vpc-id': vpc.id})[0] # Refresh route table - - main_route_table.routes.should.have.length_of(1) - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_route(main_route_table.id, ROUTE_CIDR) - cm.exception.code.should.equal('InvalidRoute.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_routes_replace(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - ROUTE_CIDR = "10.0.0.4/24" - - # Various route targets - igw = conn.create_internet_gateway() - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - # Create initial route - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - # Replace... 
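# A route points at exactly one target (gateway, instance, network
# interface, or peering connection); replace_route swaps that target while
# the destination CIDR stays fixed, which the alternating gateway/instance
# assertions below verify.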
- def get_target_route(): - route_table = conn.get_all_route_tables(main_route_table.id)[0] - routes = [ - route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] - routes.should.have.length_of(1) - return routes[0] - - conn.replace_route(main_route_table.id, ROUTE_CIDR, - instance_id=instance.id) - - target_route = get_target_route() - target_route.gateway_id.should.be.none - target_route.instance_id.should.equal(instance.id) - target_route.state.should.equal('active') - target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - conn.replace_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - target_route = get_target_route() - target_route.gateway_id.should.equal(igw.id) - target_route.instance_id.should.be.none - target_route.state.should.equal('active') - target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - with assert_raises(EC2ResponseError) as cm: - conn.replace_route('rtb-1234abcd', ROUTE_CIDR, gateway_id=igw.id) - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.19.0") -@mock_ec2_deprecated -def test_routes_not_supported(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables()[0] - local_route = main_route_table.routes[0] - igw = conn.create_internet_gateway() - ROUTE_CIDR = "10.0.0.4/24" - - # Create - conn.create_route.when.called_with( - main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) - - # Replace - igw = conn.create_internet_gateway() - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - conn.replace_route.when.called_with( - main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) - - -@requires_boto_gte("2.34.0") -@mock_ec2_deprecated -def test_routes_vpc_peering_connection(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - ROUTE_CIDR = "10.0.0.4/24" - - peer_vpc = conn.create_vpc("11.0.0.0/16") - vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) - - conn.create_route(main_route_table.id, ROUTE_CIDR, - vpc_peering_connection_id=vpc_pcx.id) - - # Refresh route table - main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.be.none - new_route.instance_id.should.be.none - new_route.vpc_peering_connection_id.should.equal(vpc_pcx.id) - new_route.state.should.equal('blackhole') - new_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - -@requires_boto_gte("2.34.0") -@mock_ec2_deprecated -def test_routes_vpn_gateway(): - - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - ROUTE_CIDR = "10.0.0.4/24" - - vpn_gw = conn.create_vpn_gateway(type="ipsec.1") - - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) - - main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [ - route for 
route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.equal(vpn_gw.id) - new_route.instance_id.should.be.none - new_route.vpc_peering_connection_id.should.be.none - - -@mock_ec2_deprecated -def test_network_acl_tagging(): - - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - route_table = conn.create_route_table(vpc.id) - route_table.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - all_route_tables = conn.get_all_route_tables() - test_route_table = next(na for na in all_route_tables - if na.id == route_table.id) - test_route_table.tags.should.have.length_of(1) - test_route_table.tags["a key"].should.equal("some value") +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from tests.helpers import requires_boto_gte + + +@mock_ec2_deprecated +def test_route_tables_defaults(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(1) + + main_route_table = all_route_tables[0] + main_route_table.vpc_id.should.equal(vpc.id) + + routes = main_route_table.routes + routes.should.have.length_of(1) + + local_route = routes[0] + local_route.gateway_id.should.equal('local') + local_route.state.should.equal('active') + local_route.destination_cidr_block.should.equal(vpc.cidr_block) + + vpc.delete() + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_route_tables_additional(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + route_table = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(2) + all_route_tables[0].vpc_id.should.equal(vpc.id) + all_route_tables[1].vpc_id.should.equal(vpc.id) + + all_route_table_ids = [route_table.id for route_table in all_route_tables] + all_route_table_ids.should.contain(route_table.id) + + routes = route_table.routes + routes.should.have.length_of(1) + + local_route = routes[0] + local_route.gateway_id.should.equal('local') + local_route.state.should.equal('active') + local_route.destination_cidr_block.should.equal(vpc.cidr_block) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc(vpc.id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.delete_route_table(route_table.id) + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_route_table("rtb-1234abcd") + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_route_tables_filters_standard(): + 
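# For readers on boto3: a minimal sketch of the same two filters this test
# drives through legacy boto. Illustrative only; the helper name, region,
# and CIDR are ours rather than part of the suite, and it assumes moto's
# mock_ec2 backend.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_route_table_filters():
    client = boto3.client('ec2', region_name='us-east-1')
    vpc_id = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    client.create_route_table(VpcId=vpc_id)
    # 'association.main' selects each VPC's implicit main route table.
    mains = client.describe_route_tables(
        Filters=[{'Name': 'association.main', 'Values': ['true']}])
    # 'vpc-id' scopes the listing to one VPC (its main plus custom tables).
    in_vpc = client.describe_route_tables(
        Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
    return mains['RouteTables'], in_vpc['RouteTables']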
conn = boto.connect_vpc('the_key', 'the_secret') + + vpc1 = conn.create_vpc("10.0.0.0/16") + route_table1 = conn.create_route_table(vpc1.id) + + vpc2 = conn.create_vpc("10.0.0.0/16") + route_table2 = conn.create_route_table(vpc2.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(5) + + # Filter by main route table + main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true'}) + main_route_tables.should.have.length_of(3) + main_route_table_ids = [ + route_table.id for route_table in main_route_tables] + main_route_table_ids.should_not.contain(route_table1.id) + main_route_table_ids.should_not.contain(route_table2.id) + + # Filter by VPC + vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) + vpc1_route_tables.should.have.length_of(2) + vpc1_route_table_ids = [ + route_table.id for route_table in vpc1_route_tables] + vpc1_route_table_ids.should.contain(route_table1.id) + vpc1_route_table_ids.should_not.contain(route_table2.id) + + # Filter by VPC and main route table + vpc2_main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc2.id}) + vpc2_main_route_tables.should.have.length_of(1) + vpc2_main_route_table_ids = [ + route_table.id for route_table in vpc2_main_route_tables] + vpc2_main_route_table_ids.should_not.contain(route_table1.id) + vpc2_main_route_table_ids.should_not.contain(route_table2.id) + + # Unsupported filter + conn.get_all_route_tables.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +def test_route_tables_filters_associations(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpc = conn.create_vpc("10.0.0.0/16") + subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") + route_table1 = conn.create_route_table(vpc.id) + route_table2 = conn.create_route_table(vpc.id) + + association_id1 = conn.associate_route_table(route_table1.id, subnet1.id) + association_id2 = conn.associate_route_table(route_table1.id, subnet2.id) + association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(4) + + # Filter by association ID + association1_route_tables = conn.get_all_route_tables( + filters={'association.route-table-association-id': association_id1}) + association1_route_tables.should.have.length_of(1) + association1_route_tables[0].id.should.equal(route_table1.id) + association1_route_tables[0].associations.should.have.length_of(2) + + # Filter by route table ID + route_table2_route_tables = conn.get_all_route_tables( + filters={'association.route-table-id': route_table2.id}) + route_table2_route_tables.should.have.length_of(1) + route_table2_route_tables[0].id.should.equal(route_table2.id) + route_table2_route_tables[0].associations.should.have.length_of(1) + + # Filter by subnet ID + subnet_route_tables = conn.get_all_route_tables( + filters={'association.subnet-id': subnet1.id}) + subnet_route_tables.should.have.length_of(1) + subnet_route_tables[0].id.should.equal(route_table1.id) + association1_route_tables[0].associations.should.have.length_of(2) + + +@mock_ec2_deprecated +def test_route_table_associations(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + 
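# The associate / idempotent re-associate / disassociate lifecycle tested
# below, sketched in boto3 terms; hedged and illustrative (helper name and
# region are ours), assuming moto's mock_ec2 backend.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_association_lifecycle():
    client = boto3.client('ec2', region_name='us-east-1')
    vpc_id = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    subnet_id = client.create_subnet(
        VpcId=vpc_id, CidrBlock='10.0.0.0/18')['Subnet']['SubnetId']
    rtb_id = client.create_route_table(
        VpcId=vpc_id)['RouteTable']['RouteTableId']
    assoc_id = client.associate_route_table(
        RouteTableId=rtb_id, SubnetId=subnet_id)['AssociationId']
    # Dropping the association sends the subnet back to the main route table.
    client.disassociate_route_table(AssociationId=assoc_id)
    return assoc_id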
route_table = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(3) + + # Refresh + route_table = conn.get_all_route_tables(route_table.id)[0] + route_table.associations.should.have.length_of(0) + + # Associate + association_id = conn.associate_route_table(route_table.id, subnet.id) + + # Refresh + route_table = conn.get_all_route_tables(route_table.id)[0] + route_table.associations.should.have.length_of(1) + + route_table.associations[0].id.should.equal(association_id) + route_table.associations[0].main.should.equal(False) + route_table.associations[0].route_table_id.should.equal(route_table.id) + route_table.associations[0].subnet_id.should.equal(subnet.id) + + # Associate is idempotent + association_id_idempotent = conn.associate_route_table( + route_table.id, subnet.id) + association_id_idempotent.should.equal(association_id) + + # Error: Attempt delete associated route table. + with assert_raises(EC2ResponseError) as cm: + conn.delete_route_table(route_table.id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Disassociate + conn.disassociate_route_table(association_id) + + # Refresh + route_table = conn.get_all_route_tables(route_table.id)[0] + route_table.associations.should.have.length_of(0) + + # Error: Disassociate with invalid association ID + with assert_raises(EC2ResponseError) as cm: + conn.disassociate_route_table(association_id) + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Associate with invalid subnet ID + with assert_raises(EC2ResponseError) as cm: + conn.associate_route_table(route_table.id, "subnet-1234abcd") + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Associate with invalid route table ID + with assert_raises(EC2ResponseError) as cm: + conn.associate_route_table("rtb-1234abcd", subnet.id) + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.16.0") +@mock_ec2_deprecated +def test_route_table_replace_route_table_association(): + """ + Note: Boto has deprecated replace_route_table_assocation (which returns status) + and now uses replace_route_table_association_with_assoc (which returns association ID). 
+ """ + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + route_table1 = conn.create_route_table(vpc.id) + route_table2 = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(4) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table1.associations.should.have.length_of(0) + + # Associate + association_id1 = conn.associate_route_table(route_table1.id, subnet.id) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table2 = conn.get_all_route_tables(route_table2.id)[0] + + # Validate + route_table1.associations.should.have.length_of(1) + route_table2.associations.should.have.length_of(0) + + route_table1.associations[0].id.should.equal(association_id1) + route_table1.associations[0].main.should.equal(False) + route_table1.associations[0].route_table_id.should.equal(route_table1.id) + route_table1.associations[0].subnet_id.should.equal(subnet.id) + + # Replace Association + association_id2 = conn.replace_route_table_association_with_assoc( + association_id1, route_table2.id) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table2 = conn.get_all_route_tables(route_table2.id)[0] + + # Validate + route_table1.associations.should.have.length_of(0) + route_table2.associations.should.have.length_of(1) + + route_table2.associations[0].id.should.equal(association_id2) + route_table2.associations[0].main.should.equal(False) + route_table2.associations[0].route_table_id.should.equal(route_table2.id) + route_table2.associations[0].subnet_id.should.equal(subnet.id) + + # Replace Association is idempotent + association_id_idempotent = conn.replace_route_table_association_with_assoc( + association_id2, route_table2.id) + association_id_idempotent.should.equal(association_id2) + + # Error: Replace association with invalid association ID + with assert_raises(EC2ResponseError) as cm: + conn.replace_route_table_association_with_assoc( + "rtbassoc-1234abcd", route_table1.id) + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Replace association with invalid route table ID + with assert_raises(EC2ResponseError) as cm: + conn.replace_route_table_association_with_assoc( + association_id2, "rtb-1234abcd") + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_route_table_get_by_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpc = conn.create_vpc('10.0.0.0/16') + + route_table = conn.create_route_table(vpc.id) + route_table.add_tag('Name', 'TestRouteTable') + + route_tables = conn.get_all_route_tables( + filters={'tag:Name': 'TestRouteTable'}) + + route_tables.should.have.length_of(1) + route_tables[0].vpc_id.should.equal(vpc.id) + route_tables[0].id.should.equal(route_table.id) + route_tables[0].tags.should.have.length_of(1) + route_tables[0].tags['Name'].should.equal('TestRouteTable') + + +@mock_ec2 +def test_route_table_get_by_tag_boto3(): + ec2 = boto3.resource('ec2', region_name='eu-central-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.create_tags(Tags=[{'Key': 'Name', 'Value': 'TestRouteTable'}]) + + filters = 
[{'Name': 'tag:Name', 'Values': ['TestRouteTable']}] + route_tables = list(ec2.route_tables.filter(Filters=filters)) + + route_tables.should.have.length_of(1) + route_tables[0].vpc_id.should.equal(vpc.id) + route_tables[0].id.should.equal(route_table.id) + route_tables[0].tags.should.have.length_of(1) + route_tables[0].tags[0].should.equal( + {'Key': 'Name', 'Value': 'TestRouteTable'}) + + +@mock_ec2_deprecated +def test_routes_additional(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + igw = conn.create_internet_gateway() + ROUTE_CIDR = "10.0.0.4/24" + + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table + + main_route_table.routes.should.have.length_of(2) + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.equal(igw.id) + new_route.instance_id.should.be.none + new_route.state.should.equal('active') + new_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + conn.delete_route(main_route_table.id, ROUTE_CIDR) + + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table + + main_route_table.routes.should.have.length_of(1) + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_route(main_route_table.id, ROUTE_CIDR) + cm.exception.code.should.equal('InvalidRoute.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_routes_replace(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + ROUTE_CIDR = "10.0.0.4/24" + + # Various route targets + igw = conn.create_internet_gateway() + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + # Create initial route + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + # Replace... 
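# The boto3 counterpart of the replace sequence below; a hedged, illustrative
# sketch (helper name and region are ours), assuming moto's mock_ec2 backend.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_replace_route():
    client = boto3.client('ec2', region_name='us-east-1')
    vpc_id = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    rtb_id = client.describe_route_tables(
        Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
    )['RouteTables'][0]['RouteTableId']
    igw1 = client.create_internet_gateway()['InternetGateway']['InternetGatewayId']
    igw2 = client.create_internet_gateway()['InternetGateway']['InternetGatewayId']
    client.create_route(RouteTableId=rtb_id,
                        DestinationCidrBlock='10.0.0.4/24', GatewayId=igw1)
    # ReplaceRoute keeps the destination CIDR and swaps only the target.
    client.replace_route(RouteTableId=rtb_id,
                         DestinationCidrBlock='10.0.0.4/24', GatewayId=igw2)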
+ def get_target_route(): + route_table = conn.get_all_route_tables(main_route_table.id)[0] + routes = [ + route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] + routes.should.have.length_of(1) + return routes[0] + + conn.replace_route(main_route_table.id, ROUTE_CIDR, + instance_id=instance.id) + + target_route = get_target_route() + target_route.gateway_id.should.be.none + target_route.instance_id.should.equal(instance.id) + target_route.state.should.equal('active') + target_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + conn.replace_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + target_route = get_target_route() + target_route.gateway_id.should.equal(igw.id) + target_route.instance_id.should.be.none + target_route.state.should.equal('active') + target_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + with assert_raises(EC2ResponseError) as cm: + conn.replace_route('rtb-1234abcd', ROUTE_CIDR, gateway_id=igw.id) + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.19.0") +@mock_ec2_deprecated +def test_routes_not_supported(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables()[0] + local_route = main_route_table.routes[0] + igw = conn.create_internet_gateway() + ROUTE_CIDR = "10.0.0.4/24" + + # Create + conn.create_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + + # Replace + igw = conn.create_internet_gateway() + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + conn.replace_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + + +@requires_boto_gte("2.34.0") +@mock_ec2_deprecated +def test_routes_vpc_peering_connection(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + ROUTE_CIDR = "10.0.0.4/24" + + peer_vpc = conn.create_vpc("11.0.0.0/16") + vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) + + conn.create_route(main_route_table.id, ROUTE_CIDR, + vpc_peering_connection_id=vpc_pcx.id) + + # Refresh route table + main_route_table = conn.get_all_route_tables(main_route_table.id)[0] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.be.none + new_route.instance_id.should.be.none + new_route.vpc_peering_connection_id.should.equal(vpc_pcx.id) + new_route.state.should.equal('blackhole') + new_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + +@requires_boto_gte("2.34.0") +@mock_ec2_deprecated +def test_routes_vpn_gateway(): + + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + ROUTE_CIDR = "10.0.0.4/24" + + vpn_gw = conn.create_vpn_gateway(type="ipsec.1") + + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) + + main_route_table = conn.get_all_route_tables(main_route_table.id)[0] + new_routes = [ + route for 
route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.equal(vpn_gw.id) + new_route.instance_id.should.be.none + new_route.vpc_peering_connection_id.should.be.none + + +@mock_ec2_deprecated +def test_network_acl_tagging(): + + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + + route_table = conn.create_route_table(vpc.id) + route_table.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + all_route_tables = conn.get_all_route_tables() + test_route_table = next(na for na in all_route_tables + if na.id == route_table.id) + test_route_table.tags.should.have.length_of(1) + test_route_table.tags["a key"].should.equal("some value") + + +@mock_ec2 +def test_create_route_with_invalid_destination_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.reload() + + internet_gateway = ec2.create_internet_gateway() + vpc.attach_internet_gateway(InternetGatewayId=internet_gateway.id) + internet_gateway.reload() + + destination_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + route = route_table.create_route(DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateRoute " + "operation: Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(destination_cidr_block)) \ No newline at end of file diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 15be94fbe..c09b1e8f4 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -1,737 +1,737 @@ -from __future__ import unicode_literals - -import copy - -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import boto3 -import boto -from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_create_and_describe_security_group(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - security_group = conn.create_security_group( - 'test security group', 'this is a test security group', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - - security_group = conn.create_security_group( - 'test security group', 'this is a test security group') - - security_group.name.should.equal('test security group') - security_group.description.should.equal('this is a test security group') - - # Trying to create another group with the same name should throw an error - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group( - 'test security group', 'this is a test security group') - cm.exception.code.should.equal('InvalidGroup.Duplicate') - 
cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_groups = conn.get_all_security_groups() - # The default group gets created automatically - all_groups.should.have.length_of(3) - group_names = [group.name for group in all_groups] - set(group_names).should.equal(set(["default", "test security group"])) - - -@mock_ec2_deprecated -def test_create_security_group_without_description_raises_error(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group('test security group', '') - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_default_security_group(): - conn = boto.ec2.connect_to_region('us-east-1') - groups = conn.get_all_security_groups() - groups.should.have.length_of(2) - groups[0].name.should.equal("default") - - -@mock_ec2_deprecated -def test_create_and_describe_vpc_security_group(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id=vpc_id) - - security_group.vpc_id.should.equal(vpc_id) - - security_group.name.should.equal('test security group') - security_group.description.should.equal('this is a test security group') - - # Trying to create another group with the same name in the same VPC should - # throw an error - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id) - cm.exception.code.should.equal('InvalidGroup.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]}) - - all_groups[0].vpc_id.should.equal(vpc_id) - - all_groups.should.have.length_of(1) - all_groups[0].name.should.equal('test security group') - - -@mock_ec2_deprecated -def test_create_two_security_groups_with_same_name_in_different_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - vpc_id2 = 'vpc-5300000d' - - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id) - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id2) - - all_groups = conn.get_all_security_groups() - - all_groups.should.have.length_of(4) - group_names = [group.name for group in all_groups] - # The default group is created automatically - set(group_names).should.equal(set(["default", "test security group"])) - - -@mock_ec2_deprecated -def test_deleting_security_groups(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group1 = conn.create_security_group('test1', 'test1') - conn.create_security_group('test2', 'test2') - - conn.get_all_security_groups().should.have.length_of(4) - - # Deleting a group that doesn't exist should throw an error - with assert_raises(EC2ResponseError) as cm: - conn.delete_security_group('foobar') - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Delete by name - with assert_raises(EC2ResponseError) as ex: - conn.delete_security_group('test2', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when 
calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_security_group('test2') - conn.get_all_security_groups().should.have.length_of(3) - - # Delete by group id - conn.delete_security_group(group_id=security_group1.id) - conn.get_all_security_groups().should.have.length_of(2) - - -@mock_ec2_deprecated -def test_delete_security_group_in_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = "vpc-12345" - security_group1 = conn.create_security_group('test1', 'test1', vpc_id) - - # this should not throw an exception - conn.delete_security_group(group_id=security_group1.id) - - -@mock_ec2_deprecated -def test_authorize_ip_range_and_revoke(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - - with assert_raises(EC2ResponseError) as ex: - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - assert success.should.be.true - - security_group = conn.get_all_security_groups(groupnames=['test'])[0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].cidr_ip.should.equal("123.123.123.123/32") - - # Wrong Cidr should throw error - with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.122/32") - cm.exception.code.should.equal('InvalidPermission.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Actually revoke - with assert_raises(EC2ResponseError) as ex: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.123/32") - - security_group = conn.get_all_security_groups()[0] - security_group.rules.should.have.length_of(0) - - # Test for egress as well - egress_security_group = conn.create_security_group( - 'testegress', 'testegress', vpc_id='vpc-3432589') - - with assert_raises(EC2ResponseError) as ex: - success = conn.authorize_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - - success = conn.authorize_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - assert success.should.be.true - egress_security_group = conn.get_all_security_groups( 
- groupnames='testegress')[0] - # There are two egress rules associated with the security group: - # the default outbound rule and the new one - int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[ - 0].cidr_ip.should.equal("123.123.123.123/32") - - # Wrong Cidr should throw error - egress_security_group.revoke.when.called_with( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) - - # Actually revoke - with assert_raises(EC2ResponseError) as ex: - conn.revoke_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - - conn.revoke_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - - egress_security_group = conn.get_all_security_groups()[0] - # There is still the default outbound rule - egress_security_group.rules_egress.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_other_group_and_revoke(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - other_security_group = conn.create_security_group('other', 'other') - wrong_group = conn.create_security_group('wrong', 'wrong') - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - assert success.should.be.true - - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test'][0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].group_id.should.equal(other_security_group.id) - - # Wrong source group should throw error - with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", src_group=wrong_group) - cm.exception.code.should.equal('InvalidPermission.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Actually revoke - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", src_group=other_security_group) - - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test'][0] - security_group.rules.should.have.length_of(0) - - -@mock_ec2 -def test_authorize_other_group_egress_and_revoke(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - sg01 = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group( - GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - - ip_permission = { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}], - 'IpRanges': [] - } - - sg01.authorize_egress(IpPermissions=[ip_permission]) - sg01.ip_permissions_egress.should.have.length_of(2) - sg01.ip_permissions_egress.should.contain(ip_permission) - - sg01.revoke_egress(IpPermissions=[ip_permission]) - sg01.ip_permissions_egress.should.have.length_of(1) - - -@mock_ec2_deprecated 
-def test_authorize_group_in_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = "vpc-12345" - - # create 2 groups in a vpc - security_group = conn.create_security_group('test1', 'test1', vpc_id) - other_security_group = conn.create_security_group('test2', 'test2', vpc_id) - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - success.should.be.true - - # Check that the rule is accurate - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test1'][0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].group_id.should.equal(other_security_group.id) - - # Now remove the rule - success = security_group.revoke( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - success.should.be.true - - # And check that it gets revoked - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test1'][0] - security_group.rules.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_all_security_groups(): - conn = boto.connect_ec2() - sg1 = conn.create_security_group( - name='test1', description='test1', vpc_id='vpc-mjm05d27') - conn.create_security_group(name='test2', description='test2') - - resp = conn.get_all_security_groups(groupnames=['test1']) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_security_groups(groupnames=['does_not_exist']) - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'description': ['test1']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups() - resp.should.have.length_of(4) - - -@mock_ec2_deprecated -def test_authorize_bad_cidr_throws_invalid_parameter_value(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - with assert_raises(EC2ResponseError) as cm: - security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_security_group_tagging(): - conn = boto.connect_vpc() - vpc = conn.create_vpc("10.0.0.0/16") - - sg = conn.create_security_group("test-sg", "Test SG", vpc.id) - - with assert_raises(EC2ResponseError) as ex: - sg.add_tag("Test", "Tag", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - sg.add_tag("Test", "Tag") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("Test") - tag.value.should.equal("Tag") - - group = conn.get_all_security_groups("test-sg")[0] - 
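# The boto3 shape of the same tag round trip, using the 'tag-value' filter
# that the boto3 tests later in this file rely on; a hedged sketch whose
# helper name and group name are illustrative, assuming moto's mock_ec2.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_security_group_tagging():
    client = boto3.client('ec2', region_name='us-east-1')
    group_id = client.create_security_group(
        GroupName='sketch-sg', Description='tag demo')['GroupId']
    client.create_tags(Resources=[group_id],
                       Tags=[{'Key': 'Test', 'Value': 'Tag'}])
    found = client.describe_security_groups(
        Filters=[{'Name': 'tag-value', 'Values': ['Tag']}])
    return found['SecurityGroups'][0]['GroupId'] == group_id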
group.tags.should.have.length_of(1) - group.tags["Test"].should.equal("Tag") - - -@mock_ec2_deprecated -def test_security_group_tag_filtering(): - conn = boto.connect_ec2() - sg = conn.create_security_group("test-sg", "Test SG") - sg.add_tag("test-tag", "test-value") - - groups = conn.get_all_security_groups( - filters={"tag:test-tag": "test-value"}) - groups.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_all_protocols_with_no_port_specification(): - conn = boto.connect_ec2() - sg = conn.create_security_group('test', 'test') - - success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0') - success.should.be.true - - sg = conn.get_all_security_groups('test')[0] - sg.rules[0].from_port.should.equal(None) - sg.rules[0].to_port.should.equal(None) - - -@mock_ec2_deprecated -def test_sec_group_rule_limit(): - ec2_conn = boto.connect_ec2() - sg = ec2_conn.create_security_group('test', 'test') - other_sg = ec2_conn.create_security_group('test_2', 'test_other') - - # INGRESS - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - sg.rules.should.be.empty - # authorize a rule targeting a different sec group (because this count too) - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - success.should.be.true - # fill the rules up the limit - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)]) - success.should.be.true - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - # EGRESS - # authorize a rule targeting a different sec group (because this count too) - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - # fill the rules up the limit - # remember that by default, when created a sec group contains 1 egress rule - # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100 the limit - for i in range(98): - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='{0}.0.0.0/0'.format(i)) - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='101.0.0.0/0') - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - -@mock_ec2_deprecated -def test_sec_group_rule_limit_vpc(): - ec2_conn = boto.connect_ec2() - vpc_conn = 
boto.connect_vpc() - - vpc = vpc_conn.create_vpc('10.0.0.0/8') - - sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) - other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) - - # INGRESS - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - sg.rules.should.be.empty - # authorize a rule targeting a different sec group (because this count too) - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - success.should.be.true - # fill the rules up the limit - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)]) - # verify that we cannot authorize past the limit for a CIDR IP - success.should.be.true - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - # EGRESS - # authorize a rule targeting a different sec group (because this count too) - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - # fill the rules up the limit - # remember that by default, when created a sec group contains 1 egress rule - # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50 the limit - for i in range(48): - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='{0}.0.0.0/0'.format(i)) - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='50.0.0.0/0') - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - -''' -Boto3 -''' - - -@mock_ec2 -def test_add_same_rule_twice_throws_error(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg = ec2.create_security_group( - GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) - - ip_permissions = [ - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'IpRanges': [{"CidrIp": "1.2.3.4/32"}] - }, - ] - sg.authorize_ingress(IpPermissions=ip_permissions) - - with assert_raises(ClientError) as ex: - sg.authorize_ingress(IpPermissions=ip_permissions) - - -@mock_ec2 -def test_security_group_tagging_boto3(): - conn = boto3.client('ec2', region_name='us-east-1') - - sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - - with assert_raises(ClientError) as ex: - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 
'Tag'}], DryRun=True) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups( - Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) - tag = describe["SecurityGroups"][0]['Tags'][0] - tag['Value'].should.equal("Tag") - tag['Key'].should.equal("Test") - - -@mock_ec2 -def test_security_group_wildcard_tag_filter_boto3(): - conn = boto3.client('ec2', region_name='us-east-1') - sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups( - Filters=[{'Name': 'tag-value', 'Values': ['*']}]) - - tag = describe["SecurityGroups"][0]['Tags'][0] - tag['Value'].should.equal("Tag") - tag['Key'].should.equal("Test") - - -@mock_ec2 -def test_authorize_and_revoke_in_bulk(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - sg01 = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group( - GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - sg03 = ec2.create_security_group( - GroupName='sg03', Description='Test security group sg03') - - ip_permissions = [ - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', - 'UserId': sg02.owner_id}], - 'IpRanges': [] - }, - { - 'IpProtocol': 'tcp', - 'FromPort': 27018, - 'ToPort': 27018, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], - 'IpRanges': [] - }, - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}], - 'IpRanges': [] - } - ] - expected_ip_permissions = copy.deepcopy(ip_permissions) - expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02' - expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id - - sg01.authorize_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.have.length_of(3) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions.should.contain(ip_permission) - - sg01.revoke_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.be.empty - for ip_permission in expected_ip_permissions: - sg01.ip_permissions.shouldnt.contain(ip_permission) - - sg01.authorize_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(4) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions_egress.should.contain(ip_permission) - - sg01.revoke_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(1) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions_egress.shouldnt.contain(ip_permission) - - -@mock_ec2 -def test_security_group_ingress_without_multirule(): - ec2 = boto3.resource('ec2', 'ca-central-1') - sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') - - assert len(sg.ip_permissions) == 0 - sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') - - # 
Fails - assert len(sg.ip_permissions) == 1 - - -@mock_ec2 -def test_security_group_ingress_without_multirule_after_reload(): - ec2 = boto3.resource('ec2', 'ca-central-1') - sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') - - assert len(sg.ip_permissions) == 0 - sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') - - # Also Fails - sg_after = ec2.SecurityGroup(sg.id) - assert len(sg_after.ip_permissions) == 1 - - -@mock_ec2_deprecated -def test_get_all_security_groups_filter_with_same_vpc_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group( - 'test1', 'test1', vpc_id=vpc_id) - security_group2 = conn.create_security_group( - 'test2', 'test2', vpc_id=vpc_id) - - security_group.vpc_id.should.equal(vpc_id) - security_group2.vpc_id.should.equal(vpc_id) - - security_groups = conn.get_all_security_groups( - group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) - security_groups.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_security_groups(group_ids=['does_not_exist']) - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none +from __future__ import unicode_literals + +import copy + +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +import boto3 +import boto +from botocore.exceptions import ClientError +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_and_describe_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + security_group = conn.create_security_group( + 'test security group', 'this is a test security group') + + security_group.name.should.equal('test security group') + security_group.description.should.equal('this is a test security group') + + # Trying to create another group with the same name should throw an error + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group( + 'test security group', 'this is a test security group') + cm.exception.code.should.equal('InvalidGroup.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_groups = conn.get_all_security_groups() + # The default group gets created automatically + all_groups.should.have.length_of(3) + group_names = [group.name for group in all_groups] + set(group_names).should.equal(set(["default", "test security group"])) + + +@mock_ec2_deprecated +def test_create_security_group_without_description_raises_error(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group('test security group', '') + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def 
test_default_security_group(): + conn = boto.ec2.connect_to_region('us-east-1') + groups = conn.get_all_security_groups() + groups.should.have.length_of(2) + groups[0].name.should.equal("default") + + +@mock_ec2_deprecated +def test_create_and_describe_vpc_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + + security_group.name.should.equal('test security group') + security_group.description.should.equal('this is a test security group') + + # Trying to create another group with the same name in the same VPC should + # throw an error + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + cm.exception.code.should.equal('InvalidGroup.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]}) + + all_groups[0].vpc_id.should.equal(vpc_id) + + all_groups.should.have.length_of(1) + all_groups[0].name.should.equal('test security group') + + +@mock_ec2_deprecated +def test_create_two_security_groups_with_same_name_in_different_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + vpc_id2 = 'vpc-5300000d' + + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id2) + + all_groups = conn.get_all_security_groups() + + all_groups.should.have.length_of(4) + group_names = [group.name for group in all_groups] + # The default group is created automatically + set(group_names).should.equal(set(["default", "test security group"])) + + +@mock_ec2_deprecated +def test_deleting_security_groups(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group1 = conn.create_security_group('test1', 'test1') + conn.create_security_group('test2', 'test2') + + conn.get_all_security_groups().should.have.length_of(4) + + # Deleting a group that doesn't exist should throw an error + with assert_raises(EC2ResponseError) as cm: + conn.delete_security_group('foobar') + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Delete by name + with assert_raises(EC2ResponseError) as ex: + conn.delete_security_group('test2', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_security_group('test2') + conn.get_all_security_groups().should.have.length_of(3) + + # Delete by group id + conn.delete_security_group(group_id=security_group1.id) + conn.get_all_security_groups().should.have.length_of(2) + + +@mock_ec2_deprecated +def test_delete_security_group_in_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = "vpc-12345" + security_group1 = conn.create_security_group('test1', 'test1', vpc_id) + + # this should not throw an exception + conn.delete_security_group(group_id=security_group1.id) + + +@mock_ec2_deprecated +def test_authorize_ip_range_and_revoke(): + conn = boto.connect_ec2('the_key', 
'the_secret') + security_group = conn.create_security_group('test', 'test') + + with assert_raises(EC2ResponseError) as ex: + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + assert success.should.be.true + + security_group = conn.get_all_security_groups(groupnames=['test'])[0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") + + # Wrong Cidr should throw error + with assert_raises(EC2ResponseError) as cm: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.122/32") + cm.exception.code.should.equal('InvalidPermission.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Actually revoke + with assert_raises(EC2ResponseError) as ex: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32") + + security_group = conn.get_all_security_groups()[0] + security_group.rules.should.have.length_of(0) + + # Test for egress as well + egress_security_group = conn.create_security_group( + 'testegress', 'testegress', vpc_id='vpc-3432589') + + with assert_raises(EC2ResponseError) as ex: + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + assert success.should.be.true + egress_security_group = conn.get_all_security_groups( + groupnames='testegress')[0] + # There are two egress rules associated with the security group: + # the default outbound rule and the new one + int(egress_security_group.rules_egress[1].to_port).should.equal(2222) + egress_security_group.rules_egress[1].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") + + # Wrong Cidr should throw error + egress_security_group.revoke.when.called_with( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) + + # Actually revoke + with assert_raises(EC2ResponseError) as ex: + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + 
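# the dry-run revoke should fail with DryRunOperation and leave the egress rule in place +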
ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + + egress_security_group = conn.get_all_security_groups()[0] + # There is still the default outbound rule + egress_security_group.rules_egress.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_other_group_and_revoke(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group = conn.create_security_group('test', 'test') + other_security_group = conn.create_security_group('other', 'other') + wrong_group = conn.create_security_group('wrong', 'wrong') + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + assert success.should.be.true + + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) + + # Wrong source group should throw error + with assert_raises(EC2ResponseError) as cm: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=wrong_group) + cm.exception.code.should.equal('InvalidPermission.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Actually revoke + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=other_security_group) + + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] + security_group.rules.should.have.length_of(0) + + +@mock_ec2 +def test_authorize_other_group_egress_and_revoke(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + + ip_permission = { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}], + 'IpRanges': [] + } + + sg01.authorize_egress(IpPermissions=[ip_permission]) + sg01.ip_permissions_egress.should.have.length_of(2) + sg01.ip_permissions_egress.should.contain(ip_permission) + + sg01.revoke_egress(IpPermissions=[ip_permission]) + sg01.ip_permissions_egress.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_group_in_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = "vpc-12345" + + # create 2 groups in a vpc + security_group = conn.create_security_group('test1', 'test1', vpc_id) + other_security_group = conn.create_security_group('test2', 'test2', vpc_id) + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success.should.be.true + + # Check that the rule is accurate + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) + + # 
Now remove the rule + success = security_group.revoke( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success.should.be.true + + # And check that it gets revoked + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group.rules.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_all_security_groups(): + conn = boto.connect_ec2() + sg1 = conn.create_security_group( + name='test1', description='test1', vpc_id='vpc-mjm05d27') + conn.create_security_group(name='test2', description='test2') + + resp = conn.get_all_security_groups(groupnames=['test1']) + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(groupnames=['does_not_exist']) + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']}) + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']}) + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + resp = conn.get_all_security_groups(filters={'description': ['test1']}) + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + resp = conn.get_all_security_groups() + resp.should.have.length_of(4) + + +@mock_ec2_deprecated +def test_authorize_bad_cidr_throws_invalid_parameter_value(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group = conn.create_security_group('test', 'test') + with assert_raises(EC2ResponseError) as cm: + security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_security_group_tagging(): + conn = boto.connect_vpc() + vpc = conn.create_vpc("10.0.0.0/16") + + sg = conn.create_security_group("test-sg", "Test SG", vpc.id) + + with assert_raises(EC2ResponseError) as ex: + sg.add_tag("Test", "Tag", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + sg.add_tag("Test", "Tag") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("Test") + tag.value.should.equal("Tag") + + group = conn.get_all_security_groups("test-sg")[0] + group.tags.should.have.length_of(1) + group.tags["Test"].should.equal("Tag") + + +@mock_ec2_deprecated +def test_security_group_tag_filtering(): + conn = boto.connect_ec2() + sg = conn.create_security_group("test-sg", "Test SG") + sg.add_tag("test-tag", "test-value") + + groups = conn.get_all_security_groups( + filters={"tag:test-tag": "test-value"}) + groups.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_all_protocols_with_no_port_specification(): + conn = boto.connect_ec2() + sg = conn.create_security_group('test', 'test') + + success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0') + success.should.be.true + + sg = conn.get_all_security_groups('test')[0] + sg.rules[0].from_port.should.equal(None) + 
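# ip_protocol '-1' covers all protocols, so neither port bound gets set +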
sg.rules[0].to_port.should.equal(None) + + +@mock_ec2_deprecated +def test_sec_group_rule_limit(): + ec2_conn = boto.connect_ec2() + sg = ec2_conn.create_security_group('test', 'test') + other_sg = ec2_conn.create_security_group('test_2', 'test_other') + + # INGRESS + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + sg.rules.should.be.empty + # authorize a rule targeting a different sec group (because these count toward the limit too) + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + success.should.be.true + # fill the rules up to the limit + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)]) + success.should.be.true + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + # EGRESS + # authorize a rule targeting a different sec group (because these count toward the limit too) + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + # fill the rules up to the limit + # remember that a newly created sec group contains 1 egress rule by default, + # so our other_sg rule + 98 CIDR IP rules + 1 default rule == 100, the limit + for i in range(98): + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='{0}.0.0.0/0'.format(i)) + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='101.0.0.0/0') + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + +
+@mock_ec2_deprecated +def test_sec_group_rule_limit_vpc(): + ec2_conn = boto.connect_ec2() + vpc_conn = boto.connect_vpc() + + vpc = vpc_conn.create_vpc('10.0.0.0/16') + + sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) + other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) + + # INGRESS + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + sg.rules.should.be.empty + # authorize a rule targeting a different sec group (because these count toward the limit too) + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + success.should.be.true + # fill the rules up to the limit + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)]) + success.should.be.true + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + # EGRESS + # authorize a rule targeting a different sec group (because these count toward the limit too) + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + # fill the rules up to the limit + # remember that a newly created sec group contains 1 egress rule by default, + # so our other_sg rule + 48 CIDR IP rules + 1 default rule == 50, the limit + for i in range(48): + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='{0}.0.0.0/0'.format(i)) + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='50.0.0.0/0') + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + +
+''' +Boto3 +''' + +
+@mock_ec2 +def test_add_same_rule_twice_throws_error(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + sg = ec2.create_security_group( + GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) + + ip_permissions = [ + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'IpRanges': [{"CidrIp": "1.2.3.4/32"}] + }, + ] + sg.authorize_ingress(IpPermissions=ip_permissions) + + with assert_raises(ClientError) as ex: + sg.authorize_ingress(IpPermissions=ip_permissions) + +
+@mock_ec2 +def test_security_group_tagging_boto3(): + conn = boto3.client('ec2', region_name='us-east-1') + + sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") + + with assert_raises(ClientError) as ex: + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}], DryRun=True) + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}]) + describe = conn.describe_security_groups( + Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) + tag = describe["SecurityGroups"][0]['Tags'][0] + tag['Value'].should.equal("Tag") + tag['Key'].should.equal("Test") + +
+@mock_ec2 +def test_security_group_wildcard_tag_filter_boto3(): + 
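# a wildcard '*' tag-value filter should match any tag value +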
conn = boto3.client('ec2', region_name='us-east-1') + sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") + conn.create_tags(Resources=[sg['GroupId']], Tags=[ + {'Key': 'Test', 'Value': 'Tag'}]) + describe = conn.describe_security_groups( + Filters=[{'Name': 'tag-value', 'Values': ['*']}]) + + tag = describe["SecurityGroups"][0]['Tags'][0] + tag['Value'].should.equal("Tag") + tag['Key'].should.equal("Test") + + +@mock_ec2 +def test_authorize_and_revoke_in_bulk(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + sg03 = ec2.create_security_group( + GroupName='sg03', Description='Test security group sg03') + + ip_permissions = [ + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', + 'UserId': sg02.owner_id}], + 'IpRanges': [] + }, + { + 'IpProtocol': 'tcp', + 'FromPort': 27018, + 'ToPort': 27018, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], + 'IpRanges': [] + }, + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}], + 'IpRanges': [] + } + ] + expected_ip_permissions = copy.deepcopy(ip_permissions) + expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02' + expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id + + sg01.authorize_ingress(IpPermissions=ip_permissions) + sg01.ip_permissions.should.have.length_of(3) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions.should.contain(ip_permission) + + sg01.revoke_ingress(IpPermissions=ip_permissions) + sg01.ip_permissions.should.be.empty + for ip_permission in expected_ip_permissions: + sg01.ip_permissions.shouldnt.contain(ip_permission) + + sg01.authorize_egress(IpPermissions=ip_permissions) + sg01.ip_permissions_egress.should.have.length_of(4) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions_egress.should.contain(ip_permission) + + sg01.revoke_egress(IpPermissions=ip_permissions) + sg01.ip_permissions_egress.should.have.length_of(1) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions_egress.shouldnt.contain(ip_permission) + + +@mock_ec2 +def test_security_group_ingress_without_multirule(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Fails + assert len(sg.ip_permissions) == 1 + + +@mock_ec2 +def test_security_group_ingress_without_multirule_after_reload(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Also Fails + sg_after = ec2.SecurityGroup(sg.id) + assert len(sg_after.ip_permissions) == 1 + + +@mock_ec2_deprecated +def test_get_all_security_groups_filter_with_same_vpc_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group( + 'test1', 'test1', vpc_id=vpc_id) + security_group2 = 
conn.create_security_group( + 'test2', 'test2', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + security_group2.vpc_id.should.equal(vpc_id) + + security_groups = conn.get_all_security_groups( + group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) + security_groups.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(group_ids=['does_not_exist']) + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index 190f3b1f1..6221d633f 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -7,7 +7,7 @@ from moto import mock_ec2 def get_subnet_id(conn): - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 51590ed46..ab08d392c 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,268 +1,268 @@ -from __future__ import unicode_literals -from nose.tools import assert_raises -import datetime - -import boto -import boto3 -from boto.exception import EC2ResponseError -from botocore.exceptions import ClientError -import pytz -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated -from moto.backends import get_model -from moto.core.utils import iso_8601_datetime_with_milliseconds - - -@mock_ec2 -def test_request_spot_instances(): - conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - conn.create_security_group(GroupName='group1', Description='description') - conn.create_security_group(GroupName='group2', Description='description') - - start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) - end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc) - start = iso_8601_datetime_with_milliseconds(start_dt) - end = iso_8601_datetime_with_milliseconds(end_dt) - - with assert_raises(ClientError) as ex: - request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": "some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, - }, - DryRun=True, - ) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') - - request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - 
AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": "some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, - }, - ) - - requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - requests.should.have.length_of(1) - request = requests[0] - - request['State'].should.equal("open") - request['SpotPrice'].should.equal("0.5") - request['Type'].should.equal('one-time') - request['ValidFrom'].should.equal(start_dt) - request['ValidUntil'].should.equal(end_dt) - request['LaunchGroup'].should.equal("the-group") - request['AvailabilityZoneGroup'].should.equal('my-group') - - launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] - for group in launch_spec['SecurityGroups']] - set(security_group_names).should.equal(set(['group1', 'group2'])) - - launch_spec['ImageId'].should.equal('ami-abcd1234') - launch_spec['KeyName'].should.equal("test") - launch_spec['InstanceType'].should.equal('m1.small') - launch_spec['KernelId'].should.equal("test-kernel") - launch_spec['RamdiskId'].should.equal("test-ramdisk") - launch_spec['SubnetId'].should.equal(subnet_id) - - -@mock_ec2 -def test_request_spot_instances_default_arguments(): - """ - Test that moto set the correct default arguments - """ - conn = boto3.client('ec2', 'us-east-1') - - request = conn.request_spot_instances( - SpotPrice="0.5", - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - } - ) - - requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - requests.should.have.length_of(1) - request = requests[0] - - request['State'].should.equal("open") - request['SpotPrice'].should.equal("0.5") - request['Type'].should.equal('one-time') - request.shouldnt.contain('ValidFrom') - request.shouldnt.contain('ValidUntil') - request.shouldnt.contain('LaunchGroup') - request.shouldnt.contain('AvailabilityZoneGroup') - - launch_spec = request['LaunchSpecification'] - - security_group_names = [group['GroupName'] - for group in launch_spec['SecurityGroups']] - security_group_names.should.equal(["default"]) - - launch_spec['ImageId'].should.equal('ami-abcd1234') - request.shouldnt.contain('KeyName') - launch_spec['InstanceType'].should.equal('m1.small') - request.shouldnt.contain('KernelId') - request.shouldnt.contain('RamdiskId') - request.shouldnt.contain('SubnetId') - - -@mock_ec2_deprecated -def test_cancel_spot_instance_request(): - conn = boto.connect_ec2() - - conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as ex: - conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') - - conn.cancel_spot_instance_requests([requests[0].id]) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_request_spot_instances_fulfilled(): - """ - Test that moto correctly fullfills a spot instance request - """ - conn = 
boto.ec2.connect_to_region("us-east-1") - - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - request.state.should.equal("open") - - get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - request.state.should.equal("active") - - -@mock_ec2_deprecated -def test_tag_spot_instance_request(): - """ - Test that moto correctly tags a spot instance request - """ - conn = boto.connect_ec2() - - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request[0].add_tag('tag1', 'value1') - request[0].add_tag('tag2', 'value2') - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - tag_dict = dict(request.tags) - tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) - - -@mock_ec2_deprecated -def test_get_all_spot_instance_requests_filtering(): - """ - Test that moto correctly filters spot instance requests - """ - conn = boto.connect_ec2() - - request1 = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request2 = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request1[0].add_tag('tag1', 'value1') - request1[0].add_tag('tag2', 'value2') - request2[0].add_tag('tag1', 'value1') - request2[0].add_tag('tag2', 'wrong') - - requests = conn.get_all_spot_instance_requests(filters={'state': 'active'}) - requests.should.have.length_of(0) - - requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) - requests.should.have.length_of(3) - - requests = conn.get_all_spot_instance_requests( - filters={'tag:tag1': 'value1'}) - requests.should.have.length_of(2) - - requests = conn.get_all_spot_instance_requests( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - requests.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_request_spot_instances_setting_instance_id(): - conn = boto.ec2.connect_to_region("us-east-1") - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234') - - req = get_model('SpotInstanceRequest', 'us-east-1')[0] - req.state = 'active' - req.instance_id = 'i-12345678' - - request = conn.get_all_spot_instance_requests()[0] - assert request.state == 'active' - assert request.instance_id == 'i-12345678' +from __future__ import unicode_literals +from nose.tools import assert_raises +import datetime + +import boto +import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +import pytz +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from moto.backends import get_model +from moto.core.utils import iso_8601_datetime_with_milliseconds + + +@mock_ec2 +def test_request_spot_instances(): + conn = boto3.client('ec2', 'us-east-1') + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + conn.create_security_group(GroupName='group1', Description='description') + conn.create_security_group(GroupName='group2', Description='description') + + start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) + end_dt = datetime.datetime(2013, 1, 
2).replace(tzinfo=pytz.utc) + start = iso_8601_datetime_with_milliseconds(start_dt) + end = iso_8601_datetime_with_milliseconds(end_dt) + + with assert_raises(ClientError) as ex: + request = conn.request_spot_instances( + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": "some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + DryRun=True, + ) + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') + + request = conn.request_spot_instances( + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": "some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + ) + + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] + requests.should.have.length_of(1) + request = requests[0] + + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request['ValidFrom'].should.equal(start_dt) + request['ValidUntil'].should.equal(end_dt) + request['LaunchGroup'].should.equal("the-group") + request['AvailabilityZoneGroup'].should.equal('my-group') + + launch_spec = request['LaunchSpecification'] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] + set(security_group_names).should.equal(set(['group1', 'group2'])) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + launch_spec['KeyName'].should.equal("test") + launch_spec['InstanceType'].should.equal('m1.small') + launch_spec['KernelId'].should.equal("test-kernel") + launch_spec['RamdiskId'].should.equal("test-ramdisk") + launch_spec['SubnetId'].should.equal(subnet_id) + +
+@mock_ec2 +def test_request_spot_instances_default_arguments(): + """ + Test that moto sets the correct default arguments + """ + conn = boto3.client('ec2', 'us-east-1') + + request = conn.request_spot_instances( + SpotPrice="0.5", + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + } + ) + + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] + requests.should.have.length_of(1) + request = requests[0] + + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request.shouldnt.contain('ValidFrom') + request.shouldnt.contain('ValidUntil') + request.shouldnt.contain('LaunchGroup') + request.shouldnt.contain('AvailabilityZoneGroup') + + launch_spec = request['LaunchSpecification'] + + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] + security_group_names.should.equal(["default"]) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + request.shouldnt.contain('KeyName') + launch_spec['InstanceType'].should.equal('m1.small') + request.shouldnt.contain('KernelId') + request.shouldnt.contain('RamdiskId') + request.shouldnt.contain('SubnetId') + +
+@mock_ec2_deprecated +def test_cancel_spot_instance_request(): + conn = boto.connect_ec2() + + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as ex: + conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') + + conn.cancel_spot_instance_requests([requests[0].id]) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(0) + +
+@mock_ec2_deprecated +def test_request_spot_instances_fulfilled(): + """ + Test that moto correctly fulfills a spot instance request + """ + conn = boto.ec2.connect_to_region("us-east-1") + + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("open") + + get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("active") + +
+@mock_ec2_deprecated +def test_tag_spot_instance_request(): + """ + Test that moto correctly tags a spot instance request + """ + conn = boto.connect_ec2() + + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request[0].add_tag('tag1', 'value1') + request[0].add_tag('tag2', 'value2') + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + tag_dict = dict(request.tags) + tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) + +
+@mock_ec2_deprecated +def test_get_all_spot_instance_requests_filtering(): + """ + Test that moto correctly filters spot instance requests + """ + conn = boto.connect_ec2() + + request1 = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request2 = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request1[0].add_tag('tag1', 'value1') + request1[0].add_tag('tag2', 'value2') + request2[0].add_tag('tag1', 'value1') + request2[0].add_tag('tag2', 'wrong') + + requests = conn.get_all_spot_instance_requests(filters={'state': 'active'}) + requests.should.have.length_of(0) + + requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) + requests.should.have.length_of(3) + + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1'}) + requests.should.have.length_of(2) + + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) + requests.should.have.length_of(1) + +
+@mock_ec2_deprecated +def test_request_spot_instances_setting_instance_id(): + conn = boto.ec2.connect_to_region("us-east-1") + request = conn.request_spot_instances( + 
price=0.5, image_id='ami-abcd1234') + + req = get_model('SpotInstanceRequest', 'us-east-1')[0] + req.state = 'active' + req.instance_id = 'i-12345678' + + request = conn.get_all_spot_instance_requests()[0] + assert request.state == 'active' + assert request.instance_id == 'i-12345678' diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 3fb122807..38571b285 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,291 +1,340 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import boto3 -import boto -import boto.vpc -from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError -import json -import sure # noqa - -from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_subnets(): - ec2 = boto.connect_ec2('the_key', 'the_secret') - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(1 + len(ec2.get_all_zones())) - - conn.delete_subnet(subnet.id) - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_subnet(subnet.id) - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_subnet_create_vpc_validation(): - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_subnet_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - subnet.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the subnet - subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0] - subnet.tags.should.have.length_of(1) - subnet.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_subnet_should_have_proper_availability_zone_set(): - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') - subnetA.availability_zone.should.equal('us-west-1b') - - -@mock_ec2 -def test_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - default_vpc = list(ec2.vpcs.all())[0] - default_vpc.cidr_block.should.equal('172.31.0.0/16') - default_vpc.reload() - default_vpc.is_default.should.be.ok - - subnet = ec2.create_subnet( - VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - -@mock_ec2_deprecated -def test_non_default_subnet(): - vpc_cli = boto.vpc.connect_to_region('us-west-1') - - # Create the non default VPC - vpc = vpc_cli.create_vpc("10.0.0.0/16") - vpc.is_default.shouldnt.be.ok - - subnet = vpc_cli.create_subnet(vpc.id, 
"10.0.0.0/24") - subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0] - subnet.mapPublicIpOnLaunch.should.equal('false') - - -@mock_ec2 -def test_boto3_non_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - -@mock_ec2 -def test_modify_subnet_attribute(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - - # Get the default VPC - vpc = list(ec2.vpcs.all())[0] - - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - - # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action - subnet.reload() - - # For non default subnet, attribute value should be 'False' - subnet.map_public_ip_on_launch.shouldnt.be.ok - - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) - subnet.reload() - subnet.map_public_ip_on_launch.should.be.ok - - -@mock_ec2 -def test_modify_subnet_attribute_validation(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - - with assert_raises(ParamValidationError): - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) - - -@mock_ec2_deprecated -def test_subnet_get_by_id(): - ec2 = boto.ec2.connect_to_region('us-west-1') - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') - vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet( - vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet( - vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') - - subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id]) - subnets_by_id.should.have.length_of(2) - subnets_by_id = tuple(map(lambda s: s.id, subnets_by_id)) - subnetA.id.should.be.within(subnets_by_id) - subnetB1.id.should.be.within(subnets_by_id) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_get_subnets_filtering(): - ec2 = boto.ec2.connect_to_region('us-west-1') - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') - vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet( - vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet( - vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) - - # Filter by VPC ID - subnets_by_vpc = 
conn.get_all_subnets(filters={'vpc-id': vpcB.id}) - subnets_by_vpc.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_vpc]).should.equal( - set([subnetB1.id, subnetB2.id])) - - # Filter by CIDR variations - subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) - subnets_by_cidr1.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr1] - ).should.equal(set([subnetA.id, subnetB1.id])) - - subnets_by_cidr2 = conn.get_all_subnets( - filters={'cidr-block': "10.0.0.0/24"}) - subnets_by_cidr2.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr2] - ).should.equal(set([subnetA.id, subnetB1.id])) - - subnets_by_cidr3 = conn.get_all_subnets( - filters={'cidrBlock': "10.0.0.0/24"}) - subnets_by_cidr3.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr3] - ).should.equal(set([subnetA.id, subnetB1.id])) - - # Filter by VPC ID and CIDR - subnets_by_vpc_and_cidr = conn.get_all_subnets( - filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) - subnets_by_vpc_and_cidr.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_vpc_and_cidr] - ).should.equal(set([subnetB1.id])) - - # Filter by subnet ID - subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) - subnets_by_id.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_id]).should.equal(set([subnetA.id])) - - # Filter by availabilityZone - subnets_by_az = conn.get_all_subnets( - filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) - subnets_by_az.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_az] - ).should.equal(set([subnetB1.id])) - - # Filter by defaultForAz - - subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"}) - subnets_by_az.should.have.length_of(len(conn.get_all_zones())) - - # Unsupported filter - conn.get_all_subnets.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_subnet_tags_through_cloudformation(): - vpc_conn = boto.vpc.connect_to_region('us-west-1') - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - subnet_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testSubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": vpc.id, - "CidrBlock": "10.0.0.0/24", - "AvailabilityZone": "us-west-1b", - "Tags": [{ - "Key": "foo", - "Value": "bar", - }, { - "Key": "blah", - "Value": "baz", - }] - } - } - } - } - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - template_json = json.dumps(subnet_template) - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] - subnet.tags["foo"].should.equal("bar") - subnet.tags["blah"].should.equal("baz") +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +import boto3 +import boto +import boto.vpc +from boto.exception import EC2ResponseError +from botocore.exceptions import ParamValidationError, ClientError +import json +import sure # noqa + +from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_subnets(): + ec2 = boto.connect_ec2('the_key', 'the_secret') + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, 
"10.0.0.0/18") + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(1 + len(ec2.get_all_zones())) + + conn.delete_subnet(subnet.id) + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_subnet(subnet.id) + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_subnet_create_vpc_validation(): + conn = boto.connect_vpc('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_subnet_tagging(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + subnet.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the subnet + subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0] + subnet.tags.should.have.length_of(1) + subnet.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_subnet_should_have_proper_availability_zone_set(): + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') + subnetA.availability_zone.should.equal('us-west-1b') + + +@mock_ec2 +def test_default_subnet(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') + default_vpc.reload() + default_vpc.is_default.should.be.ok + + subnet = ec2.create_subnet( + VpcId=default_vpc.id, CidrBlock='172.31.48.0/20', AvailabilityZone='us-west-1a') + subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + + +@mock_ec2_deprecated +def test_non_default_subnet(): + vpc_cli = boto.vpc.connect_to_region('us-west-1') + + # Create the non default VPC + vpc = vpc_cli.create_vpc("10.0.0.0/16") + vpc.is_default.shouldnt.be.ok + + subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24") + subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0] + subnet.mapPublicIpOnLaunch.should.equal('false') + + +@mock_ec2 +def test_boto3_non_default_subnet(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + + +@mock_ec2 +def test_modify_subnet_attribute(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + # Get the default VPC + vpc = list(ec2.vpcs.all())[0] + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZone='us-west-1a') + + # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action + subnet.reload() + + # For non default subnet, attribute value should be 'False' + subnet.map_public_ip_on_launch.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) + 
subnet.reload() + subnet.map_public_ip_on_launch.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) + subnet.reload() + subnet.map_public_ip_on_launch.should.be.ok + + +@mock_ec2 +def test_modify_subnet_attribute_validation(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + + with assert_raises(ParamValidationError): + client.modify_subnet_attribute( + SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) + + +@mock_ec2_deprecated +def test_subnet_get_by_id(): + ec2 = boto.ec2.connect_to_region('us-west-1') + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + vpcB = conn.create_vpc("10.0.0.0/16") + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + + subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id]) + subnets_by_id.should.have.length_of(2) + subnets_by_id = tuple(map(lambda s: s.id, subnets_by_id)) + subnetA.id.should.be.within(subnets_by_id) + subnetB1.id.should.be.within(subnets_by_id) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_get_subnets_filtering(): + ec2 = boto.ec2.connect_to_region('us-west-1') + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + vpcB = conn.create_vpc("10.0.0.0/16") + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) + + # Filter by VPC ID + subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) + subnets_by_vpc.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_vpc]).should.equal( + set([subnetB1.id, subnetB2.id])) + + # Filter by CIDR variations + subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) + subnets_by_cidr1.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr1] + ).should.equal(set([subnetA.id, subnetB1.id])) + + subnets_by_cidr2 = conn.get_all_subnets( + filters={'cidr-block': "10.0.0.0/24"}) + subnets_by_cidr2.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr2] + ).should.equal(set([subnetA.id, subnetB1.id])) + + subnets_by_cidr3 = conn.get_all_subnets( + filters={'cidrBlock': "10.0.0.0/24"}) + subnets_by_cidr3.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr3] + ).should.equal(set([subnetA.id, subnetB1.id])) + + # Filter by VPC ID and CIDR + subnets_by_vpc_and_cidr = conn.get_all_subnets( + filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) + subnets_by_vpc_and_cidr.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_vpc_and_cidr] + ).should.equal(set([subnetB1.id])) 
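+
+    # Note on filter semantics (mirroring EC2's DescribeSubnets): separate
+    # filter keys are ANDed together, as in the vpc-id + cidr case above,
+    # while multiple values for a single key are ORed. A hypothetical sketch
+    # of the OR case, assuming boto passes list values through as
+    # Filter.N.Value.M entries:
+    #
+    #   subnets_by_either_cidr = conn.get_all_subnets(
+    #       filters={'cidr': ['10.0.0.0/24', '10.0.1.0/24']})
+    #   set(s.id for s in subnets_by_either_cidr).should.equal(
+    #       set([subnetA.id, subnetB1.id, subnetB2.id]))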
+ + # Filter by subnet ID + subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) + subnets_by_id.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_id]).should.equal(set([subnetA.id])) + + # Filter by availabilityZone + subnets_by_az = conn.get_all_subnets( + filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) + subnets_by_az.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_az] + ).should.equal(set([subnetB1.id])) + + # Filter by defaultForAz + + subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"}) + subnets_by_az.should.have.length_of(len(conn.get_all_zones())) + + # Unsupported filter + conn.get_all_subnets.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_subnet_tags_through_cloudformation(): + vpc_conn = boto.vpc.connect_to_region('us-west-1') + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + "Tags": [{ + "Key": "foo", + "Value": "bar", + }, { + "Key": "blah", + "Value": "baz", + }] + } + } + } + } + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] + subnet.tags["foo"].should.equal("bar") + subnet.tags["blah"].should.equal("baz") + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " + "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateSubnet " + "operation: Value ({}) for parameter cidrBlock is invalid. 
This is not a valid CIDR block.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnets_with_overlapping_cidr_blocks(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.0.0.0/24' + with assert_raises(ClientError) as ex: + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet " + "operation: The CIDR '{}' conflicts with another subnet".format(subnet_cidr_block)) diff --git a/tests/test_ec2/test_utils.py b/tests/test_ec2/test_utils.py index 3e7a37a7a..49192dc79 100644 --- a/tests/test_ec2/test_utils.py +++ b/tests/test_ec2/test_utils.py @@ -1,8 +1,12 @@ -from moto.ec2 import utils - - -def test_random_key_pair(): - key_pair = utils.random_key_pair() - assert len(key_pair['fingerprint']) == 59 - assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----') - assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----') +from moto.ec2 import utils + +from .helpers import rsa_check_private_key + + +def test_random_key_pair(): + key_pair = utils.random_key_pair() + rsa_check_private_key(key_pair['material']) + + # AWS uses MD5 fingerprints, which are 47 characters long, *not* SHA1 + # fingerprints with 59 characters. + assert len(key_pair['fingerprint']) == 47 diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 082499a72..edfbfb3c2 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -107,14 +107,19 @@ def test_vpc_peering_connections_cross_region(): ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') # create peering - vpc_pcx = ec2_usw1.create_vpc_peering_connection( + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( VpcId=vpc_usw1.id, PeerVpcId=vpc_apn1.id, PeerRegion='ap-northeast-1', ) - vpc_pcx.status['Code'].should.equal('initiating-request') - vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) - vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + vpc_pcx_usw1.status['Code'].should.equal('initiating-request') + vpc_pcx_usw1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_usw1.accepter_vpc.id.should.equal(vpc_apn1.id) + # test cross region vpc peering connection exist + vpc_pcx_apn1 = ec2_apn1.VpcPeeringConnection(vpc_pcx_usw1.id) + vpc_pcx_apn1.id.should.equal(vpc_pcx_usw1.id) + vpc_pcx_apn1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_apn1.accepter_vpc.id.should.equal(vpc_apn1.id) @mock_ec2 @@ -131,3 +136,148 @@ def test_vpc_peering_connections_cross_region_fail(): PeerVpcId=vpc_apn1.id, PeerRegion='ap-northeast-2') cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_accept(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # accept peering from ap-northeast-1 + ec2_apn1 = 
boto3.client('ec2', region_name='ap-northeast-1')
+    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
+    acp_pcx_apn1 = ec2_apn1.accept_vpc_peering_connection(
+        VpcPeeringConnectionId=vpc_pcx_usw1.id
+    )
+    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    acp_pcx_apn1['VpcPeeringConnection']['Status']['Code'].should.equal('active')
+    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
+    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active')
+
+
+@mock_ec2
+def test_vpc_peering_connections_cross_region_reject():
+    # create vpc in us-west-1 and ap-northeast-1
+    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
+    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
+    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
+    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
+    # create peering
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
+        VpcId=vpc_usw1.id,
+        PeerVpcId=vpc_apn1.id,
+        PeerRegion='ap-northeast-1',
+    )
+    # reject peering from ap-northeast-1
+    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
+    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
+    rej_pcx_apn1 = ec2_apn1.reject_vpc_peering_connection(
+        VpcPeeringConnectionId=vpc_pcx_usw1.id
+    )
+    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    rej_pcx_apn1['Return'].should.equal(True)
+    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
+    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected')
+
+
+@mock_ec2
+def test_vpc_peering_connections_cross_region_delete():
+    # create vpc in us-west-1 and ap-northeast-1
+    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
+    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
+    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
+    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
+    # create peering
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
+        VpcId=vpc_usw1.id,
+        PeerVpcId=vpc_apn1.id,
+        PeerRegion='ap-northeast-1',
+    )
+    # delete peering from ap-northeast-1
+    ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1')
+    ec2_usw1 = boto3.client('ec2', region_name='us-west-1')
+    del_pcx_apn1 = ec2_apn1.delete_vpc_peering_connection(
+        VpcPeeringConnectionId=vpc_pcx_usw1.id
+    )
+    des_pcx_apn1 = ec2_apn1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections(
+        VpcPeeringConnectionIds=[vpc_pcx_usw1.id]
+    )
+    del_pcx_apn1['Return'].should.equal(True)
+    des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
+    des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted')
+
+
+@mock_ec2
+def test_vpc_peering_connections_cross_region_accept_wrong_region():
+    # create vpc in us-west-1 and ap-northeast-1
+    ec2_usw1 = boto3.resource('ec2', region_name='us-west-1')
+    vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16')
+    ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1')
+    vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16')
+    # create peering
+    vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection(
+
VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + + # accept wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.accept_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be ' \ + 'accepted in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_reject_wrong_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.reject_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be accepted or ' \ + 'rejected in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 4556e5ea0..ad17deb3c 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -1,541 +1,565 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # flake8: noqa -from nose.tools import assert_raises -from moto.ec2.exceptions import EC2ClientError -from botocore.exceptions import ClientError - -import boto3 -import boto -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -SAMPLE_DOMAIN_NAME = u'example.com' -SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] - - -@mock_ec2_deprecated -def test_vpcs(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - vpc.cidr_block.should.equal('10.0.0.0/16') - - all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(2) - - vpc.delete() - - all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc("vpc-1234abcd") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_vpc_defaults(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - conn.get_all_vpcs().should.have.length_of(2) - conn.get_all_route_tables().should.have.length_of(2) - conn.get_all_security_groups( - filters={'vpc-id': [vpc.id]}).should.have.length_of(1) - - vpc.delete() - - 
conn.get_all_vpcs().should.have.length_of(1) - conn.get_all_route_tables().should.have.length_of(1) - conn.get_all_security_groups( - filters={'vpc-id': [vpc.id]}).should.have.length_of(0) - - -@mock_ec2_deprecated -def test_vpc_isdefault_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) - vpc.delete() - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) - - -@mock_ec2_deprecated -def test_multiple_vpcs_default_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - conn.create_vpc("10.8.0.0/16") - conn.create_vpc("10.0.0.0/16") - conn.create_vpc("192.168.0.0/16") - conn.get_all_vpcs().should.have.length_of(4) - vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) - vpc.should.have.length_of(1) - vpc[0].cidr_block.should.equal('172.31.0.0/16') - - -@mock_ec2_deprecated -def test_vpc_state_available_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.1.0.0/16") - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) - vpc.delete() - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) - - -@mock_ec2_deprecated -def test_vpc_tagging(): - conn = boto.connect_vpc() - vpc = conn.create_vpc("10.0.0.0/16") - - vpc.add_tag("a key", "some value") - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the vpc - vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] - vpc.tags.should.have.length_of(1) - vpc.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_vpc_get_by_id(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/16") - - vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_vpc_get_by_cidr_block(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_dhcp_options_id(): - conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - conn.associate_dhcp_options(dhcp_options.id, vpc1.id) - conn.associate_dhcp_options(dhcp_options.id, vpc2.id) - - vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = 
conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc2.add_tag('Name', 'TestVPC') - vpc3.add_tag('Name', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_key_superset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_key_subset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Test', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_value_superset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_value_subset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2 -def test_default_vpc(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - default_vpc = list(ec2.vpcs.all())[0] - default_vpc.cidr_block.should.equal('172.31.0.0/16') - default_vpc.instance_tenancy.should.equal('default') - default_vpc.reload() - default_vpc.is_default.should.be.ok - - # Test default values for VPC attributes - response = default_vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').should.be.ok - - -@mock_ec2 -def test_non_default_vpc(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - this already exists when 
backend instantiated! - #ec2.create_vpc(CidrBlock='172.31.0.0/16') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - # Test default instance_tenancy - vpc.instance_tenancy.should.equal('default') - - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').shouldnt.be.ok - - # Check Primary CIDR Block Associations - cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) - cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') - cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) - cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') - - -@mock_ec2 -def test_vpc_dedicated_tenancy(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - vpc.instance_tenancy.should.equal('dedicated') - - -@mock_ec2 -def test_vpc_modify_enable_dns_support(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - vpc.modify_attribute(EnableDnsSupport={'Value': False}) - - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').shouldnt.be.ok - - -@mock_ec2 -def test_vpc_modify_enable_dns_hostnames(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').shouldnt.be.ok - - vpc.modify_attribute(EnableDnsHostnames={'Value': True}) - - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').should.be.ok - - -@mock_ec2_deprecated -def test_vpc_associate_dhcp_options(): - conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc = conn.create_vpc("10.0.0.0/16") - - conn.associate_dhcp_options(dhcp_options.id, vpc.id) - - vpc.update() - dhcp_options.id.should.equal(vpc.dhcp_options_id) - - -@mock_ec2 -def test_associate_vpc_ipv4_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') - - # Associate/Extend vpc CIDR range up to 5 ciders - for i in range(43, 47): - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i)) - response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating') - response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i)) - 
response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc') - - # Check all associations exist - vpc = ec2.Vpc(vpc.id) - vpc.cidr_block_association_set.should.have.length_of(5) - vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated') - vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated') - - # Check error on adding 6th association. - with assert_raises(ClientError) as ex: - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22') - str(ex.exception).should.equal( - "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id)) - -@mock_ec2 -def test_disassociate_vpc_ipv4_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24') - - # Remove an extended cidr block - vpc = ec2.Vpc(vpc.id) - non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None) - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId']) - response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating') - response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock']) - response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId']) - - # Error attempting to delete a non-existent CIDR_BLOCK association - with assert_raises(ClientError) as ex: - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123') - str(ex.exception).should.equal( - "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " - "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " - "'vpc-cidr-assoc-BORING123' does not exist") - - # Error attempting to delete Primary CIDR BLOCK association - vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set - if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] - - with assert_raises(ClientError) as ex: - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) - str(ex.exception).should.equal( - "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " - "The vpc CIDR block with association ID {} may not be disassociated. 
It is the primary " - "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) - -@mock_ec2 -def test_cidr_block_association_filters(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') - vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') - vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') - vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') - - # Test filters for a cidr-block in all VPCs cidr-block-associations - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', - 'Values': ['10.10.0.0/19']}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc2.id) - - # Test filter for association id in VPCs - association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', - 'Values': [association_id]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc3.id) - - # Test filter for association state in VPC - this will never show anything in this test - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', - 'Values': ['failing']}])) - filtered_vpcs.should.be.length_of(0) - -@mock_ec2 -def test_vpc_associate_ipv6_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Test create VPC with IPV6 cidr range - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) - ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) - ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') - ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') - ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') - - # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
- with assert_raises(ClientError) as ex: - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) - str(ex.exception).should.equal( - "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id)) - - # Test associate ipv6 cidr block after vpc created - vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24') - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating') - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') - response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-') - - # Check on describe vpc that has ipv6 cidr block association - vpc = ec2.Vpc(vpc.id) - vpc.ipv6_cidr_block_association_set.should.be.length_of(1) - - -@mock_ec2 -def test_vpc_disassociate_ipv6_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Test create VPC with IPV6 cidr range - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) - # Test disassociating the only IPV6 - assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId'] - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id) - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating') - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') - response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id) - - -@mock_ec2 -def test_ipv6_cidr_block_association_filters(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') - - vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True) - vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId'] - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') - - vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True) - vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] - - vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16') # Here for its looks - - # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block', - 'Values': [vpc3_ipv6_cidr_block]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc3.id) - - # Test filter for association id in VPCs - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id', - 'Values': [vpc2_assoc_ipv6_assoc_id]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc2.id) - - # Test filter for association state in VPC - this will never show anything in this test - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', - 'Values': ['associated']}])) - filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import 
tests.backport_assert_raises # flake8: noqa +from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError + +import boto3 +import boto +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + +SAMPLE_DOMAIN_NAME = u'example.com' +SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] + + +@mock_ec2_deprecated +def test_vpcs(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + vpc.cidr_block.should.equal('10.0.0.0/16') + + all_vpcs = conn.get_all_vpcs() + all_vpcs.should.have.length_of(2) + + vpc.delete() + + all_vpcs = conn.get_all_vpcs() + all_vpcs.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc("vpc-1234abcd") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_vpc_defaults(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + conn.get_all_vpcs().should.have.length_of(2) + conn.get_all_route_tables().should.have.length_of(2) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(1) + + vpc.delete() + + conn.get_all_vpcs().should.have.length_of(1) + conn.get_all_route_tables().should.have.length_of(1) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + + +@mock_ec2_deprecated +def test_vpc_isdefault_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) + vpc.delete() + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) + + +@mock_ec2_deprecated +def test_multiple_vpcs_default_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + conn.create_vpc("10.8.0.0/16") + conn.create_vpc("10.0.0.0/16") + conn.create_vpc("192.168.0.0/16") + conn.get_all_vpcs().should.have.length_of(4) + vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) + vpc.should.have.length_of(1) + vpc[0].cidr_block.should.equal('172.31.0.0/16') + + +@mock_ec2_deprecated +def test_vpc_state_available_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.1.0.0/16") + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) + vpc.delete() + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) + + +@mock_ec2_deprecated +def test_vpc_tagging(): + conn = boto.connect_vpc() + vpc = conn.create_vpc("10.0.0.0/16") + + vpc.add_tag("a key", "some value") + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the vpc + vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] + vpc.tags.should.have.length_of(1) + vpc.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_vpc_get_by_id(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/16") + + vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) + 
cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_vpc_get_by_cidr_block(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_dhcp_options_id(): + conn = boto.connect_vpc() + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + conn.associate_dhcp_options(dhcp_options.id, vpc1.id) + conn.associate_dhcp_options(dhcp_options.id, vpc2.id) + + vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc2.add_tag('Name', 'TestVPC') + vpc3.add_tag('Name', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_key_superset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_key_subset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Test', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_value_superset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def 
test_vpc_get_by_tag_value_subset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2 +def test_default_vpc(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') + default_vpc.instance_tenancy.should.equal('default') + default_vpc.reload() + default_vpc.is_default.should.be.ok + + # Test default values for VPC attributes + response = default_vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') + attr = response.get('EnableDnsHostnames') + attr.get('Value').should.be.ok + + +@mock_ec2 +def test_non_default_vpc(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC - this already exists when backend instantiated! + #ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + # Test default instance_tenancy + vpc.instance_tenancy.should.equal('default') + + # Test default values for VPC attributes + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + response = vpc.describe_attribute(Attribute='enableDnsHostnames') + attr = response.get('EnableDnsHostnames') + attr.get('Value').shouldnt.be.ok + + # Check Primary CIDR Block Associations + cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) + cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') + cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) + cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + +@mock_ec2 +def test_vpc_dedicated_tenancy(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + vpc.instance_tenancy.should.equal('dedicated') + + +@mock_ec2 +def test_vpc_modify_enable_dns_support(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + # Test default values for VPC attributes + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + vpc.modify_attribute(EnableDnsSupport={'Value': False}) + + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').shouldnt.be.ok + + +@mock_ec2 +def test_vpc_modify_enable_dns_hostnames(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + 
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    # Test default values for VPC attributes
+    response = vpc.describe_attribute(Attribute='enableDnsHostnames')
+    attr = response.get('EnableDnsHostnames')
+    attr.get('Value').shouldnt.be.ok
+
+    vpc.modify_attribute(EnableDnsHostnames={'Value': True})
+
+    response = vpc.describe_attribute(Attribute='enableDnsHostnames')
+    attr = response.get('EnableDnsHostnames')
+    attr.get('Value').should.be.ok
+
+
+@mock_ec2_deprecated
+def test_vpc_associate_dhcp_options():
+    conn = boto.connect_vpc()
+    dhcp_options = conn.create_dhcp_options(
+        SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    conn.associate_dhcp_options(dhcp_options.id, vpc.id)
+
+    vpc.update()
+    dhcp_options.id.should.equal(vpc.dhcp_options_id)
+
+
+@mock_ec2
+def test_associate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+
+    # Associate/extend the VPC CIDR range up to the maximum of 5 CIDRs
+    for i in range(43, 47):
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating')
+        response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc')
+
+    # Check all associations exist
+    vpc = ec2.Vpc(vpc.id)
+    vpc.cidr_block_association_set.should.have.length_of(5)
+    vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated')
+    vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated')
+
+    # Check error on adding 6th association.
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22')
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id))
+
+@mock_ec2
+def test_disassociate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24')
+
+    # Remove an extended cidr block
+    vpc = ec2.Vpc(vpc.id)
+    non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None)
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId'])
+    response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating')
+    response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock'])
+    response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId'])
+
+    # Error attempting to delete a non-existent CIDR block association
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123')
+    str(ex.exception).should.equal(
+        "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the "
+        "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID "
+        "'vpc-cidr-assoc-BORING123' does not exist")
+
+    # Error attempting to delete the primary CIDR block association
+    vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set
+                                        if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId']
+
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id)
+    str(ex.exception).should.equal(
+        "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: "
+        "The vpc CIDR block with association ID {} may not be disassociated. It is the primary "
+        "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id))
+
+@mock_ec2
+def test_cidr_block_association_filters():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16')
+    vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19')
+    vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24')
+    vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24')
+
+    # Test filters for a cidr-block in all VPCs cidr-block-associations
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block',
+                                                   'Values': ['10.10.0.0/19']}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc2.id)
+
+    # Test filter for association id in VPCs
+    association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId']
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id',
+                                                   'Values': [association_id]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc3.id)
+
+    # Test filter with a bogus association id - matches nothing in this test
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id',
+                                                   'Values': ['failing']}]))
+    filtered_vpcs.should.be.length_of(0)
+
+@mock_ec2
+def test_vpc_associate_ipv6_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Test create VPC with IPV6 cidr range
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True)
+    ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None)
+    ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated')
+    ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56')
+    ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc')
+
+    # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time!
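+    # EC2 currently caps a VPC at one Amazon-provided IPv6 CIDR block, so the
+    # second AssociateVpcCidrBlock call below should raise CidrLimitExceeded.
+    # A hypothetical guard an application could use before associating (a
+    # sketch, not part of the assertion under test):
+    #
+    #   if vpc.ipv6_cidr_block_association_set:
+    #       raise RuntimeError('VPC already has an IPv6 CIDR block')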
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id))
+
+    # Test associate ipv6 cidr block after vpc created
+    vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-')
+
+    # Check on describe vpc that has ipv6 cidr block association
+    vpc = ec2.Vpc(vpc.id)
+    vpc.ipv6_cidr_block_association_set.should.be.length_of(1)
+
+
+@mock_ec2
+def test_vpc_disassociate_ipv6_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Test create VPC with IPV6 cidr range
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True)
+    # Test disassociating the only IPV6
+    assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId']
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id)
+
+
+@mock_ec2
+def test_ipv6_cidr_block_association_filters():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16')
+
+    vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True)
+    vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId']
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19')
+
+    vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True)
+    vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+
+    vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16')  # decoy VPC that should match none of the filters below
+
+    # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block',
+                                                   'Values': [vpc3_ipv6_cidr_block]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc3.id)
+
+    # Test filter for association id in VPCs
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id',
+                                                   'Values': [vpc2_assoc_ipv6_assoc_id]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc2.id)
+
+    # Test filter for association state in VPC - only the two VPCs with an IPv6 association match
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state',
+                                                   'Values': ['associated']}]))
+    filtered_vpcs.should.be.length_of(2)  # 2 of 4 VPCs
+
+
+@mock_ec2
+def test_create_vpc_with_invalid_cidr_block_parameter():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+
vpc_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateVpc " + "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(vpc_cidr_block)) + + +@mock_ec2 +def test_create_vpc_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc_cidr_block = '10.1.0.0/29' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidVpc.Range) when calling the CreateVpc " + "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block)) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 3bf25b8fc..b147c4159 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -388,23 +388,32 @@ def test_list_services(): cluster='test_ecs_cluster', serviceName='test_ecs_service1', taskDefinition='test_ecs_task', + schedulingStrategy='REPLICA', desiredCount=2 ) _ = client.create_service( cluster='test_ecs_cluster', serviceName='test_ecs_service2', taskDefinition='test_ecs_task', + schedulingStrategy='DAEMON', desiredCount=2 ) - response = client.list_services( + unfiltered_response = client.list_services( cluster='test_ecs_cluster' ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( + len(unfiltered_response['serviceArns']).should.equal(2) + unfiltered_response['serviceArns'][0].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal( + unfiltered_response['serviceArns'][1].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + filtered_response = client.list_services( + cluster='test_ecs_cluster', + schedulingStrategy='REPLICA' + ) + len(filtered_response['serviceArns']).should.equal(1) + filtered_response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') @mock_ecs def test_describe_services(): diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 6c6492894..447896f15 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -1,982 +1,982 @@ -from __future__ import unicode_literals -import boto3 -import botocore -import boto -import boto.ec2.elb -from boto.ec2.elb import HealthCheck -from boto.ec2.elb.attributes import ( - ConnectionSettingAttribute, - ConnectionDrainingAttribute, - AccessLogAttribute, -) -from botocore.exceptions import ClientError -from boto.exception import BotoServerError -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated - - -@mock_elb_deprecated -@mock_ec2_deprecated -def test_create_load_balancer(): - conn = boto.connect_elb() - ec2 = boto.connect_ec2('the_key', 'the_secret') - - security_group = ec2.create_security_group('sg-abc987', 'description') - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) - - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - balancer.name.should.equal("my-lb") - balancer.scheme.should.equal("internal") - list(balancer.security_groups).should.equal([security_group.id]) - 
set(balancer.availability_zones).should.equal( - set(['us-east-1a', 'us-east-1b'])) - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - listener2 = balancer.listeners[1] - listener2.load_balancer_port.should.equal(443) - listener2.instance_port.should.equal(8443) - listener2.protocol.should.equal("TCP") - - -@mock_elb_deprecated -def test_getting_missing_elb(): - conn = boto.connect_elb() - conn.get_all_load_balancers.when.called_with( - load_balancer_names='aaa').should.throw(BotoServerError) - - -@mock_elb_deprecated -def test_create_elb_in_multiple_region(): - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - - west1_conn = boto.ec2.elb.connect_to_region("us-west-1") - west1_conn.create_load_balancer('my-lb', zones, ports) - - west2_conn = boto.ec2.elb.connect_to_region("us-west-2") - west2_conn.create_load_balancer('my-lb', zones, ports) - - list(west1_conn.get_all_load_balancers()).should.have.length_of(1) - list(west2_conn.get_all_load_balancers()).should.have.length_of(1) - - -@mock_elb_deprecated -def test_create_load_balancer_with_certificate(): - conn = boto.connect_elb() - - zones = ['us-east-1a'] - ports = [ - (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] - conn.create_load_balancer('my-lb', zones, ports) - - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - balancer.name.should.equal("my-lb") - balancer.scheme.should.equal("internet-facing") - set(balancer.availability_zones).should.equal(set(['us-east-1a'])) - listener = balancer.listeners[0] - listener.load_balancer_port.should.equal(443) - listener.instance_port.should.equal(8443) - listener.protocol.should.equal("HTTPS") - listener.ssl_certificate_id.should.equal( - 'arn:aws:iam:123456789012:server-certificate/test-cert') - - -@mock_elb -def test_create_and_delete_boto3_support(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.delete_load_balancer( - LoadBalancerName='my-lb' - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(0) - - -@mock_elb -def test_create_load_balancer_with_no_listeners_defined(): - client = boto3.client('elb', region_name='us-east-1') - - with assert_raises(ClientError): - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - -@mock_elb -def test_describe_paginated_balancers(): - client = boto3.client('elb', region_name='us-east-1') - - for i in range(51): - client.create_load_balancer( - LoadBalancerName='my-lb%d' % i, - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - resp = client.describe_load_balancers() - resp['LoadBalancerDescriptions'].should.have.length_of(50) - resp['NextMarker'].should.equal(resp['LoadBalancerDescriptions'][-1]['LoadBalancerName']) - resp2 = client.describe_load_balancers(Marker=resp['NextMarker']) - resp2['LoadBalancerDescriptions'].should.have.length_of(1) - assert 'NextToken' not in resp2.keys() - - -@mock_elb -@mock_ec2 -def 
test_apply_security_groups_to_load_balancer(): - client = boto3.client('elb', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - security_group = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - response = client.apply_security_groups_to_load_balancer( - LoadBalancerName='my-lb', - SecurityGroups=[security_group.id]) - - assert response['SecurityGroups'] == [security_group.id] - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - assert balancer['SecurityGroups'] == [security_group.id] - - # Using a not-real security group raises an error - with assert_raises(ClientError) as error: - response = client.apply_security_groups_to_load_balancer( - LoadBalancerName='my-lb', - SecurityGroups=['not-really-a-security-group']) - assert "One or more of the specified security groups do not exist." in str(error.exception) - - -@mock_elb_deprecated -def test_add_listener(): - conn = boto.connect_elb() - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http')] - conn.create_load_balancer('my-lb', zones, ports) - new_listener = (443, 8443, 'tcp') - conn.create_load_balancer_listeners('my-lb', [new_listener]) - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - listener2 = balancer.listeners[1] - listener2.load_balancer_port.should.equal(443) - listener2.instance_port.should.equal(8443) - listener2.protocol.should.equal("TCP") - - -@mock_elb_deprecated -def test_delete_listener(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - conn.delete_load_balancer_listeners('my-lb', [443]) - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - balancer.listeners.should.have.length_of(1) - - -@mock_elb -def test_create_and_delete_listener_boto3_support(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.create_load_balancer_listeners( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}] - ) - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - list(balancer['ListenerDescriptions']).should.have.length_of(2) - balancer['ListenerDescriptions'][0][ - 'Listener']['Protocol'].should.equal('HTTP') - balancer['ListenerDescriptions'][0]['Listener'][ - 'LoadBalancerPort'].should.equal(80) - balancer['ListenerDescriptions'][0]['Listener'][ - 'InstancePort'].should.equal(8080) - balancer['ListenerDescriptions'][1][ - 'Listener']['Protocol'].should.equal('TCP') - 
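
test_describe_paginated_balancers above pins down classic ELB's page size of 50 and its Marker/NextMarker handshake. A sketch of draining every page under the same mock (names are illustrative):

import boto3
from moto import mock_elb

@mock_elb
def all_balancer_names():
    client = boto3.client('elb', region_name='us-east-1')
    for i in range(51):
        client.create_load_balancer(
            LoadBalancerName='my-lb%d' % i,
            Listeners=[{'Protocol': 'tcp',
                        'LoadBalancerPort': 80, 'InstancePort': 8080}],
            AvailabilityZones=['us-east-1a'])
    names, marker = [], None
    while True:
        page = (client.describe_load_balancers(Marker=marker)
                if marker else client.describe_load_balancers())
        names.extend(lb['LoadBalancerName']
                     for lb in page['LoadBalancerDescriptions'])
        marker = page.get('NextMarker')  # absent on the last page
        if not marker:
            break
    print(len(names))  # 51
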
balancer['ListenerDescriptions'][1]['Listener'][ - 'LoadBalancerPort'].should.equal(443) - balancer['ListenerDescriptions'][1]['Listener'][ - 'InstancePort'].should.equal(8443) - - # Creating this listener with an conflicting definition throws error - with assert_raises(ClientError): - client.create_load_balancer_listeners( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}] - ) - - client.delete_load_balancer_listeners( - LoadBalancerName='my-lb', - LoadBalancerPorts=[443]) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - list(balancer['ListenerDescriptions']).should.have.length_of(1) - - -@mock_elb_deprecated -def test_set_sslcertificate(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - conn.set_lb_listener_SSL_certificate('my-lb', '443', 'arn:certificate') - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(443) - listener1.instance_port.should.equal(8443) - listener1.protocol.should.equal("TCP") - listener1.ssl_certificate_id.should.equal("arn:certificate") - - -@mock_elb_deprecated -def test_get_load_balancers_by_name(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb1', zones, ports) - conn.create_load_balancer('my-lb2', zones, ports) - conn.create_load_balancer('my-lb3', zones, ports) - - conn.get_all_load_balancers().should.have.length_of(3) - conn.get_all_load_balancers( - load_balancer_names=['my-lb1']).should.have.length_of(1) - conn.get_all_load_balancers( - load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) - - -@mock_elb_deprecated -def test_delete_load_balancer(): - conn = boto.connect_elb() - - zones = ['us-east-1a'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - - balancers = conn.get_all_load_balancers() - balancers.should.have.length_of(1) - - conn.delete_load_balancer("my-lb") - balancers = conn.get_all_load_balancers() - balancers.should.have.length_of(0) - - -@mock_elb_deprecated -def test_create_health_check(): - conn = boto.connect_elb() - - hc = HealthCheck( - interval=20, - healthy_threshold=3, - unhealthy_threshold=5, - target='HTTP:8080/health', - timeout=23, - ) - - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - lb.configure_health_check(hc) - - balancer = conn.get_all_load_balancers()[0] - health_check = balancer.health_check - health_check.interval.should.equal(20) - health_check.healthy_threshold.should.equal(3) - health_check.unhealthy_threshold.should.equal(5) - health_check.target.should.equal('HTTP:8080/health') - health_check.timeout.should.equal(23) - - -@mock_elb -def test_create_health_check_boto3(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.configure_health_check( - LoadBalancerName='my-lb', - HealthCheck={ - 'Target': 'HTTP:8080/health', - 'Interval': 20, - 'Timeout': 23, - 'HealthyThreshold': 3, - 'UnhealthyThreshold': 5 - } - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - 
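
The two health-check tests around this point verify the same round trip through the deprecated and the boto3 APIs. Reduced to its essentials with boto3 (values copied from the tests):

import boto3
from moto import mock_elb

@mock_elb
def health_check_round_trip():
    client = boto3.client('elb', region_name='us-east-1')
    client.create_load_balancer(
        LoadBalancerName='my-lb',
        Listeners=[{'Protocol': 'http',
                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
        AvailabilityZones=['us-east-1a'])
    client.configure_health_check(
        LoadBalancerName='my-lb',
        HealthCheck={'Target': 'HTTP:8080/health', 'Interval': 20,
                     'Timeout': 23, 'HealthyThreshold': 3,
                     'UnhealthyThreshold': 5})
    # The configured values come back on describe_load_balancers
    hc = client.describe_load_balancers()[
        'LoadBalancerDescriptions'][0]['HealthCheck']
    print(hc['Target'], hc['Interval'], hc['Timeout'])
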
balancer['HealthCheck']['Target'].should.equal('HTTP:8080/health') - balancer['HealthCheck']['Interval'].should.equal(20) - balancer['HealthCheck']['Timeout'].should.equal(23) - balancer['HealthCheck']['HealthyThreshold'].should.equal(3) - balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_register_instances(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - lb.register_instances([instance_id1, instance_id2]) - - balancer = conn.get_all_load_balancers()[0] - instance_ids = [instance.id for instance in balancer.instances] - set(instance_ids).should.equal(set([instance_id1, instance_id2])) - - -@mock_ec2 -@mock_elb -def test_register_instances_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.register_instances_with_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1}, - {'InstanceId': instance_id2} - ] - ) - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - instance_ids = [instance['InstanceId'] - for instance in balancer['Instances']] - set(instance_ids).should.equal(set([instance_id1, instance_id2])) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_deregister_instances(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - lb.register_instances([instance_id1, instance_id2]) - - balancer = conn.get_all_load_balancers()[0] - balancer.instances.should.have.length_of(2) - balancer.deregister_instances([instance_id1]) - - balancer.instances.should.have.length_of(1) - balancer.instances[0].id.should.equal(instance_id2) - - -@mock_ec2 -@mock_elb -def test_deregister_instances_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.register_instances_with_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1}, - {'InstanceId': instance_id2} - ] - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['Instances'].should.have.length_of(2) - - client.deregister_instances_from_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1} - ] - ) - - balancer = 
client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['Instances'].should.have.length_of(1) - balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) - - -@mock_elb_deprecated -def test_default_attributes(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - attributes = lb.get_attributes() - - attributes.cross_zone_load_balancing.enabled.should.be.false - attributes.connection_draining.enabled.should.be.false - attributes.access_log.enabled.should.be.false - attributes.connecting_settings.idle_timeout.should.equal(60) - - -@mock_elb_deprecated -def test_cross_zone_load_balancing_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True) - attributes = lb.get_attributes(force=True) - attributes.cross_zone_load_balancing.enabled.should.be.true - - conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False) - attributes = lb.get_attributes(force=True) - attributes.cross_zone_load_balancing.enabled.should.be.false - - -@mock_elb_deprecated -def test_connection_draining_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - connection_draining = ConnectionDrainingAttribute() - connection_draining.enabled = True - connection_draining.timeout = 60 - - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.enabled.should.be.true - attributes.connection_draining.timeout.should.equal(60) - - connection_draining.timeout = 30 - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.timeout.should.equal(30) - - connection_draining.enabled = False - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.enabled.should.be.false - - -@mock_elb_deprecated -def test_access_log_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - access_log = AccessLogAttribute() - access_log.enabled = True - access_log.s3_bucket_name = 'bucket' - access_log.s3_bucket_prefix = 'prefix' - access_log.emit_interval = 60 - - conn.modify_lb_attribute("my-lb", "AccessLog", access_log) - attributes = lb.get_attributes(force=True) - attributes.access_log.enabled.should.be.true - attributes.access_log.s3_bucket_name.should.equal("bucket") - attributes.access_log.s3_bucket_prefix.should.equal("prefix") - attributes.access_log.emit_interval.should.equal(60) - - access_log.enabled = False - conn.modify_lb_attribute("my-lb", "AccessLog", access_log) - attributes = lb.get_attributes(force=True) - attributes.access_log.enabled.should.be.false - - -@mock_elb_deprecated -def test_connection_settings_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - connection_settings = ConnectionSettingAttribute(conn) - connection_settings.idle_timeout = 120 - - conn.modify_lb_attribute( - "my-lb", "ConnectingSettings", connection_settings) - attributes = lb.get_attributes(force=True) - 
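
The register/deregister pairs above all follow one shape: create instances, attach them, detach one, and confirm the survivor via describe_load_balancers. Condensed to a boto3 sketch (AMI id and names as in the tests):

import boto3
from moto import mock_ec2, mock_elb

@mock_ec2
@mock_elb
def register_then_deregister():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    instances = ec2.create_instances(
        ImageId='ami-1234abcd', MinCount=2, MaxCount=2)
    client = boto3.client('elb', region_name='us-east-1')
    client.create_load_balancer(
        LoadBalancerName='my-lb',
        Listeners=[{'Protocol': 'http',
                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
        AvailabilityZones=['us-east-1a'])
    client.register_instances_with_load_balancer(
        LoadBalancerName='my-lb',
        Instances=[{'InstanceId': i.id} for i in instances])
    client.deregister_instances_from_load_balancer(
        LoadBalancerName='my-lb',
        Instances=[{'InstanceId': instances[0].id}])
    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
    print([i['InstanceId'] for i in balancer['Instances']])  # one id remains
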
attributes.connecting_settings.idle_timeout.should.equal(120) - - connection_settings.idle_timeout = 60 - conn.modify_lb_attribute( - "my-lb", "ConnectingSettings", connection_settings) - attributes = lb.get_attributes(force=True) - attributes.connecting_settings.idle_timeout.should.equal(60) - - -@mock_elb_deprecated -def test_create_lb_cookie_stickiness_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - cookie_expiration_period = 60 - policy_name = "LBCookieStickinessPolicy" - - lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name) - - lb = conn.get_all_load_balancers()[0] - # There appears to be a quirk about boto, whereby it returns a unicode - # string for cookie_expiration_period, despite being stated in - # documentation to be a long numeric. - # - # To work around that, this value is converted to an int and checked. - cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ - 0].cookie_expiration_period - int(cookie_expiration_period_response_str).should.equal( - cookie_expiration_period) - lb.policies.lb_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_lb_cookie_stickiness_policy_no_expiry(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - policy_name = "LBCookieStickinessPolicy" - - lb.create_cookie_stickiness_policy(None, policy_name) - - lb = conn.get_all_load_balancers()[0] - lb.policies.lb_cookie_stickiness_policies[ - 0].cookie_expiration_period.should.be.none - lb.policies.lb_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_app_cookie_stickiness_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - cookie_name = "my-stickiness-policy" - policy_name = "AppCookieStickinessPolicy" - - lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) - - lb = conn.get_all_load_balancers()[0] - lb.policies.app_cookie_stickiness_policies[ - 0].cookie_name.should.equal(cookie_name) - lb.policies.app_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_lb_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - policy_name = "ProxyPolicy" - - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { - 'ProxyProtocol': True}) - - lb = conn.get_all_load_balancers()[0] - lb.policies.other_policies[0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_set_policies_of_listener(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - listener_port = 80 - policy_name = "my-stickiness-policy" - - # boto docs currently state that zero or one policy may be associated - # with a given listener - - # in a real flow, it is necessary first to create a policy, - # then to set that policy to the listener - lb.create_cookie_stickiness_policy(None, policy_name) - lb.set_policies_of_listener(listener_port, [policy_name]) - - lb = conn.get_all_load_balancers()[0] - listener = lb.listeners[0] - listener.load_balancer_port.should.equal(listener_port) - # by contrast to a backend, a listener stores only policy name 
strings - listener.policy_names[0].should.equal(policy_name) - - -@mock_elb_deprecated -def test_set_policies_of_backend_server(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - instance_port = 8080 - policy_name = "ProxyPolicy" - - # in a real flow, it is necessary first to create a policy, - # then to set that policy to the backend - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { - 'ProxyProtocol': True}) - lb.set_policies_of_backend_server(instance_port, [policy_name]) - - lb = conn.get_all_load_balancers()[0] - backend = lb.backends[0] - backend.instance_port.should.equal(instance_port) - # by contrast to a listener, a backend stores OtherPolicy objects - backend.policies[0].policy_name.should.equal(policy_name) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_describe_instance_health(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', zones, ports) - - instances_health = conn.describe_instance_health('my-lb') - instances_health.should.be.empty - - lb.register_instances([instance_id1, instance_id2]) - - instances_health = conn.describe_instance_health('my-lb') - instances_health.should.have.length_of(2) - for instance_health in instances_health: - instance_health.instance_id.should.be.within( - [instance_id1, instance_id2]) - instance_health.state.should.equal('InService') - - instances_health = conn.describe_instance_health('my-lb', [instance_id1]) - instances_health.should.have.length_of(1) - instances_health[0].instance_id.should.equal(instance_id1) - instances_health[0].state.should.equal('InService') - - -@mock_ec2 -@mock_elb -def test_describe_instance_health_boto3(): - elb = boto3.client('elb', region_name="us-east-1") - ec2 = boto3.client('ec2', region_name="us-east-1") - instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] - lb_name = "my_load_balancer" - elb.create_load_balancer( - Listeners=[{ - 'InstancePort': 80, - 'LoadBalancerPort': 8080, - 'Protocol': 'HTTP' - }], - LoadBalancerName=lb_name, - ) - elb.register_instances_with_load_balancer( - LoadBalancerName=lb_name, - Instances=[{'InstanceId': instances[0]['InstanceId']}] - ) - instances_health = elb.describe_instance_health( - LoadBalancerName=lb_name, - Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] - ) - instances_health['InstanceStates'].should.have.length_of(2) - instances_health['InstanceStates'][0]['InstanceId'].\ - should.equal(instances[0]['InstanceId']) - instances_health['InstanceStates'][0]['State'].\ - should.equal('InService') - instances_health['InstanceStates'][1]['InstanceId'].\ - should.equal(instances[1]['InstanceId']) - instances_health['InstanceStates'][1]['State'].\ - should.equal('Unknown') - - -@mock_elb -def test_add_remove_tags(): - client = boto3.client('elb', region_name='us-east-1') - - client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - 
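
The policy tests above all follow the two-step flow their comments describe: create a policy, then bind it to a listener or backend port. The duration-based stickiness case, reduced to a sketch with the same deprecated boto API:

import boto
from moto import mock_elb_deprecated

@mock_elb_deprecated
def sticky_listener():
    conn = boto.connect_elb()
    lb = conn.create_load_balancer('my-lb', [], [(80, 8080, 'http')])
    # None means no cookie expiration period, i.e. browser-session stickiness
    lb.create_cookie_stickiness_policy(None, 'my-stickiness-policy')
    lb.set_policies_of_listener(80, ['my-stickiness-policy'])
    # As noted above, a listener stores only policy name strings
    listener = conn.get_all_load_balancers()[0].listeners[0]
    print(listener.policy_names)  # ['my-stickiness-policy']
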
list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - tags.should.have.key('a').which.should.equal('b') - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }, { - 'Key': 'b', - 'Value': 'b' - }, { - 'Key': 'c', - 'Value': 'b' - }, { - 'Key': 'd', - 'Value': 'b' - }, { - 'Key': 'e', - 'Value': 'b' - }, { - 'Key': 'f', - 'Value': 'b' - }, { - 'Key': 'g', - 'Value': 'b' - }, { - 'Key': 'h', - 'Value': 'b' - }, { - 'Key': 'i', - 'Value': 'b' - }, { - 'Key': 'j', - 'Value': 'b' - }]) - - client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'j', - 'Value': 'c' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - - tags.should.have.key('a').which.should.equal('b') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('i').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - tags.shouldnt.have.key('k') - - client.remove_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - - tags.shouldnt.have.key('a') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('i').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - - client.create_load_balancer( - LoadBalancerName='other-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - client.add_tags(LoadBalancerNames=['other-lb'], - Tags=[{ - 'Key': 'other', - 'Value': 'something' - }]) - - lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) - for l in client.describe_tags(LoadBalancerNames=['my-lb', 'other-lb'])['TagDescriptions']]) - - lb_tags.should.have.key('my-lb') - lb_tags.should.have.key('other-lb') - - lb_tags['my-lb'].shouldnt.have.key('other') - lb_tags[ - 'other-lb'].should.have.key('other').which.should.equal('something') - - -@mock_elb -def test_create_with_tags(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'], - Tags=[{ - 'Key': 'k', - 'Value': 'v' - }] - ) - - tags = dict((d['Key'], d['Value']) for d in 
client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) - tags.should.have.key('k').which.should.equal('v') - - -@mock_elb -def test_modify_attributes(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - # Default ConnectionDraining timeout of 300 seconds - client.modify_load_balancer_attributes( - LoadBalancerName='my-lb', - LoadBalancerAttributes={ - 'ConnectionDraining': {'Enabled': True}, - } - ) - lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(300) - - # specify a custom ConnectionDraining timeout - client.modify_load_balancer_attributes( - LoadBalancerName='my-lb', - LoadBalancerAttributes={ - 'ConnectionDraining': { - 'Enabled': True, - 'Timeout': 45, - }, - } - ) - lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(45) - - -@mock_ec2 -@mock_elb -def test_subnets(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc( - CidrBlock='172.28.7.0/24', - InstanceTenancy='default' - ) - subnet = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26' - ) - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - Subnets=[subnet.id] - ) - - lb = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - lb.should.have.key('Subnets').which.should.have.length_of(1) - lb['Subnets'][0].should.equal(subnet.id) - - lb.should.have.key('VPCId').which.should.equal(vpc.id) - - -@mock_elb_deprecated -def test_create_load_balancer_duplicate(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', [], ports) - conn.create_load_balancer.when.called_with( - 'my-lb', [], ports).should.throw(BotoServerError) +from __future__ import unicode_literals +import boto3 +import botocore +import boto +import boto.ec2.elb +from boto.ec2.elb import HealthCheck +from boto.ec2.elb.attributes import ( + ConnectionSettingAttribute, + ConnectionDrainingAttribute, + AccessLogAttribute, +) +from botocore.exceptions import ClientError +from boto.exception import BotoServerError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated + + +@mock_elb_deprecated +@mock_ec2_deprecated +def test_create_load_balancer(): + conn = boto.connect_elb() + ec2 = boto.ec2.connect_to_region("us-east-1") + + security_group = ec2.create_security_group('sg-abc987', 'description') + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) + + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + balancer.name.should.equal("my-lb") + balancer.scheme.should.equal("internal") + list(balancer.security_groups).should.equal([security_group.id]) + 
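
The only functional change in this otherwise wholesale rewrite of test_elb.py is just above: the EC2 connection is now opened with boto.ec2.connect_to_region("us-east-1") rather than boto.connect_ec2 with dummy credentials, so the security group is created in the same region as the load balancer. The pattern in isolation (a sketch):

import boto.ec2
from moto import mock_ec2_deprecated

@mock_ec2_deprecated
def regional_security_group():
    # Region-pinned connection, matching the region the ELB tests use
    ec2 = boto.ec2.connect_to_region("us-east-1")
    security_group = ec2.create_security_group('sg-abc987', 'description')
    print(security_group.id)

regional_security_group()
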
set(balancer.availability_zones).should.equal( + set(['us-east-1a', 'us-east-1b'])) + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(80) + listener1.instance_port.should.equal(8080) + listener1.protocol.should.equal("HTTP") + listener2 = balancer.listeners[1] + listener2.load_balancer_port.should.equal(443) + listener2.instance_port.should.equal(8443) + listener2.protocol.should.equal("TCP") + + +@mock_elb_deprecated +def test_getting_missing_elb(): + conn = boto.connect_elb() + conn.get_all_load_balancers.when.called_with( + load_balancer_names='aaa').should.throw(BotoServerError) + + +@mock_elb_deprecated +def test_create_elb_in_multiple_region(): + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + + west1_conn = boto.ec2.elb.connect_to_region("us-west-1") + west1_conn.create_load_balancer('my-lb', zones, ports) + + west2_conn = boto.ec2.elb.connect_to_region("us-west-2") + west2_conn.create_load_balancer('my-lb', zones, ports) + + list(west1_conn.get_all_load_balancers()).should.have.length_of(1) + list(west2_conn.get_all_load_balancers()).should.have.length_of(1) + + +@mock_elb_deprecated +def test_create_load_balancer_with_certificate(): + conn = boto.connect_elb() + + zones = ['us-east-1a'] + ports = [ + (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] + conn.create_load_balancer('my-lb', zones, ports) + + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + balancer.name.should.equal("my-lb") + balancer.scheme.should.equal("internet-facing") + set(balancer.availability_zones).should.equal(set(['us-east-1a'])) + listener = balancer.listeners[0] + listener.load_balancer_port.should.equal(443) + listener.instance_port.should.equal(8443) + listener.protocol.should.equal("HTTPS") + listener.ssl_certificate_id.should.equal( + 'arn:aws:iam:123456789012:server-certificate/test-cert') + + +@mock_elb +def test_create_and_delete_boto3_support(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) + + client.delete_load_balancer( + LoadBalancerName='my-lb' + ) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(0) + + +@mock_elb +def test_create_load_balancer_with_no_listeners_defined(): + client = boto3.client('elb', region_name='us-east-1') + + with assert_raises(ClientError): + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + +@mock_elb +def test_describe_paginated_balancers(): + client = boto3.client('elb', region_name='us-east-1') + + for i in range(51): + client.create_load_balancer( + LoadBalancerName='my-lb%d' % i, + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + resp = client.describe_load_balancers() + resp['LoadBalancerDescriptions'].should.have.length_of(50) + resp['NextMarker'].should.equal(resp['LoadBalancerDescriptions'][-1]['LoadBalancerName']) + resp2 = client.describe_load_balancers(Marker=resp['NextMarker']) + resp2['LoadBalancerDescriptions'].should.have.length_of(1) + assert 'NextToken' not in resp2.keys() + + +@mock_elb +@mock_ec2 +def 
test_apply_security_groups_to_load_balancer(): + client = boto3.client('elb', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + security_group = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + response = client.apply_security_groups_to_load_balancer( + LoadBalancerName='my-lb', + SecurityGroups=[security_group.id]) + + assert response['SecurityGroups'] == [security_group.id] + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + assert balancer['SecurityGroups'] == [security_group.id] + + # Using a not-real security group raises an error + with assert_raises(ClientError) as error: + response = client.apply_security_groups_to_load_balancer( + LoadBalancerName='my-lb', + SecurityGroups=['not-really-a-security-group']) + assert "One or more of the specified security groups do not exist." in str(error.exception) + + +@mock_elb_deprecated +def test_add_listener(): + conn = boto.connect_elb() + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http')] + conn.create_load_balancer('my-lb', zones, ports) + new_listener = (443, 8443, 'tcp') + conn.create_load_balancer_listeners('my-lb', [new_listener]) + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(80) + listener1.instance_port.should.equal(8080) + listener1.protocol.should.equal("HTTP") + listener2 = balancer.listeners[1] + listener2.load_balancer_port.should.equal(443) + listener2.instance_port.should.equal(8443) + listener2.protocol.should.equal("TCP") + + +@mock_elb_deprecated +def test_delete_listener(): + conn = boto.connect_elb() + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', zones, ports) + conn.delete_load_balancer_listeners('my-lb', [443]) + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(80) + listener1.instance_port.should.equal(8080) + listener1.protocol.should.equal("HTTP") + balancer.listeners.should.have.length_of(1) + + +@mock_elb +def test_create_and_delete_listener_boto3_support(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) + + client.create_load_balancer_listeners( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}] + ) + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + list(balancer['ListenerDescriptions']).should.have.length_of(2) + balancer['ListenerDescriptions'][0][ + 'Listener']['Protocol'].should.equal('HTTP') + balancer['ListenerDescriptions'][0]['Listener'][ + 'LoadBalancerPort'].should.equal(80) + balancer['ListenerDescriptions'][0]['Listener'][ + 'InstancePort'].should.equal(8080) + balancer['ListenerDescriptions'][1][ + 'Listener']['Protocol'].should.equal('TCP') + 
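
test_apply_security_groups_to_load_balancer above checks both the happy path and the failure mode. The error handling on its own, as a sketch (group names are illustrative):

import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2, mock_elb

@mock_ec2
@mock_elb
def swap_security_groups():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
    sg = ec2.create_security_group(
        GroupName='sg01', Description='demo group', VpcId=vpc.id)
    client = boto3.client('elb', region_name='us-east-1')
    client.create_load_balancer(
        LoadBalancerName='my-lb',
        Listeners=[{'Protocol': 'tcp',
                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
        AvailabilityZones=['us-east-1a'])
    client.apply_security_groups_to_load_balancer(
        LoadBalancerName='my-lb', SecurityGroups=[sg.id])
    try:
        client.apply_security_groups_to_load_balancer(
            LoadBalancerName='my-lb', SecurityGroups=['sg-does-not-exist'])
    except ClientError as err:
        # "One or more of the specified security groups do not exist."
        print(err.response['Error']['Message'])
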
balancer['ListenerDescriptions'][1]['Listener'][
+        'LoadBalancerPort'].should.equal(443)
+    balancer['ListenerDescriptions'][1]['Listener'][
+        'InstancePort'].should.equal(8443)
+
+    # Creating this listener with a conflicting definition throws an error
+    with assert_raises(ClientError):
+        client.create_load_balancer_listeners(
+            LoadBalancerName='my-lb',
+            Listeners=[
+                {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}]
+        )
+
+    client.delete_load_balancer_listeners(
+        LoadBalancerName='my-lb',
+        LoadBalancerPorts=[443])
+
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    list(balancer['ListenerDescriptions']).should.have.length_of(1)
+
+
+@mock_elb_deprecated
+def test_set_sslcertificate():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb', zones, ports)
+    conn.set_lb_listener_SSL_certificate('my-lb', '443', 'arn:certificate')
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    listener1 = balancer.listeners[0]
+    listener1.load_balancer_port.should.equal(443)
+    listener1.instance_port.should.equal(8443)
+    listener1.protocol.should.equal("TCP")
+    listener1.ssl_certificate_id.should.equal("arn:certificate")
+
+
+@mock_elb_deprecated
+def test_get_load_balancers_by_name():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb1', zones, ports)
+    conn.create_load_balancer('my-lb2', zones, ports)
+    conn.create_load_balancer('my-lb3', zones, ports)
+
+    conn.get_all_load_balancers().should.have.length_of(3)
+    conn.get_all_load_balancers(
+        load_balancer_names=['my-lb1']).should.have.length_of(1)
+    conn.get_all_load_balancers(
+        load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2)
+
+
+@mock_elb_deprecated
+def test_delete_load_balancer():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a']
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb', zones, ports)
+
+    balancers = conn.get_all_load_balancers()
+    balancers.should.have.length_of(1)
+
+    conn.delete_load_balancer("my-lb")
+    balancers = conn.get_all_load_balancers()
+    balancers.should.have.length_of(0)
+
+
+@mock_elb_deprecated
+def test_create_health_check():
+    conn = boto.connect_elb()
+
+    hc = HealthCheck(
+        interval=20,
+        healthy_threshold=3,
+        unhealthy_threshold=5,
+        target='HTTP:8080/health',
+        timeout=23,
+    )
+
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    lb = conn.create_load_balancer('my-lb', [], ports)
+    lb.configure_health_check(hc)
+
+    balancer = conn.get_all_load_balancers()[0]
+    health_check = balancer.health_check
+    health_check.interval.should.equal(20)
+    health_check.healthy_threshold.should.equal(3)
+    health_check.unhealthy_threshold.should.equal(5)
+    health_check.target.should.equal('HTTP:8080/health')
+    health_check.timeout.should.equal(23)
+
+
+@mock_elb
+def test_create_health_check_boto3():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[{'Protocol': 'http',
+                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+    client.configure_health_check(
+        LoadBalancerName='my-lb',
+        HealthCheck={
+            'Target': 'HTTP:8080/health',
+            'Interval': 20,
+            'Timeout': 23,
+            'HealthyThreshold': 3,
+            'UnhealthyThreshold': 5
+        }
+    )
+
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    
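
The conflict case above is the interesting edge: re-declaring port 443 with a different InstancePort is rejected, while deleting by port number then describing shows the listener gone. A sketch of just the conflict (the printed error code is whatever moto raises; the test only requires a ClientError):

import boto3
from botocore.exceptions import ClientError
from moto import mock_elb

@mock_elb
def conflicting_listener():
    client = boto3.client('elb', region_name='us-east-1')
    client.create_load_balancer(
        LoadBalancerName='my-lb',
        Listeners=[{'Protocol': 'tcp',
                    'LoadBalancerPort': 443, 'InstancePort': 8443}],
        AvailabilityZones=['us-east-1a'])
    try:
        # Same load balancer port, different instance port: rejected
        client.create_load_balancer_listeners(
            LoadBalancerName='my-lb',
            Listeners=[{'Protocol': 'tcp',
                        'LoadBalancerPort': 443, 'InstancePort': 1234}])
    except ClientError as err:
        print(err.response['Error']['Code'])
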
balancer['HealthCheck']['Target'].should.equal('HTTP:8080/health') + balancer['HealthCheck']['Interval'].should.equal(20) + balancer['HealthCheck']['Timeout'].should.equal(23) + balancer['HealthCheck']['HealthyThreshold'].should.equal(3) + balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_register_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + instance_ids = [instance.id for instance in balancer.instances] + set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2 +@mock_elb +def test_register_instances_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.register_instances_with_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1}, + {'InstanceId': instance_id2} + ] + ) + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + instance_ids = [instance['InstanceId'] + for instance in balancer['Instances']] + set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_deregister_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + balancer.instances.should.have.length_of(2) + balancer.deregister_instances([instance_id1]) + + balancer.instances.should.have.length_of(1) + balancer.instances[0].id.should.equal(instance_id2) + + +@mock_ec2 +@mock_elb +def test_deregister_instances_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.register_instances_with_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1}, + {'InstanceId': instance_id2} + ] + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['Instances'].should.have.length_of(2) + + client.deregister_instances_from_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1} + ] + ) + + balancer = 
client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['Instances'].should.have.length_of(1) + balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) + + +@mock_elb_deprecated +def test_default_attributes(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + attributes = lb.get_attributes() + + attributes.cross_zone_load_balancing.enabled.should.be.false + attributes.connection_draining.enabled.should.be.false + attributes.access_log.enabled.should.be.false + attributes.connecting_settings.idle_timeout.should.equal(60) + + +@mock_elb_deprecated +def test_cross_zone_load_balancing_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True) + attributes = lb.get_attributes(force=True) + attributes.cross_zone_load_balancing.enabled.should.be.true + + conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False) + attributes = lb.get_attributes(force=True) + attributes.cross_zone_load_balancing.enabled.should.be.false + + +@mock_elb_deprecated +def test_connection_draining_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + connection_draining = ConnectionDrainingAttribute() + connection_draining.enabled = True + connection_draining.timeout = 60 + + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) + attributes = lb.get_attributes(force=True) + attributes.connection_draining.enabled.should.be.true + attributes.connection_draining.timeout.should.equal(60) + + connection_draining.timeout = 30 + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) + attributes = lb.get_attributes(force=True) + attributes.connection_draining.timeout.should.equal(30) + + connection_draining.enabled = False + conn.modify_lb_attribute( + "my-lb", "ConnectionDraining", connection_draining) + attributes = lb.get_attributes(force=True) + attributes.connection_draining.enabled.should.be.false + + +@mock_elb_deprecated +def test_access_log_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + access_log = AccessLogAttribute() + access_log.enabled = True + access_log.s3_bucket_name = 'bucket' + access_log.s3_bucket_prefix = 'prefix' + access_log.emit_interval = 60 + + conn.modify_lb_attribute("my-lb", "AccessLog", access_log) + attributes = lb.get_attributes(force=True) + attributes.access_log.enabled.should.be.true + attributes.access_log.s3_bucket_name.should.equal("bucket") + attributes.access_log.s3_bucket_prefix.should.equal("prefix") + attributes.access_log.emit_interval.should.equal(60) + + access_log.enabled = False + conn.modify_lb_attribute("my-lb", "AccessLog", access_log) + attributes = lb.get_attributes(force=True) + attributes.access_log.enabled.should.be.false + + +@mock_elb_deprecated +def test_connection_settings_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + connection_settings = ConnectionSettingAttribute(conn) + connection_settings.idle_timeout = 120 + + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) + attributes = lb.get_attributes(force=True) + 
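
The attribute tests above lean on one subtlety of the deprecated API: get_attributes() caches its result, so every modify_lb_attribute is followed by get_attributes(force=True). Condensed (values from the tests):

import boto
from boto.ec2.elb.attributes import ConnectionDrainingAttribute
from moto import mock_elb_deprecated

@mock_elb_deprecated
def toggle_attributes():
    conn = boto.connect_elb()
    lb = conn.create_load_balancer('my-lb', [], [(80, 8080, 'http')])
    conn.modify_lb_attribute('my-lb', 'CrossZoneLoadBalancing', True)
    draining = ConnectionDrainingAttribute()
    draining.enabled = True
    draining.timeout = 60
    conn.modify_lb_attribute('my-lb', 'ConnectionDraining', draining)
    attributes = lb.get_attributes(force=True)  # bypass the cached copy
    print(attributes.cross_zone_load_balancing.enabled,
          attributes.connection_draining.timeout)
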
attributes.connecting_settings.idle_timeout.should.equal(120) + + connection_settings.idle_timeout = 60 + conn.modify_lb_attribute( + "my-lb", "ConnectingSettings", connection_settings) + attributes = lb.get_attributes(force=True) + attributes.connecting_settings.idle_timeout.should.equal(60) + + +@mock_elb_deprecated +def test_create_lb_cookie_stickiness_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + cookie_expiration_period = 60 + policy_name = "LBCookieStickinessPolicy" + + lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name) + + lb = conn.get_all_load_balancers()[0] + # There appears to be a quirk about boto, whereby it returns a unicode + # string for cookie_expiration_period, despite being stated in + # documentation to be a long numeric. + # + # To work around that, this value is converted to an int and checked. + cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period + int(cookie_expiration_period_response_str).should.equal( + cookie_expiration_period) + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_lb_cookie_stickiness_policy_no_expiry(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + policy_name = "LBCookieStickinessPolicy" + + lb.create_cookie_stickiness_policy(None, policy_name) + + lb = conn.get_all_load_balancers()[0] + lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period.should.be.none + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_app_cookie_stickiness_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + cookie_name = "my-stickiness-policy" + policy_name = "AppCookieStickinessPolicy" + + lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) + + lb = conn.get_all_load_balancers()[0] + lb.policies.app_cookie_stickiness_policies[ + 0].cookie_name.should.equal(cookie_name) + lb.policies.app_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_lb_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + policy_name = "ProxyPolicy" + + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) + + lb = conn.get_all_load_balancers()[0] + lb.policies.other_policies[0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_set_policies_of_listener(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + listener_port = 80 + policy_name = "my-stickiness-policy" + + # boto docs currently state that zero or one policy may be associated + # with a given listener + + # in a real flow, it is necessary first to create a policy, + # then to set that policy to the listener + lb.create_cookie_stickiness_policy(None, policy_name) + lb.set_policies_of_listener(listener_port, [policy_name]) + + lb = conn.get_all_load_balancers()[0] + listener = lb.listeners[0] + listener.load_balancer_port.should.equal(listener_port) + # by contrast to a backend, a listener stores only policy name 
strings + listener.policy_names[0].should.equal(policy_name) + + +@mock_elb_deprecated +def test_set_policies_of_backend_server(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + instance_port = 8080 + policy_name = "ProxyPolicy" + + # in a real flow, it is necessary first to create a policy, + # then to set that policy to the backend + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) + lb.set_policies_of_backend_server(instance_port, [policy_name]) + + lb = conn.get_all_load_balancers()[0] + backend = lb.backends[0] + backend.instance_port.should.equal(instance_port) + # by contrast to a listener, a backend stores OtherPolicy objects + backend.policies[0].policy_name.should.equal(policy_name) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_describe_instance_health(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', zones, ports) + + instances_health = conn.describe_instance_health('my-lb') + instances_health.should.be.empty + + lb.register_instances([instance_id1, instance_id2]) + + instances_health = conn.describe_instance_health('my-lb') + instances_health.should.have.length_of(2) + for instance_health in instances_health: + instance_health.instance_id.should.be.within( + [instance_id1, instance_id2]) + instance_health.state.should.equal('InService') + + instances_health = conn.describe_instance_health('my-lb', [instance_id1]) + instances_health.should.have.length_of(1) + instances_health[0].instance_id.should.equal(instance_id1) + instances_health[0].state.should.equal('InService') + + +@mock_ec2 +@mock_elb +def test_describe_instance_health_boto3(): + elb = boto3.client('elb', region_name="us-east-1") + ec2 = boto3.client('ec2', region_name="us-east-1") + instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] + lb_name = "my_load_balancer" + elb.create_load_balancer( + Listeners=[{ + 'InstancePort': 80, + 'LoadBalancerPort': 8080, + 'Protocol': 'HTTP' + }], + LoadBalancerName=lb_name, + ) + elb.register_instances_with_load_balancer( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instances[0]['InstanceId']}] + ) + instances_health = elb.describe_instance_health( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] + ) + instances_health['InstanceStates'].should.have.length_of(2) + instances_health['InstanceStates'][0]['InstanceId'].\ + should.equal(instances[0]['InstanceId']) + instances_health['InstanceStates'][0]['State'].\ + should.equal('InService') + instances_health['InstanceStates'][1]['InstanceId'].\ + should.equal(instances[1]['InstanceId']) + instances_health['InstanceStates'][1]['State'].\ + should.equal('Unknown') + + +@mock_elb +def test_add_remove_tags(): + client = boto3.client('elb', region_name='us-east-1') + + client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + 
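
test_describe_instance_health_boto3 above captures the distinction worth remembering: an instance registered with the balancer reports 'InService', while an unregistered instance queried by id reports 'Unknown'. The same contrast as a sketch (the ImageId is borrowed from the deprecated tests, since the boto3 test above omits it entirely):

import boto3
from moto import mock_ec2, mock_elb

@mock_ec2
@mock_elb
def health_states():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    elb = boto3.client('elb', region_name='us-east-1')
    instances = ec2.run_instances(
        ImageId='ami-1234abcd', MinCount=2, MaxCount=2)['Instances']
    elb.create_load_balancer(
        LoadBalancerName='my-lb',
        Listeners=[{'Protocol': 'http',
                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
        AvailabilityZones=['us-east-1a'])
    # Register only the first instance
    elb.register_instances_with_load_balancer(
        LoadBalancerName='my-lb',
        Instances=[{'InstanceId': instances[0]['InstanceId']}])
    states = elb.describe_instance_health(
        LoadBalancerName='my-lb',
        Instances=[{'InstanceId': i['InstanceId']}
                   for i in instances])['InstanceStates']
    # First: 'InService' (registered); second: 'Unknown' (never registered)
    print([(s['InstanceId'], s['State']) for s in states])
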
list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags.should.have.key('a').which.should.equal('b') + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'i', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('i').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + client.remove_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('i').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + client.create_load_balancer( + LoadBalancerName='other-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client.add_tags(LoadBalancerNames=['other-lb'], + Tags=[{ + 'Key': 'other', + 'Value': 'something' + }]) + + lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) + for l in client.describe_tags(LoadBalancerNames=['my-lb', 'other-lb'])['TagDescriptions']]) + + lb_tags.should.have.key('my-lb') + lb_tags.should.have.key('other-lb') + + lb_tags['my-lb'].shouldnt.have.key('other') + lb_tags[ + 'other-lb'].should.have.key('other').which.should.equal('something') + + +@mock_elb +def test_create_with_tags(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'], + Tags=[{ + 'Key': 'k', + 'Value': 'v' + }] + ) + + tags = dict((d['Key'], d['Value']) for d in 
client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) + tags.should.have.key('k').which.should.equal('v') + + +@mock_elb +def test_modify_attributes(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + # Default ConnectionDraining timeout of 300 seconds + client.modify_load_balancer_attributes( + LoadBalancerName='my-lb', + LoadBalancerAttributes={ + 'ConnectionDraining': {'Enabled': True}, + } + ) + lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(300) + + # specify a custom ConnectionDraining timeout + client.modify_load_balancer_attributes( + LoadBalancerName='my-lb', + LoadBalancerAttributes={ + 'ConnectionDraining': { + 'Enabled': True, + 'Timeout': 45, + }, + } + ) + lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(45) + + +@mock_ec2 +@mock_elb +def test_subnets(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc( + CidrBlock='172.28.7.0/24', + InstanceTenancy='default' + ) + subnet = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26' + ) + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + Subnets=[subnet.id] + ) + + lb = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + lb.should.have.key('Subnets').which.should.have.length_of(1) + lb['Subnets'][0].should.equal(subnet.id) + + lb.should.have.key('VPCId').which.should.equal(vpc.id) + + +@mock_elb_deprecated +def test_create_load_balancer_duplicate(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', [], ports) + conn.create_load_balancer.when.called_with( + 'my-lb', [], ports).should.throw(BotoServerError) diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index cf0722bb2..03273ad3a 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,1588 +1,1728 @@ -from __future__ import unicode_literals - -import json -import os -import boto3 -import botocore -from botocore.exceptions import ClientError -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation -from moto.elbv2 import elbv2_backends - - -@mock_elbv2 -@mock_ec2 -def test_create_load_balancer(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - 
Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - lb = response.get('LoadBalancers')[0] - - lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") - lb.get('LoadBalancerArn').should.equal( - 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') - lb.get('SecurityGroups').should.equal([security_group.id]) - lb.get('AvailabilityZones').should.equal([ - {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, - {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) - - # Ensure the tags persisted - response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) - tags = {d['Key']: d['Value'] - for d in response['TagDescriptions'][0]['Tags']} - tags.should.equal({'key_name': 'a_value'}) - - -@mock_elbv2 -@mock_ec2 -def test_describe_load_balancers(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.describe_load_balancers() - - response.get('LoadBalancers').should.have.length_of(1) - lb = response.get('LoadBalancers')[0] - lb.get('LoadBalancerName').should.equal('my-lb') - - response = conn.describe_load_balancers( - LoadBalancerArns=[lb.get('LoadBalancerArn')]) - response.get('LoadBalancers')[0].get( - 'LoadBalancerName').should.equal('my-lb') - - response = conn.describe_load_balancers(Names=['my-lb']) - response.get('LoadBalancers')[0].get( - 'LoadBalancerName').should.equal('my-lb') - - with assert_raises(ClientError): - conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) - with assert_raises(ClientError): - conn.describe_load_balancers(Names=['nope']) - - -@mock_elbv2 -@mock_ec2 -def test_add_remove_tags(): - conn = boto3.client('elbv2', region_name='us-east-1') - - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - lbs = conn.describe_load_balancers()['LoadBalancers'] - lbs.should.have.length_of(1) - lb = lbs[0] - - with assert_raises(ClientError): - conn.add_tags(ResourceArns=['missing-arn'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - 
tags.should.have.key('a').which.should.equal('b') - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }, { - 'Key': 'b', - 'Value': 'b' - }, { - 'Key': 'c', - 'Value': 'b' - }, { - 'Key': 'd', - 'Value': 'b' - }, { - 'Key': 'e', - 'Value': 'b' - }, { - 'Key': 'f', - 'Value': 'b' - }, { - 'Key': 'g', - 'Value': 'b' - }, { - 'Key': 'h', - 'Value': 'b' - }, { - 'Key': 'j', - 'Value': 'b' - }]) - - conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'j', - 'Value': 'c' - }]) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - - tags.should.have.key('a').which.should.equal('b') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - tags.shouldnt.have.key('k') - - conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], - TagKeys=['a']) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - - tags.shouldnt.have.key('a') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - - -@mock_elbv2 -@mock_ec2 -def test_create_elb_in_multiple_region(): - for region in ['us-west-1', 'us-west-2']: - conn = boto3.client('elbv2', region_name=region) - ec2 = boto3.resource('ec2', region_name=region) - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc( - CidrBlock='172.28.7.0/24', - InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone=region + 'a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone=region + 'b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - list( - boto3.client( - 'elbv2', - region_name='us-west-1').describe_load_balancers().get('LoadBalancers') - ).should.have.length_of(1) - list( - boto3.client( - 'elbv2', - region_name='us-west-2').describe_load_balancers().get('LoadBalancers') - ).should.have.length_of(1) - - -@mock_elbv2 -@mock_ec2 -def test_create_target_group_and_listeners(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - 
CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - # Can't create a target group with an invalid protocol - with assert_raises(ClientError): - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='/HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - target_group_arn = target_group['TargetGroupArn'] - - # Add tags to the target group - conn.add_tags(ResourceArns=[target_group_arn], Tags=[ - {'Key': 'target', 'Value': 'group'}]) - conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( - [{'Key': 'target', 'Value': 'group'}]) - - # Check it's in the describe_target_groups response - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - - # Plain HTTP listener - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(80) - listener.get('Protocol').should.equal('HTTP') - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - http_listener_arn = listener.get('ListenerArn') - - response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, - Names=['a-target']) - response.get('TargetGroups').should.have.length_of(1) - - # And another with SSL - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTPS', - Port=443, - Certificates=[ - {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(443) - listener.get('Protocol').should.equal('HTTPS') - listener.get('Certificates').should.equal([{ - 'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert', - }]) - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - - https_listener_arn = listener.get('ListenerArn') - - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(2) - response = conn.describe_listeners(ListenerArns=[https_listener_arn]) - response.get('Listeners').should.have.length_of(1) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(443) - 
listener.get('Protocol').should.equal('HTTPS') - - response = conn.describe_listeners( - ListenerArns=[ - http_listener_arn, - https_listener_arn]) - response.get('Listeners').should.have.length_of(2) - - # Try to delete the target group and it fails because there's a - # listener referencing it - with assert_raises(ClientError) as e: - conn.delete_target_group( - TargetGroupArn=target_group.get('TargetGroupArn')) - e.exception.operation_name.should.equal('DeleteTargetGroup') - e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA - - # Delete one listener - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(2) - conn.delete_listener(ListenerArn=http_listener_arn) - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(1) - - # Then delete the load balancer - conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn) - - # It's gone - response = conn.describe_load_balancers() - response.get('LoadBalancers').should.have.length_of(0) - - # And it deleted the remaining listener - response = conn.describe_listeners( - ListenerArns=[ - http_listener_arn, - https_listener_arn]) - response.get('Listeners').should.have.length_of(0) - - # But not the target groups - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - - # Which we'll now delete - conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(0) - - -@mock_elbv2 -@mock_ec2 -def test_create_target_group_without_non_required_parameters(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - # request without HealthCheckIntervalSeconds parameter - # which is default to 30 seconds - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080' - ) - target_group = response.get('TargetGroups')[0] - target_group.should_not.be.none - - -@mock_elbv2 -@mock_ec2 -def test_create_invalid_target_group(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - - # Fail to create target group with name which length is 33 - long_name = 'A' * 33 - with assert_raises(ClientError): - conn.create_target_group( - Name=long_name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - 
HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - invalid_names = [ - '-name', - 'name-', - '-name-', - 'example.com', - 'test@test', - 'Na--me'] - for name in invalid_names: - with assert_raises(ClientError): - conn.create_target_group( - Name=name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - valid_names = ['name', 'Name', '000'] - for name in valid_names: - conn.create_target_group( - Name=name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - -@mock_elbv2 -@mock_ec2 -def test_describe_paginated_balancers(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - for i in range(51): - conn.create_load_balancer( - Name='my-lb%d' % i, - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - resp = conn.describe_load_balancers() - resp['LoadBalancers'].should.have.length_of(50) - resp['NextMarker'].should.equal( - resp['LoadBalancers'][-1]['LoadBalancerName']) - resp2 = conn.describe_load_balancers(Marker=resp['NextMarker']) - resp2['LoadBalancers'].should.have.length_of(1) - assert 'NextToken' not in resp2.keys() - - -@mock_elbv2 -@mock_ec2 -def test_delete_load_balancer(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers').should.have.length_of(1) - lb = response.get('LoadBalancers')[0] - - conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) - balancers = conn.describe_load_balancers().get('LoadBalancers') - balancers.should.have.length_of(0) - - -@mock_ec2 -@mock_elbv2 -def test_register_targets(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', 
InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # No targets registered yet - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(0) - - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - response = conn.register_targets( - TargetGroupArn=target_group.get('TargetGroupArn'), - Targets=[ - { - 'Id': instance_id1, - 'Port': 5060, - }, - { - 'Id': instance_id2, - 'Port': 4030, - }, - ]) - - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(2) - - response = conn.deregister_targets( - TargetGroupArn=target_group.get('TargetGroupArn'), - Targets=[{'Id': instance_id2}]) - - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(1) - - -@mock_ec2 -@mock_elbv2 -def test_target_group_attributes(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # Check it's in the describe_target_groups response - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - target_group_arn = target_group['TargetGroupArn'] - - # check if Names filter works - response = conn.describe_target_groups(Names=[]) - response = conn.describe_target_groups(Names=['a-target']) - response.get('TargetGroups').should.have.length_of(1) - target_group_arn = target_group['TargetGroupArn'] - - # The attributes should start with the two defaults - response = 
conn.describe_target_group_attributes( - TargetGroupArn=target_group_arn) - response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['deregistration_delay.timeout_seconds'].should.equal('300') - attributes['stickiness.enabled'].should.equal('false') - - # Add cookie stickiness - response = conn.modify_target_group_attributes( - TargetGroupArn=target_group_arn, - Attributes=[ - { - 'Key': 'stickiness.enabled', - 'Value': 'true', - }, - { - 'Key': 'stickiness.type', - 'Value': 'lb_cookie', - }, - ]) - - # The response should have only the keys updated - response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['stickiness.type'].should.equal('lb_cookie') - attributes['stickiness.enabled'].should.equal('true') - - # These new values should be in the full attribute list - response = conn.describe_target_group_attributes( - TargetGroupArn=target_group_arn) - response['Attributes'].should.have.length_of(3) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['stickiness.type'].should.equal('lb_cookie') - attributes['stickiness.enabled'].should.equal('true') - - -@mock_elbv2 -@mock_ec2 -def test_handle_listener_rules(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - # Can't create a target group with an invalid protocol - with assert_raises(ClientError): - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='/HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # Plain HTTP listener - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(80) - listener.get('Protocol').should.equal('HTTP') - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - http_listener_arn = listener.get('ListenerArn') - - # create first rule - priority = 100 - host = 'xxx.example.com' - path_pattern 
= 'foobar' - created_rule = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - )['Rules'][0] - created_rule['Priority'].should.equal('100') - - # check if rules is sorted by priority - priority = 50 - host = 'yyy.example.com' - path_pattern = 'foobar' - rules = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for PriorityInUse - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for describe listeners - obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) - len(obtained_rules['Rules']).should.equal(3) - priorities = [rule['Priority'] for rule in obtained_rules['Rules']] - priorities.should.equal(['50', '100', 'default']) - - first_rule = obtained_rules['Rules'][0] - second_rule = obtained_rules['Rules'][1] - obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) - obtained_rules['Rules'].should.equal([first_rule]) - - # test for pagination - obtained_rules = conn.describe_rules( - ListenerArn=http_listener_arn, PageSize=1) - len(obtained_rules['Rules']).should.equal(1) - obtained_rules.should.have.key('NextMarker') - next_marker = obtained_rules['NextMarker'] - - following_rules = conn.describe_rules( - ListenerArn=http_listener_arn, - PageSize=1, - Marker=next_marker) - len(following_rules['Rules']).should.equal(1) - following_rules.should.have.key('NextMarker') - following_rules['Rules'][0]['RuleArn'].should_not.equal( - obtained_rules['Rules'][0]['RuleArn']) - - # test for invalid describe rule request - with assert_raises(ClientError): - conn.describe_rules() - with assert_raises(ClientError): - conn.describe_rules(RuleArns=[]) - with assert_raises(ClientError): - conn.describe_rules( - ListenerArn=http_listener_arn, - RuleArns=[first_rule['RuleArn']] - ) - - # modify rule partially - new_host = 'new.example.com' - new_path_pattern = 'new_path' - modified_rule = conn.modify_rule( - RuleArn=first_rule['RuleArn'], - Conditions=[{ - 'Field': 'host-header', - 'Values': [new_host] - }, - { - 'Field': 'path-pattern', - 'Values': [new_path_pattern] - }] - )['Rules'][0] - - rules = conn.describe_rules(ListenerArn=http_listener_arn) - obtained_rule = rules['Rules'][0] - modified_rule.should.equal(obtained_rule) - obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host) - obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern) - obtained_rule['Actions'][0]['TargetGroupArn'].should.equal( - target_group.get('TargetGroupArn')) - - # modify priority - conn.set_rule_priorities( - RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], - 'Priority': int(first_rule['Priority']) - 1} - ] - ) - with assert_raises(ClientError): - conn.set_rule_priorities( - RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], 'Priority': 999}, - {'RuleArn': 
second_rule['RuleArn'], 'Priority': 999} - ] - ) - - # delete - arn = first_rule['RuleArn'] - conn.delete_rule(RuleArn=arn) - rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules'] - len(rules).should.equal(2) - - # test for invalid action type - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward2' - }] - ) - - # test for invalid action type - safe_priority = 2 - invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x' - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': invalid_target_group_arn, - 'Type': 'forward' - }] - ) - - # test for invalid condition field_name - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'xxxxxxx', - 'Values': [host] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for emptry condition value - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for multiple condition value - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host, host] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - -@mock_elbv2 -@mock_ec2 -def test_describe_invalid_target_group(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers')[0].get('LoadBalancerArn') - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - # Check error raises correctly - with assert_raises(ClientError): - conn.describe_target_groups(Names=['invalid']) - - -@mock_elbv2 -@mock_ec2 -def test_describe_target_groups_no_arguments(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = 
boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers')[0].get('LoadBalancerArn') - - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - assert len(conn.describe_target_groups()['TargetGroups']) == 1 - - -@mock_elbv2 -def test_describe_account_limits(): - client = boto3.client('elbv2', region_name='eu-central-1') - - resp = client.describe_account_limits() - resp['Limits'][0].should.contain('Name') - resp['Limits'][0].should.contain('Max') - - -@mock_elbv2 -def test_describe_ssl_policies(): - client = boto3.client('elbv2', region_name='eu-central-1') - - resp = client.describe_ssl_policies() - len(resp['SslPolicies']).should.equal(5) - - resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) - len(resp['SslPolicies']).should.equal(2) - - -@mock_elbv2 -@mock_ec2 -def test_set_ip_address_type(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - # Internal LBs cant be dualstack yet - with assert_raises(ClientError): - client.set_ip_address_type( - LoadBalancerArn=arn, - IpAddressType='dualstack' - ) - - # Create internet facing one - response = client.create_load_balancer( - Name='my-lb2', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internet-facing', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_ip_address_type( - LoadBalancerArn=arn, - IpAddressType='dualstack' - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_security_groups(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - security_group2 = ec2.create_security_group( - GroupName='b-security-group', Description='Second One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') 
- subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_security_groups( - LoadBalancerArn=arn, - SecurityGroups=[security_group.id, security_group2.id] - ) - - resp = client.describe_load_balancers(LoadBalancerArns=[arn]) - len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2) - - with assert_raises(ClientError): - client.set_security_groups( - LoadBalancerArn=arn, - SecurityGroups=['non_existant'] - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_subnets(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - subnet3 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1c') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id, subnet2.id, subnet3.id] - ) - - resp = client.describe_load_balancers(LoadBalancerArns=[arn]) - len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3) - - # Only 1 AZ - with assert_raises(ClientError): - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id] - ) - - # Multiple subnets in same AZ - with assert_raises(ClientError): - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id, subnet2.id, subnet2.id] - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_subnets(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.modify_load_balancer_attributes( - LoadBalancerArn=arn, - Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}] - ) - - # Check its 600 not 60 - response = client.describe_load_balancer_attributes( - LoadBalancerArn=arn - ) - idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0] - 
idle_timeout['Value'].should.equal('600') - - -@mock_elbv2 -@mock_ec2 -def test_modify_target_group(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - - response = client.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - arn = response.get('TargetGroups')[0]['TargetGroupArn'] - - client.modify_target_group( - TargetGroupArn=arn, - HealthCheckProtocol='HTTPS', - HealthCheckPort='8081', - HealthCheckPath='/status', - HealthCheckIntervalSeconds=10, - HealthCheckTimeoutSeconds=10, - HealthyThresholdCount=10, - UnhealthyThresholdCount=4, - Matcher={'HttpCode': '200-399'} - ) - - response = client.describe_target_groups( - TargetGroupArns=[arn] - ) - response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399') - response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10) - response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status') - response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081') - response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS') - response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10) - response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10) - response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4) - - -@mock_elbv2 -@mock_ec2 -@mock_acm -def test_modify_listener_http_to_https(): - client = boto3.client('elbv2', region_name='eu-central-1') - acm = boto3.client('acm', region_name='eu-central-1') - ec2 = boto3.resource('ec2', region_name='eu-central-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='eu-central-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='eu-central-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - response = client.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - target_group_arn = target_group['TargetGroupArn'] - - # Plain HTTP listener - response = client.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}] - ) - listener_arn = response['Listeners'][0]['ListenerArn'] - - response = acm.request_certificate( - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], - ) - google_arn = response['CertificateArn'] - response = acm.request_certificate( - 
DomainName='yahoo.com', - SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'], - ) - yahoo_arn = response['CertificateArn'] - - response = client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': google_arn, 'IsDefault': False}, - {'CertificateArn': yahoo_arn, 'IsDefault': True} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - response['Listeners'][0]['Port'].should.equal(443) - response['Listeners'][0]['Protocol'].should.equal('HTTPS') - response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01') - len(response['Listeners'][0]['Certificates']).should.equal(2) - - # Check default cert, can't do this in server mode - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn] - listener.certificate.should.equal(yahoo_arn) - - # No default cert - with assert_raises(ClientError): - client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': google_arn, 'IsDefault': False} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - - # Bad cert - with assert_raises(ClientError): - client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': 'lalala', 'IsDefault': True} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - - -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_create_target_groups_through_cloudformation(): - cfn_conn = boto3.client('cloudformation', region_name='us-east-1') - elbv2_client = boto3.client('elbv2', region_name='us-east-1') - - # test that setting a name manually as well as letting cloudformation create a name both work - # this is a special case because test groups have a name length limit of 22 characters, and must be unique - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - }, - }, - "testGroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 80, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 90, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup3": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "MyTargetGroup", - "Port": 70, - "Protocol": "HTTPS", - "VpcId": {"Ref": "testVPC"}, - }, - }, - } - } - template_json = json.dumps(template) - cfn_conn.create_stack( - StackName="test-stack", - TemplateBody=template_json, - ) - - describe_target_groups_response = elbv2_client.describe_target_groups() - target_group_dicts = describe_target_groups_response['TargetGroups'] - assert len(target_group_dicts) == 3 - - # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) - # and one named 
MyTargetGroup - assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 - assert len( - [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] - ) == 2 +from __future__ import unicode_literals + +import json +import os +import boto3 +import botocore +from botocore.exceptions import ClientError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation +from moto.elbv2 import elbv2_backends + + +@mock_elbv2 +@mock_ec2 +def test_create_load_balancer(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lb = response.get('LoadBalancers')[0] + + lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") + lb.get('LoadBalancerArn').should.equal( + 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') + lb.get('SecurityGroups').should.equal([security_group.id]) + lb.get('AvailabilityZones').should.equal([ + {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, + {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) + + # Ensure the tags persisted + response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) + tags = {d['Key']: d['Value'] + for d in response['TagDescriptions'][0]['Tags']} + tags.should.equal({'key_name': 'a_value'}) + + +@mock_elbv2 +@mock_ec2 +def test_describe_load_balancers(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.describe_load_balancers() + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + lb.get('LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers( + LoadBalancerArns=[lb.get('LoadBalancerArn')]) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers(Names=['my-lb']) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') + + with assert_raises(ClientError): + conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) + with assert_raises(ClientError): + conn.describe_load_balancers(Names=['nope']) + + +@mock_elbv2 +@mock_ec2 +def test_add_remove_tags(): + conn = 
boto3.client('elbv2', region_name='us-east-1') + + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lbs = conn.describe_load_balancers()['LoadBalancers'] + lbs.should.have.length_of(1) + lb = lbs[0] + + with assert_raises(ClientError): + conn.add_tags(ResourceArns=['missing-arn'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + tags.should.have.key('a').which.should.equal('b') + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], + TagKeys=['a']) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + +@mock_elbv2 +@mock_ec2 +def test_create_elb_in_multiple_region(): + for region in ['us-west-1', 'us-west-2']: + conn = boto3.client('elbv2', region_name=region) + ec2 = boto3.resource('ec2', region_name=region) + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc( + 
CidrBlock='172.28.7.0/24', + InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone=region + 'a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + list( + boto3.client( + 'elbv2', + region_name='us-west-1').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + list( + boto3.client( + 'elbv2', + region_name='us-west-2').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + + +@mock_elbv2 +@mock_ec2 +def test_create_target_group_and_listeners(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Add tags to the target group + conn.add_tags(ResourceArns=[target_group_arn], Tags=[ + {'Key': 'target', 'Value': 'group'}]) + conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( + [{'Key': 'target', 'Value': 'group'}]) + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Plain HTTP listener + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(80) + listener.get('Protocol').should.equal('HTTP') + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + http_listener_arn = listener.get('ListenerArn') + + response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, + Names=['a-target']) + 
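+    # filtering by LoadBalancerArn and Names together should still return the one group
+    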
+    response.get('TargetGroups').should.have.length_of(1)
+
+    # And another with SSL
+    response = conn.create_listener(
+        LoadBalancerArn=load_balancer_arn,
+        Protocol='HTTPS',
+        Port=443,
+        Certificates=[
+            {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}],
+        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
+    listener = response.get('Listeners')[0]
+    listener.get('Port').should.equal(443)
+    listener.get('Protocol').should.equal('HTTPS')
+    listener.get('Certificates').should.equal([{
+        'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert',
+    }])
+    listener.get('DefaultActions').should.equal([{
+        'TargetGroupArn': target_group.get('TargetGroupArn'),
+        'Type': 'forward'}])
+
+    https_listener_arn = listener.get('ListenerArn')
+
+    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
+    response.get('Listeners').should.have.length_of(2)
+    response = conn.describe_listeners(ListenerArns=[https_listener_arn])
+    response.get('Listeners').should.have.length_of(1)
+    listener = response.get('Listeners')[0]
+    listener.get('Port').should.equal(443)
+    listener.get('Protocol').should.equal('HTTPS')
+
+    response = conn.describe_listeners(
+        ListenerArns=[
+            http_listener_arn,
+            https_listener_arn])
+    response.get('Listeners').should.have.length_of(2)
+
+    # Deleting the target group fails while a listener still references it
+    with assert_raises(ClientError) as e:
+        conn.delete_target_group(
+            TargetGroupArn=target_group.get('TargetGroupArn'))
+    e.exception.operation_name.should.equal('DeleteTargetGroup')
+    e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", ))  # NOQA
+
+    # Delete one listener
+    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
+    response.get('Listeners').should.have.length_of(2)
+    conn.delete_listener(ListenerArn=http_listener_arn)
+    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
+    response.get('Listeners').should.have.length_of(1)
+
+    # Then delete the load balancer
+    conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
+
+    # It's gone
+    response = conn.describe_load_balancers()
+    response.get('LoadBalancers').should.have.length_of(0)
+
+    # And it deleted the remaining listener
+    response = conn.describe_listeners(
+        ListenerArns=[
+            http_listener_arn,
+            https_listener_arn])
+    response.get('Listeners').should.have.length_of(0)
+
+    # But not the target groups
+    response = conn.describe_target_groups()
+    response.get('TargetGroups').should.have.length_of(1)
+
+    # Which we'll now delete
+    conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn'))
+    response = conn.describe_target_groups()
+    response.get('TargetGroups').should.have.length_of(0)
+
+
+@mock_elbv2
+@mock_ec2
+def test_create_target_group_without_non_required_parameters():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
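+    # An ALB requires subnets in at least two Availability Zones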
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    # request without the HealthCheckIntervalSeconds parameter,
+    # which defaults to 30 seconds
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080'
+    )
+    target_group = response.get('TargetGroups')[0]
+    target_group.should_not.be.none
+
+
+@mock_elbv2
+@mock_ec2
+def test_create_invalid_target_group():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+    # Fail to create a target group whose name is 33 characters long
+    long_name = 'A' * 33
+    with assert_raises(ClientError):
+        conn.create_target_group(
+            Name=long_name,
+            Protocol='HTTP',
+            Port=8080,
+            VpcId=vpc.id,
+            HealthCheckProtocol='HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+
+    invalid_names = [
+        '-name',
+        'name-',
+        '-name-',
+        'example.com',
+        'test@test',
+        'Na--me']
+    for name in invalid_names:
+        with assert_raises(ClientError):
+            conn.create_target_group(
+                Name=name,
+                Protocol='HTTP',
+                Port=8080,
+                VpcId=vpc.id,
+                HealthCheckProtocol='HTTP',
+                HealthCheckPort='8080',
+                HealthCheckPath='/',
+                HealthCheckIntervalSeconds=5,
+                HealthCheckTimeoutSeconds=5,
+                HealthyThresholdCount=5,
+                UnhealthyThresholdCount=2,
+                Matcher={'HttpCode': '200'})
+
+    valid_names = ['name', 'Name', '000']
+    for name in valid_names:
+        conn.create_target_group(
+            Name=name,
+            Protocol='HTTP',
+            Port=8080,
+            VpcId=vpc.id,
+            HealthCheckProtocol='HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+
+
+@mock_elbv2
+@mock_ec2
+def test_describe_paginated_balancers():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
+
+    for i in range(51):
+        conn.create_load_balancer(
+            Name='my-lb%d' % i,
+            Subnets=[subnet1.id, subnet2.id],
+            SecurityGroups=[security_group.id],
+            Scheme='internal',
+            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    resp = conn.describe_load_balancers()
+    resp['LoadBalancers'].should.have.length_of(50)
+    resp['NextMarker'].should.equal(
+        resp['LoadBalancers'][-1]['LoadBalancerName'])
+    resp2 = conn.describe_load_balancers(Marker=resp['NextMarker'])
+    resp2['LoadBalancers'].should.have.length_of(1)
+    assert 'NextToken' not in resp2.keys()
+
+
+@mock_elbv2
+@mock_ec2
+def test_delete_load_balancer():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + + conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) + balancers = conn.describe_load_balancers().get('LoadBalancers') + balancers.should.have.length_of(0) + + +@mock_ec2 +@mock_elbv2 +def test_register_targets(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # No targets registered yet + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(0) + + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + response = conn.register_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[ + { + 'Id': instance_id1, + 'Port': 5060, + }, + { + 'Id': instance_id2, + 'Port': 4030, + }, + ]) + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(2) + + response = conn.deregister_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[{'Id': instance_id2}]) + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(1) + + +@mock_ec2 +@mock_elbv2 +def test_target_group_attributes(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + 
AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # check if Names filter works + response = conn.describe_target_groups(Names=[]) + response = conn.describe_target_groups(Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # The attributes should start with the two defaults + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['deregistration_delay.timeout_seconds'].should.equal('300') + attributes['stickiness.enabled'].should.equal('false') + + # Add cookie stickiness + response = conn.modify_target_group_attributes( + TargetGroupArn=target_group_arn, + Attributes=[ + { + 'Key': 'stickiness.enabled', + 'Value': 'true', + }, + { + 'Key': 'stickiness.type', + 'Value': 'lb_cookie', + }, + ]) + + # The response should have only the keys updated + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + # These new values should be in the full attribute list + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(3) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + +@mock_elbv2 +@mock_ec2 +def test_handle_listener_rules(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + 
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    target_group = response.get('TargetGroups')[0]
+
+    # Plain HTTP listener
+    response = conn.create_listener(
+        LoadBalancerArn=load_balancer_arn,
+        Protocol='HTTP',
+        Port=80,
+        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
+    listener = response.get('Listeners')[0]
+    listener.get('Port').should.equal(80)
+    listener.get('Protocol').should.equal('HTTP')
+    listener.get('DefaultActions').should.equal([{
+        'TargetGroupArn': target_group.get('TargetGroupArn'),
+        'Type': 'forward'}])
+    http_listener_arn = listener.get('ListenerArn')
+
+    # create first rule
+    priority = 100
+    host = 'xxx.example.com'
+    path_pattern = 'foobar'
+    created_rule = conn.create_rule(
+        ListenerArn=http_listener_arn,
+        Priority=priority,
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [host]
+        }, {
+            'Field': 'path-pattern',
+            'Values': [path_pattern]
+        }],
+        Actions=[{
+            'TargetGroupArn': target_group.get('TargetGroupArn'),
+            'Type': 'forward'
+        }]
+    )['Rules'][0]
+    created_rule['Priority'].should.equal('100')
+
+    # check that rules are returned sorted by priority
+    priority = 50
+    host = 'yyy.example.com'
+    path_pattern = 'foobar'
+    rules = conn.create_rule(
+        ListenerArn=http_listener_arn,
+        Priority=priority,
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [host]
+        }, {
+            'Field': 'path-pattern',
+            'Values': [path_pattern]
+        }],
+        Actions=[{
+            'TargetGroupArn': target_group.get('TargetGroupArn'),
+            'Type': 'forward'
+        }]
+    )
+
+    # test for PriorityInUse
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            }, {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test describe_rules
+    obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn)
+    len(obtained_rules['Rules']).should.equal(3)
+    priorities = [rule['Priority'] for rule in obtained_rules['Rules']]
+    priorities.should.equal(['50', '100', 'default'])
+
+    first_rule = obtained_rules['Rules'][0]
+    second_rule = obtained_rules['Rules'][1]
+    obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']])
+    obtained_rules['Rules'].should.equal([first_rule])
+
+    # test for pagination
+    obtained_rules = conn.describe_rules(
+        ListenerArn=http_listener_arn, PageSize=1)
+    len(obtained_rules['Rules']).should.equal(1)
+    obtained_rules.should.have.key('NextMarker')
+    next_marker = obtained_rules['NextMarker']
+
+    following_rules = conn.describe_rules(
+        ListenerArn=http_listener_arn,
+        PageSize=1,
+        Marker=next_marker)
+    len(following_rules['Rules']).should.equal(1)
+    following_rules.should.have.key('NextMarker')
+    following_rules['Rules'][0]['RuleArn'].should_not.equal(
+        obtained_rules['Rules'][0]['RuleArn'])
+
+    # test for invalid describe rule request
+    with assert_raises(ClientError):
+        conn.describe_rules()
+    with assert_raises(ClientError):
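+        # an empty RuleArns list is also rejected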
+        conn.describe_rules(RuleArns=[])
+    with assert_raises(ClientError):
+        conn.describe_rules(
+            ListenerArn=http_listener_arn,
+            RuleArns=[first_rule['RuleArn']]
+        )
+
+    # modify rule partially
+    new_host = 'new.example.com'
+    new_path_pattern = 'new_path'
+    modified_rule = conn.modify_rule(
+        RuleArn=first_rule['RuleArn'],
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [new_host]
+        }, {
+            'Field': 'path-pattern',
+            'Values': [new_path_pattern]
+        }]
+    )['Rules'][0]
+
+    rules = conn.describe_rules(ListenerArn=http_listener_arn)
+    obtained_rule = rules['Rules'][0]
+    modified_rule.should.equal(obtained_rule)
+    obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host)
+    obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern)
+    obtained_rule['Actions'][0]['TargetGroupArn'].should.equal(
+        target_group.get('TargetGroupArn'))
+
+    # modify priority
+    conn.set_rule_priorities(
+        RulePriorities=[
+            {'RuleArn': first_rule['RuleArn'],
+             'Priority': int(first_rule['Priority']) - 1}
+        ]
+    )
+    with assert_raises(ClientError):
+        conn.set_rule_priorities(
+            RulePriorities=[
+                {'RuleArn': first_rule['RuleArn'], 'Priority': 999},
+                {'RuleArn': second_rule['RuleArn'], 'Priority': 999}
+            ]
+        )
+
+    # delete
+    arn = first_rule['RuleArn']
+    conn.delete_rule(RuleArn=arn)
+    rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules']
+    len(rules).should.equal(2)
+
+    # test for invalid action type
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            }, {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward2'
+            }]
+        )
+
+    # test for invalid target group ARN
+    safe_priority = 2
+    invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x'
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            }, {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': invalid_target_group_arn,
+                'Type': 'forward'
+            }]
+        )
+
+    # test for invalid condition field_name
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'xxxxxxx',
+                'Values': [host]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test for empty condition value
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': []
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test for multiple condition values
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host, host]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+
+@mock_elbv2
+@mock_ec2
+def test_describe_invalid_target_group():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First
One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + # Check error raises correctly + with assert_raises(ClientError): + conn.describe_target_groups(Names=['invalid']) + + +@mock_elbv2 +@mock_ec2 +def test_describe_target_groups_no_arguments(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers')[0].get('LoadBalancerArn') + + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + assert len(conn.describe_target_groups()['TargetGroups']) == 1 + + +@mock_elbv2 +def test_describe_account_limits(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_account_limits() + resp['Limits'][0].should.contain('Name') + resp['Limits'][0].should.contain('Max') + + +@mock_elbv2 +def test_describe_ssl_policies(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_ssl_policies() + len(resp['SslPolicies']).should.equal(5) + + resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) + len(resp['SslPolicies']).should.equal(2) + + +@mock_elbv2 +@mock_ec2 +def test_set_ip_address_type(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + 
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    # Internal LBs can't be dualstack yet
+    with assert_raises(ClientError):
+        client.set_ip_address_type(
+            LoadBalancerArn=arn,
+            IpAddressType='dualstack'
+        )
+
+    # Create an internet-facing one
+    response = client.create_load_balancer(
+        Name='my-lb2',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internet-facing',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_ip_address_type(
+        LoadBalancerArn=arn,
+        IpAddressType='dualstack'
+    )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_security_groups():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    security_group2 = ec2.create_security_group(
+        GroupName='b-security-group', Description='Second One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_security_groups(
+        LoadBalancerArn=arn,
+        SecurityGroups=[security_group.id, security_group2.id]
+    )
+
+    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+    len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
+
+    with assert_raises(ClientError):
+        client.set_security_groups(
+            LoadBalancerArn=arn,
+            SecurityGroups=['non_existent']
+        )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_subnets():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.64/26',
+        AvailabilityZone='us-east-1b')
+    subnet3 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1c')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_subnets(
+        LoadBalancerArn=arn,
+        Subnets=[subnet1.id, subnet2.id, subnet3.id]
+    )
+
+    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+    len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
+
+    # Only 1 AZ
+    with assert_raises(ClientError):
+        client.set_subnets(
+            LoadBalancerArn=arn,
+            Subnets=[subnet1.id]
+        )
+
+    # Multiple subnets in same AZ
+    with assert_raises(ClientError):
+        client.set_subnets(
+            LoadBalancerArn=arn,
+            Subnets=[subnet1.id, subnet2.id, subnet2.id]
+        )
+
+
+@mock_elbv2
+@mock_ec2
+def test_modify_load_balancer_attributes_idle_timeout():
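+    # idle_timeout.timeout_seconds set via modify_load_balancer_attributes
+    # should be reflected by describe_load_balancer_attributes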
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.modify_load_balancer_attributes(
+        LoadBalancerArn=arn,
+        Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
+    )
+
+    # Check it's 600, not the default 60
+    response = client.describe_load_balancer_attributes(
+        LoadBalancerArn=arn
+    )
+    idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
+    idle_timeout['Value'].should.equal('600')
+
+
+@mock_elbv2
+@mock_ec2
+def test_modify_target_group():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+    response = client.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    arn = response.get('TargetGroups')[0]['TargetGroupArn']
+
+    client.modify_target_group(
+        TargetGroupArn=arn,
+        HealthCheckProtocol='HTTPS',
+        HealthCheckPort='8081',
+        HealthCheckPath='/status',
+        HealthCheckIntervalSeconds=10,
+        HealthCheckTimeoutSeconds=10,
+        HealthyThresholdCount=10,
+        UnhealthyThresholdCount=4,
+        Matcher={'HttpCode': '200-399'}
+    )
+
+    response = client.describe_target_groups(
+        TargetGroupArns=[arn]
+    )
+    response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
+    response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
+    response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
+    response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
+    response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
+    response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
+    response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
+    response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
+
+
+@mock_elbv2
+@mock_ec2
+@mock_acm
+def test_modify_listener_http_to_https():
+    client = boto3.client('elbv2', region_name='eu-central-1')
+    acm = boto3.client('acm', region_name='eu-central-1')
+    ec2 = boto3.resource('ec2', region_name='eu-central-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='eu-central-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='eu-central-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
+
+    response = client.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    target_group = response.get('TargetGroups')[0]
+    target_group_arn = target_group['TargetGroupArn']
+
+    # Plain HTTP listener
+    response = client.create_listener(
+        LoadBalancerArn=load_balancer_arn,
+        Protocol='HTTP',
+        Port=80,
+        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}]
+    )
+    listener_arn = response['Listeners'][0]['ListenerArn']
+
+    response = acm.request_certificate(
+        DomainName='google.com',
+        SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
+    )
+    google_arn = response['CertificateArn']
+    response = acm.request_certificate(
+        DomainName='yahoo.com',
+        SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'],
+    )
+    yahoo_arn = response['CertificateArn']
+
+    response = client.modify_listener(
+        ListenerArn=listener_arn,
+        Port=443,
+        Protocol='HTTPS',
+        SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+        Certificates=[
+            {'CertificateArn': google_arn, 'IsDefault': False},
+            {'CertificateArn': yahoo_arn, 'IsDefault': True}
+        ],
+        DefaultActions=[
+            {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+        ]
+    )
+    response['Listeners'][0]['Port'].should.equal(443)
+    response['Listeners'][0]['Protocol'].should.equal('HTTPS')
+    response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01')
+    len(response['Listeners'][0]['Certificates']).should.equal(2)
+
+    # Check default cert, can't do this in server mode
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false':
+        listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn]
+        listener.certificate.should.equal(yahoo_arn)
+
+    # No default cert
+    with assert_raises(ClientError):
+        client.modify_listener(
+            ListenerArn=listener_arn,
+            Port=443,
+            Protocol='HTTPS',
+            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+            Certificates=[
+                {'CertificateArn': google_arn, 'IsDefault': False}
+            ],
+            DefaultActions=[
+                {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+            ]
+        )
+
+    # Bad cert
+    with assert_raises(ClientError):
+        client.modify_listener(
+            ListenerArn=listener_arn,
+            Port=443,
+            Protocol='HTTPS',
+            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+            Certificates=[
+                {'CertificateArn': 'lalala', 'IsDefault': True}
+            ],
+            DefaultActions=[
+                {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+            ]
+        )
+
+
+@mock_ec2
+@mock_elbv2
+@mock_cloudformation
+def test_create_target_groups_through_cloudformation():
+    cfn_conn = boto3.client('cloudformation', region_name='us-east-1')
+    elbv2_client = boto3.client('elbv2', region_name='us-east-1')
+
+    # test that setting a name manually as well as letting cloudformation create a name both work
+    # this is a special case because target groups have a name length limit of 22 characters, and must be unique
+    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description":
"ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + }, + }, + "testGroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 90, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup3": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Name": "MyTargetGroup", + "Port": 70, + "Protocol": "HTTPS", + "VpcId": {"Ref": "testVPC"}, + }, + }, + } + } + template_json = json.dumps(template) + cfn_conn.create_stack( + StackName="test-stack", + TemplateBody=template_json, + ) + + describe_target_groups_response = elbv2_client.describe_target_groups() + target_group_dicts = describe_target_groups_response['TargetGroups'] + assert len(target_group_dicts) == 3 + + # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) + # and one named MyTargetGroup + assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 + assert len( + [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] + ) == 2 + + +@mock_elbv2 +@mock_ec2 +def test_redirect_action_listener_rule(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.128/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_listener(LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[ + {'Type': 'redirect', + 'RedirectConfig': { + 'Protocol': 'HTTPS', + 'Port': '443', + 'StatusCode': 'HTTP_301' + }}]) + + listener = response.get('Listeners')[0] + expected_default_actions = [{ + 'Type': 'redirect', + 'RedirectConfig': { + 'Protocol': 'HTTPS', + 'Port': '443', + 'StatusCode': 'HTTP_301' + } + }] + listener.get('DefaultActions').should.equal(expected_default_actions) + listener_arn = listener.get('ListenerArn') + + describe_rules_response = conn.describe_rules(ListenerArn=listener_arn) + describe_rules_response['Rules'][0]['Actions'].should.equal(expected_default_actions) + + describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn, ]) + describe_listener_actions = describe_listener_response['Listeners'][0]['DefaultActions'] + describe_listener_actions.should.equal(expected_default_actions) + + modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81) + modify_listener_actions = modify_listener_response['Listeners'][0]['DefaultActions'] + modify_listener_actions.should.equal(expected_default_actions) + + +@mock_elbv2 +@mock_cloudformation +def test_redirect_action_listener_rule_cloudformation(): + cnf_conn = boto3.client('cloudformation', 
+                             region_name='us-east-1')
+    elbv2_client = boto3.client('elbv2', region_name='us-east-1')
+
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "ECS Cluster Test CloudFormation",
+        "Resources": {
+            "testVPC": {
+                "Type": "AWS::EC2::VPC",
+                "Properties": {
+                    "CidrBlock": "10.0.0.0/16",
+                },
+            },
+            "subnet1": {
+                "Type": "AWS::EC2::Subnet",
+                "Properties": {
+                    "CidrBlock": "10.0.0.0/24",
+                    "VpcId": {"Ref": "testVPC"},
+                    "AvailabilityZone": "us-east-1b",
+                },
+            },
+            "subnet2": {
+                "Type": "AWS::EC2::Subnet",
+                "Properties": {
+                    "CidrBlock": "10.0.1.0/24",
+                    "VpcId": {"Ref": "testVPC"},
+                    "AvailabilityZone": "us-east-1b",
+                },
+            },
+            "testLb": {
+                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
+                "Properties": {
+                    "Name": "my-lb",
+                    "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}],
+                    "Type": "application",
+                    "SecurityGroups": [],
+                }
+            },
+            "testListener": {
+                "Type": "AWS::ElasticLoadBalancingV2::Listener",
+                "Properties": {
+                    "LoadBalancerArn": {"Ref": "testLb"},
+                    "Port": 80,
+                    "Protocol": "HTTP",
+                    "DefaultActions": [{
+                        "Type": "redirect",
+                        "RedirectConfig": {
+                            "Port": "443",
+                            "Protocol": "HTTPS",
+                            "StatusCode": "HTTP_301",
+                        }
+                    }]
+                }
+            }
+        }
+    }
+    template_json = json.dumps(template)
+    cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json)
+
+    describe_load_balancers_response = elbv2_client.describe_load_balancers(Names=['my-lb', ])
+    describe_load_balancers_response['LoadBalancers'].should.have.length_of(1)
+    load_balancer_arn = describe_load_balancers_response['LoadBalancers'][0]['LoadBalancerArn']
+
+    describe_listeners_response = elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
+
+    describe_listeners_response['Listeners'].should.have.length_of(1)
+    describe_listeners_response['Listeners'][0]['DefaultActions'].should.equal([{
+        'Type': 'redirect',
+        'RedirectConfig': {
+            'Port': '443', 'Protocol': 'HTTPS', 'StatusCode': 'HTTP_301',
+        }
+    }, ])
diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py
index 28fff455b..b9a5025d9 100644
--- a/tests/test_emr/test_emr_boto3.py
+++ b/tests/test_emr/test_emr_boto3.py
@@ -1,720 +1,761 @@
-# -*- coding: utf-8 -*-
-from __future__ import unicode_literals
-import time
-from copy import deepcopy
-from datetime import datetime
-
-import boto3
-import pytz
-import six
-import sure  # noqa
-from botocore.exceptions import ClientError
-from nose.tools import assert_raises
-
-from moto import mock_emr
-
-
-run_job_flow_args = dict(
-    Instances={
-        'InstanceCount': 3,
-        'KeepJobFlowAliveWhenNoSteps': True,
-        'MasterInstanceType': 'c3.medium',
-        'Placement': {'AvailabilityZone': 'us-east-1a'},
-        'SlaveInstanceType': 'c3.xlarge',
-    },
-    JobFlowRole='EMR_EC2_DefaultRole',
-    LogUri='s3://mybucket/log',
-    Name='cluster',
-    ServiceRole='EMR_DefaultRole',
-    VisibleToAllUsers=True)
-
-
-input_instance_groups = [
-    {'InstanceCount': 1,
-     'InstanceRole': 'MASTER',
-     'InstanceType': 'c1.medium',
-     'Market': 'ON_DEMAND',
-     'Name': 'master'},
-    {'InstanceCount': 3,
-     'InstanceRole': 'CORE',
-     'InstanceType': 'c1.medium',
-     'Market': 'ON_DEMAND',
-     'Name': 'core'},
-    {'InstanceCount': 6,
-     'InstanceRole': 'TASK',
-     'InstanceType': 'c1.large',
-     'Market': 'SPOT',
-     'Name': 'task-1',
-     'BidPrice': '0.07'},
-    {'InstanceCount': 10,
-     'InstanceRole': 'TASK',
-     'InstanceType': 'c1.xlarge',
-     'Market': 'SPOT',
-     'Name': 'task-2',
-     'BidPrice': '0.05'},
-]
-
-
-@mock_emr
-def test_describe_cluster():
-    client = boto3.client('emr', region_name='us-east-1')
-
-    args =
deepcopy(run_job_flow_args) - args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}] - args['Configurations'] = [ - {'Classification': 'yarn-site', - 'Properties': {'someproperty': 'somevalue', - 'someotherproperty': 'someothervalue'}}, - {'Classification': 'nested-configs', - 'Properties': {}, - 'Configurations': [ - { - 'Classification': 'nested-config', - 'Properties': { - 'nested-property': 'nested-value' - } - } - ]} - ] - args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] - args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] - args['Instances']['Ec2KeyName'] = 'mykey' - args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec' - args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group' - args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group' - args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False - args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group' - args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'}, - {'Key': 'tag2', 'Value': 'val2'}] - - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - cl = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - cl['Applications'][0]['Name'].should.equal('Spark') - cl['Applications'][0]['Version'].should.equal('2.4.2') - cl['AutoTerminate'].should.equal(True) - - config = cl['Configurations'][0] - config['Classification'].should.equal('yarn-site') - config['Properties'].should.equal(args['Configurations'][0]['Properties']) - - nested_config = cl['Configurations'][1] - nested_config['Classification'].should.equal('nested-configs') - nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) - - attrs = cl['Ec2InstanceAttributes'] - attrs['AdditionalMasterSecurityGroups'].should.equal( - args['Instances']['AdditionalMasterSecurityGroups']) - attrs['AdditionalSlaveSecurityGroups'].should.equal( - args['Instances']['AdditionalSlaveSecurityGroups']) - attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') - attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) - attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['EmrManagedMasterSecurityGroup'].should.equal( - args['Instances']['EmrManagedMasterSecurityGroup']) - attrs['EmrManagedSlaveSecurityGroup'].should.equal( - args['Instances']['EmrManagedSlaveSecurityGroup']) - attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) - attrs['ServiceAccessSecurityGroup'].should.equal( - args['Instances']['ServiceAccessSecurityGroup']) - cl['Id'].should.equal(cluster_id) - cl['LogUri'].should.equal(args['LogUri']) - cl['MasterPublicDnsName'].should.be.a(six.string_types) - cl['Name'].should.equal(args['Name']) - cl['NormalizedInstanceHours'].should.equal(0) - # cl['ReleaseLabel'].should.equal('emr-5.0.0') - cl.shouldnt.have.key('RequestedAmiVersion') - cl['RunningAmiVersion'].should.equal('1.0.0') - # cl['SecurityConfiguration'].should.be.a(six.string_types) - cl['ServiceRole'].should.equal(args['ServiceRole']) - - status = cl['Status'] - status['State'].should.equal('TERMINATED') - # cluster['Status']['StateChangeReason'] - status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') - # status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc)) - status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') - - dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal( - dict((t['Key'], t['Value']) for t in args['Tags'])) - - 
cl['TerminationProtected'].should.equal(False) - cl['VisibleToAllUsers'].should.equal(True) - - -@mock_emr -def test_describe_cluster_not_found(): - conn = boto3.client('emr', region_name='us-east-1') - raised = False - try: - cluster = conn.describe_cluster(ClusterId='DummyId') - except ClientError as e: - if e.response['Error']['Code'] == "ResourceNotFoundException": - raised = True - raised.should.equal(True) - - -@mock_emr -def test_describe_job_flows(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - expected = {} - - for idx in range(4): - cluster_name = 'cluster' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'State': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(4, 6): - cluster_name = 'cluster' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - client.terminate_job_flows(JobFlowIds=[cluster_id]) - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'State': 'TERMINATED' - } - - resp = client.describe_job_flows() - resp['JobFlows'].should.have.length_of(6) - - for cluster_id, y in expected.items(): - resp = client.describe_job_flows(JobFlowIds=[cluster_id]) - resp['JobFlows'].should.have.length_of(1) - resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id) - - resp = client.describe_job_flows(JobFlowStates=['WAITING']) - resp['JobFlows'].should.have.length_of(4) - for x in resp['JobFlows']: - x['ExecutionStatusDetail']['State'].should.equal('WAITING') - - resp = client.describe_job_flows(CreatedBefore=timestamp) - resp['JobFlows'].should.have.length_of(4) - - resp = client.describe_job_flows(CreatedAfter=timestamp) - resp['JobFlows'].should.have.length_of(2) - - -@mock_emr -def test_describe_job_flow(): - client = boto3.client('emr', region_name='us-east-1') - - args = deepcopy(run_job_flow_args) - args['AmiVersion'] = '3.8.1' - args['Instances'].update( - {'Ec2KeyName': 'ec2keyname', - 'Ec2SubnetId': 'subnet-8be41cec', - 'HadoopVersion': '2.4.0'}) - args['VisibleToAllUsers'] = True - - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - - jf['AmiVersion'].should.equal(args['AmiVersion']) - jf.shouldnt.have.key('BootstrapActions') - esd = jf['ExecutionStatusDetail'] - esd['CreationDateTime'].should.be.a('datetime.datetime') - # esd['EndDateTime'].should.be.a('datetime.datetime') - # esd['LastStateChangeReason'].should.be.a(six.string_types) - esd['ReadyDateTime'].should.be.a('datetime.datetime') - esd['StartDateTime'].should.be.a('datetime.datetime') - esd['State'].should.equal('WAITING') - attrs = jf['Instances'] - attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) - attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion']) - attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount']) - for ig in attrs['InstanceGroups']: - # ig['BidPrice'] - ig['CreationDateTime'].should.be.a('datetime.datetime') - # ig['EndDateTime'].should.be.a('datetime.datetime') - ig['InstanceGroupId'].should.be.a(six.string_types) - ig['InstanceRequestCount'].should.be.a(int) - ig['InstanceRole'].should.be.within(['MASTER', 
'CORE']) - ig['InstanceRunningCount'].should.be.a(int) - ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge']) - # ig['LastStateChangeReason'].should.be.a(six.string_types) - ig['Market'].should.equal('ON_DEMAND') - ig['Name'].should.be.a(six.string_types) - ig['ReadyDateTime'].should.be.a('datetime.datetime') - ig['StartDateTime'].should.be.a('datetime.datetime') - ig['State'].should.equal('RUNNING') - attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) - # attrs['MasterInstanceId'].should.be.a(six.string_types) - attrs['MasterInstanceType'].should.equal( - args['Instances']['MasterInstanceType']) - attrs['MasterPublicDnsName'].should.be.a(six.string_types) - attrs['NormalizedInstanceHours'].should.equal(0) - attrs['Placement']['AvailabilityZone'].should.equal( - args['Instances']['Placement']['AvailabilityZone']) - attrs['SlaveInstanceType'].should.equal( - args['Instances']['SlaveInstanceType']) - attrs['TerminationProtected'].should.equal(False) - jf['JobFlowId'].should.equal(cluster_id) - jf['JobFlowRole'].should.equal(args['JobFlowRole']) - jf['LogUri'].should.equal(args['LogUri']) - jf['Name'].should.equal(args['Name']) - jf['ServiceRole'].should.equal(args['ServiceRole']) - jf['Steps'].should.equal([]) - jf['SupportedProducts'].should.equal([]) - jf['VisibleToAllUsers'].should.equal(True) - - -@mock_emr -def test_list_clusters(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - expected = {} - - for idx in range(40): - cluster_name = 'jobflow' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'NormalizedInstanceHours': 0, - 'State': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(40, 70): - cluster_name = 'jobflow' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - client.terminate_job_flows(JobFlowIds=[cluster_id]) - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'NormalizedInstanceHours': 0, - 'State': 'TERMINATED' - } - - args = {} - while 1: - resp = client.list_clusters(**args) - clusters = resp['Clusters'] - len(clusters).should.be.lower_than_or_equal_to(50) - for x in clusters: - y = expected[x['Id']] - x['Id'].should.equal(y['Id']) - x['Name'].should.equal(y['Name']) - x['NormalizedInstanceHours'].should.equal( - y['NormalizedInstanceHours']) - x['Status']['State'].should.equal(y['State']) - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - if y['State'] == 'TERMINATED': - x['Status']['Timeline'][ - 'EndDateTime'].should.be.a('datetime.datetime') - else: - x['Status']['Timeline'].shouldnt.have.key('EndDateTime') - x['Status']['Timeline'][ - 'ReadyDateTime'].should.be.a('datetime.datetime') - marker = resp.get('Marker') - if marker is None: - break - args = {'Marker': marker} - - resp = client.list_clusters(ClusterStates=['TERMINATED']) - resp['Clusters'].should.have.length_of(30) - for x in resp['Clusters']: - x['Status']['State'].should.equal('TERMINATED') - - resp = client.list_clusters(CreatedBefore=timestamp) - resp['Clusters'].should.have.length_of(40) - - resp = client.list_clusters(CreatedAfter=timestamp) - resp['Clusters'].should.have.length_of(30) - - -@mock_emr -def test_run_job_flow(): - client = boto3.client('emr', 
region_name='us-east-1') - args = deepcopy(run_job_flow_args) - cluster_id = client.run_job_flow(**args)['JobFlowId'] - resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - resp['ExecutionStatusDetail']['State'].should.equal('WAITING') - resp['JobFlowId'].should.equal(cluster_id) - resp['Name'].should.equal(args['Name']) - resp['Instances']['MasterInstanceType'].should.equal( - args['Instances']['MasterInstanceType']) - resp['Instances']['SlaveInstanceType'].should.equal( - args['Instances']['SlaveInstanceType']) - resp['LogUri'].should.equal(args['LogUri']) - resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) - resp['Instances']['NormalizedInstanceHours'].should.equal(0) - resp['Steps'].should.equal([]) - - -@mock_emr -def test_run_job_flow_with_invalid_params(): - client = boto3.client('emr', region_name='us-east-1') - with assert_raises(ClientError) as ex: - # cannot set both AmiVersion and ReleaseLabel - args = deepcopy(run_job_flow_args) - args['AmiVersion'] = '2.4' - args['ReleaseLabel'] = 'emr-5.0.0' - client.run_job_flow(**args) - ex.exception.response['Error']['Code'].should.equal('ValidationException') - - -@mock_emr -def test_run_job_flow_in_multiple_regions(): - regions = {} - for region in ['us-east-1', 'eu-west-1']: - client = boto3.client('emr', region_name=region) - args = deepcopy(run_job_flow_args) - args['Name'] = region - cluster_id = client.run_job_flow(**args)['JobFlowId'] - regions[region] = {'client': client, 'cluster_id': cluster_id} - - for region in regions.keys(): - client = regions[region]['client'] - resp = client.describe_cluster(ClusterId=regions[region]['cluster_id']) - resp['Cluster']['Name'].should.equal(region) - - -@mock_emr -def test_run_job_flow_with_new_params(): - client = boto3.client('emr', region_name='us-east-1') - resp = client.run_job_flow(**run_job_flow_args) - resp.should.have.key('JobFlowId') - - -@mock_emr -def test_run_job_flow_with_visible_to_all_users(): - client = boto3.client('emr', region_name='us-east-1') - for expected in (True, False): - args = deepcopy(run_job_flow_args) - args['VisibleToAllUsers'] = expected - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(expected) - - -@mock_emr -def test_run_job_flow_with_instance_groups(): - input_groups = dict((g['Name'], g) for g in input_instance_groups) - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Instances'] = {'InstanceGroups': input_instance_groups} - cluster_id = client.run_job_flow(**args)['JobFlowId'] - groups = client.list_instance_groups(ClusterId=cluster_id)[ - 'InstanceGroups'] - for x in groups: - y = input_groups[x['Name']] - x.should.have.key('Id') - x['RequestedInstanceCount'].should.equal(y['InstanceCount']) - x['InstanceGroupType'].should.equal(y['InstanceRole']) - x['InstanceType'].should.equal(y['InstanceType']) - x['Market'].should.equal(y['Market']) - if 'BidPrice' in y: - x['BidPrice'].should.equal(y['BidPrice']) - - -@mock_emr -def test_set_termination_protection(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Instances']['TerminationProtected'] = False - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['TerminationProtected'].should.equal(False) - - for expected in (True, False): - resp = 
client.set_termination_protection(JobFlowIds=[cluster_id], - TerminationProtected=expected) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['TerminationProtected'].should.equal(expected) - - -@mock_emr -def test_set_visible_to_all_users(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['VisibleToAllUsers'] = False - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(False) - - for expected in (True, False): - resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id], - VisibleToAllUsers=expected) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(expected) - - -@mock_emr -def test_terminate_job_flows(): - client = boto3.client('emr', region_name='us-east-1') - - resp = client.run_job_flow(**run_job_flow_args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['Status']['State'].should.equal('WAITING') - - resp = client.terminate_job_flows(JobFlowIds=[cluster_id]) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['Status']['State'].should.equal('TERMINATED') - - -# testing multiple end points for each feature - -@mock_emr -def test_bootstrap_actions(): - bootstrap_actions = [ - {'Name': 'bs1', - 'ScriptBootstrapAction': { - 'Args': ['arg1', 'arg2'], - 'Path': 's3://path/to/script'}}, - {'Name': 'bs2', - 'ScriptBootstrapAction': { - 'Args': [], - 'Path': 's3://path/to/anotherscript'}} - ] - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['BootstrapActions'] = bootstrap_actions - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - for x, y in zip(cl['BootstrapActions'], bootstrap_actions): - x['BootstrapActionConfig'].should.equal(y) - - resp = client.list_bootstrap_actions(ClusterId=cluster_id) - for x, y in zip(resp['BootstrapActions'], bootstrap_actions): - x['Name'].should.equal(y['Name']) - if 'Args' in y['ScriptBootstrapAction']: - x['Args'].should.equal(y['ScriptBootstrapAction']['Args']) - x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path']) - - -@mock_emr -def test_instance_groups(): - input_groups = dict((g['Name'], g) for g in input_instance_groups) - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']: - del args['Instances'][key] - args['Instances']['InstanceGroups'] = input_instance_groups[:2] - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - base_instance_count = jf['Instances']['InstanceCount'] - - client.add_instance_groups( - JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal( - sum(g['InstanceCount'] for g in input_instance_groups)) - for x in jf['Instances']['InstanceGroups']: - y = input_groups[x['Name']] - if hasattr(y, 'BidPrice'): - x['BidPrice'].should.equal('BidPrice') - x['CreationDateTime'].should.be.a('datetime.datetime') - # x['EndDateTime'].should.be.a('datetime.datetime') - x.should.have.key('InstanceGroupId') - 
x['InstanceRequestCount'].should.equal(y['InstanceCount']) - x['InstanceRole'].should.equal(y['InstanceRole']) - x['InstanceRunningCount'].should.equal(y['InstanceCount']) - x['InstanceType'].should.equal(y['InstanceType']) - # x['LastStateChangeReason'].should.equal(y['LastStateChangeReason']) - x['Market'].should.equal(y['Market']) - x['Name'].should.equal(y['Name']) - x['ReadyDateTime'].should.be.a('datetime.datetime') - x['StartDateTime'].should.be.a('datetime.datetime') - x['State'].should.equal('RUNNING') - - groups = client.list_instance_groups(ClusterId=cluster_id)[ - 'InstanceGroups'] - for x in groups: - y = input_groups[x['Name']] - if hasattr(y, 'BidPrice'): - x['BidPrice'].should.equal('BidPrice') - # Configurations - # EbsBlockDevices - # EbsOptimized - x.should.have.key('Id') - x['InstanceGroupType'].should.equal(y['InstanceRole']) - x['InstanceType'].should.equal(y['InstanceType']) - x['Market'].should.equal(y['Market']) - x['Name'].should.equal(y['Name']) - x['RequestedInstanceCount'].should.equal(y['InstanceCount']) - x['RunningInstanceCount'].should.equal(y['InstanceCount']) - # ShrinkPolicy - x['Status']['State'].should.equal('RUNNING') - x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types) - # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types) - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - x['Status']['Timeline'][ - 'ReadyDateTime'].should.be.a('datetime.datetime') - - igs = dict((g['Name'], g) for g in groups) - client.modify_instance_groups( - InstanceGroups=[ - {'InstanceGroupId': igs['task-1']['Id'], - 'InstanceCount': 2}, - {'InstanceGroupId': igs['task-2']['Id'], - 'InstanceCount': 3}]) - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5) - igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups']) - igs['task-1']['InstanceRunningCount'].should.equal(2) - igs['task-2']['InstanceRunningCount'].should.equal(3) - - -@mock_emr -def test_steps(): - input_steps = [{ - 'HadoopJarStep': { - 'Args': [ - 'hadoop-streaming', - '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py', - '-mapper', 'python wordSplitter.py', - '-input', 's3://elasticmapreduce/samples/wordcount/input', - '-output', 's3://output_bucket/output/wordcount_output', - '-reducer', 'aggregate' - ], - 'Jar': 'command-runner.jar', - }, - 'Name': 'My wordcount example', - }, { - 'HadoopJarStep': { - 'Args': [ - 'hadoop-streaming', - '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py', - '-mapper', 'python wordSplitter2.py', - '-input', 's3://elasticmapreduce/samples/wordcount/input2', - '-output', 's3://output_bucket/output/wordcount_output2', - '-reducer', 'aggregate' - ], - 'Jar': 'command-runner.jar', - }, - 'Name': 'My wordcount example2', - }] - - # TODO: implementation and test for cancel_steps - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Steps'] = [input_steps[0]] - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Steps'].should.have.length_of(1) - - client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]]) - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Steps'].should.have.length_of(2) - for idx, 
(x, y) in enumerate(zip(jf['Steps'], input_steps)): - x['ExecutionStatusDetail'].should.have.key('CreationDateTime') - # x['ExecutionStatusDetail'].should.have.key('EndDateTime') - # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') - # x['ExecutionStatusDetail'].should.have.key('StartDateTime') - x['ExecutionStatusDetail']['State'].should.equal( - 'STARTING' if idx == 0 else 'PENDING') - x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['StepConfig']['HadoopJarStep'][ - 'Args'].should.equal(y['HadoopJarStep']['Args']) - x['StepConfig']['HadoopJarStep'][ - 'Jar'].should.equal(y['HadoopJarStep']['Jar']) - if 'MainClass' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( - y['HadoopJarStep']['MainClass']) - if 'Properties' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['Properties'].should.equal( - y['HadoopJarStep']['Properties']) - x['StepConfig']['Name'].should.equal(y['Name']) - - expected = dict((s['Name'], s) for s in input_steps) - - steps = client.list_steps(ClusterId=cluster_id)['Steps'] - steps.should.have.length_of(2) - for x in steps: - y = expected[x['Name']] - x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) - # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) - # Properties - x['Id'].should.be.a(six.string_types) - x['Name'].should.equal(y['Name']) - x['Status']['State'].should.be.within(['STARTING', 'PENDING']) - # StateChangeReason - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') - - x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step'] - x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) - # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) - # Properties - x['Id'].should.be.a(six.string_types) - x['Name'].should.equal(y['Name']) - x['Status']['State'].should.be.within(['STARTING', 'PENDING']) - # StateChangeReason - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') - - step_id = steps[0]['Id'] - steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps'] - steps.should.have.length_of(1) - steps[0]['Id'].should.equal(step_id) - - steps = client.list_steps(ClusterId=cluster_id, - StepStates=['STARTING'])['Steps'] - steps.should.have.length_of(1) - steps[0]['Id'].should.equal(step_id) - - -@mock_emr -def test_tags(): - input_tags = [{'Key': 'newkey1', 'Value': 'newval1'}, - {'Key': 'newkey2', 'Value': 'newval2'}] - - client = boto3.client('emr', region_name='us-east-1') - cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId'] - - client.add_tags(ResourceId=cluster_id, Tags=input_tags) - resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - resp['Tags'].should.have.length_of(2) - dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( - dict((t['Key'], t['Value']) for t in input_tags)) - - client.remove_tags(ResourceId=cluster_id, TagKeys=[ - t['Key'] for t in 
input_tags]) - resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - resp['Tags'].should.equal([]) +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +import time +from copy import deepcopy +from datetime import datetime + +import boto3 +import pytz +import six +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_emr + + +run_job_flow_args = dict( + Instances={ + 'InstanceCount': 3, + 'KeepJobFlowAliveWhenNoSteps': True, + 'MasterInstanceType': 'c3.medium', + 'Placement': {'AvailabilityZone': 'us-east-1a'}, + 'SlaveInstanceType': 'c3.xlarge', + }, + JobFlowRole='EMR_EC2_DefaultRole', + LogUri='s3://mybucket/log', + Name='cluster', + ServiceRole='EMR_DefaultRole', + VisibleToAllUsers=True) + + +input_instance_groups = [ + {'InstanceCount': 1, + 'InstanceRole': 'MASTER', + 'InstanceType': 'c1.medium', + 'Market': 'ON_DEMAND', + 'Name': 'master'}, + {'InstanceCount': 3, + 'InstanceRole': 'CORE', + 'InstanceType': 'c1.medium', + 'Market': 'ON_DEMAND', + 'Name': 'core'}, + {'InstanceCount': 6, + 'InstanceRole': 'TASK', + 'InstanceType': 'c1.large', + 'Market': 'SPOT', + 'Name': 'task-1', + 'BidPrice': '0.07'}, + {'InstanceCount': 10, + 'InstanceRole': 'TASK', + 'InstanceType': 'c1.xlarge', + 'Market': 'SPOT', + 'Name': 'task-2', + 'BidPrice': '0.05'}, +] + + +@mock_emr +def test_describe_cluster(): + client = boto3.client('emr', region_name='us-east-1') + + args = deepcopy(run_job_flow_args) + args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}] + args['Configurations'] = [ + {'Classification': 'yarn-site', + 'Properties': {'someproperty': 'somevalue', + 'someotherproperty': 'someothervalue'}}, + {'Classification': 'nested-configs', + 'Properties': {}, + 'Configurations': [ + { + 'Classification': 'nested-config', + 'Properties': { + 'nested-property': 'nested-value' + } + } + ]} + ] + args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] + args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] + args['Instances']['Ec2KeyName'] = 'mykey' + args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec' + args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group' + args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group' + args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False + args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group' + args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'}, + {'Key': 'tag2', 'Value': 'val2'}] + + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + cl = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + cl['Applications'][0]['Name'].should.equal('Spark') + cl['Applications'][0]['Version'].should.equal('2.4.2') + cl['AutoTerminate'].should.equal(True) + + config = cl['Configurations'][0] + config['Classification'].should.equal('yarn-site') + config['Properties'].should.equal(args['Configurations'][0]['Properties']) + + nested_config = cl['Configurations'][1] + nested_config['Classification'].should.equal('nested-configs') + nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) + + attrs = cl['Ec2InstanceAttributes'] + attrs['AdditionalMasterSecurityGroups'].should.equal( + args['Instances']['AdditionalMasterSecurityGroups']) + attrs['AdditionalSlaveSecurityGroups'].should.equal( + args['Instances']['AdditionalSlaveSecurityGroups']) + attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') + 
attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName'])
+    attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId'])
+    attrs['EmrManagedMasterSecurityGroup'].should.equal(
+        args['Instances']['EmrManagedMasterSecurityGroup'])
+    attrs['EmrManagedSlaveSecurityGroup'].should.equal(
+        args['Instances']['EmrManagedSlaveSecurityGroup'])
+    attrs['IamInstanceProfile'].should.equal(args['JobFlowRole'])
+    attrs['ServiceAccessSecurityGroup'].should.equal(
+        args['Instances']['ServiceAccessSecurityGroup'])
+    cl['Id'].should.equal(cluster_id)
+    cl['LogUri'].should.equal(args['LogUri'])
+    cl['MasterPublicDnsName'].should.be.a(six.string_types)
+    cl['Name'].should.equal(args['Name'])
+    cl['NormalizedInstanceHours'].should.equal(0)
+    # cl['ReleaseLabel'].should.equal('emr-5.0.0')
+    cl.shouldnt.have.key('RequestedAmiVersion')
+    cl['RunningAmiVersion'].should.equal('1.0.0')
+    # cl['SecurityConfiguration'].should.be.a(six.string_types)
+    cl['ServiceRole'].should.equal(args['ServiceRole'])
+
+    status = cl['Status']
+    status['State'].should.equal('TERMINATED')
+    # cluster['Status']['StateChangeReason']
+    status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime')
+    # status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc))
+    status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime')
+
+    dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal(
+        dict((t['Key'], t['Value']) for t in args['Tags']))
+
+    cl['TerminationProtected'].should.equal(False)
+    cl['VisibleToAllUsers'].should.equal(True)
+
+
+@mock_emr
+def test_describe_cluster_not_found():
+    conn = boto3.client('emr', region_name='us-east-1')
+    with assert_raises(ClientError) as ex:
+        conn.describe_cluster(ClusterId='DummyId')
+    ex.exception.response['Error']['Code'].should.equal('ResourceNotFoundException')
+
+
+@mock_emr
+def test_describe_job_flows():
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    expected = {}
+
+    for idx in range(4):
+        cluster_name = 'cluster' + str(idx)
+        args['Name'] = cluster_name
+        cluster_id = client.run_job_flow(**args)['JobFlowId']
+        expected[cluster_id] = {
+            'Id': cluster_id,
+            'Name': cluster_name,
+            'State': 'WAITING'
+        }
+
+    # need sleep since it appears the timestamp is always rounded to
+    # the nearest second internally
+    time.sleep(1)
+    timestamp = datetime.now(pytz.utc)
+    time.sleep(1)
+
+    for idx in range(4, 6):
+        cluster_name = 'cluster' + str(idx)
+        args['Name'] = cluster_name
+        cluster_id = client.run_job_flow(**args)['JobFlowId']
+        client.terminate_job_flows(JobFlowIds=[cluster_id])
+        expected[cluster_id] = {
+            'Id': cluster_id,
+            'Name': cluster_name,
+            'State': 'TERMINATED'
+        }
+
+    resp = client.describe_job_flows()
+    resp['JobFlows'].should.have.length_of(6)
+
+    for cluster_id, y in expected.items():
+        resp = client.describe_job_flows(JobFlowIds=[cluster_id])
+        resp['JobFlows'].should.have.length_of(1)
+        resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id)
+
+    resp = client.describe_job_flows(JobFlowStates=['WAITING'])
+    resp['JobFlows'].should.have.length_of(4)
+    for x in resp['JobFlows']:
+        x['ExecutionStatusDetail']['State'].should.equal('WAITING')
+
+    resp = client.describe_job_flows(CreatedBefore=timestamp)
+    resp['JobFlows'].should.have.length_of(4)
+
+    resp = client.describe_job_flows(CreatedAfter=timestamp)
+    resp['JobFlows'].should.have.length_of(2)
+
+
+@mock_emr
+def test_describe_job_flow():
+    client =
boto3.client('emr', region_name='us-east-1') + + args = deepcopy(run_job_flow_args) + args['AmiVersion'] = '3.8.1' + args['Instances'].update( + {'Ec2KeyName': 'ec2keyname', + 'Ec2SubnetId': 'subnet-8be41cec', + 'HadoopVersion': '2.4.0'}) + args['VisibleToAllUsers'] = True + + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + + jf['AmiVersion'].should.equal(args['AmiVersion']) + jf.shouldnt.have.key('BootstrapActions') + esd = jf['ExecutionStatusDetail'] + esd['CreationDateTime'].should.be.a('datetime.datetime') + # esd['EndDateTime'].should.be.a('datetime.datetime') + # esd['LastStateChangeReason'].should.be.a(six.string_types) + esd['ReadyDateTime'].should.be.a('datetime.datetime') + esd['StartDateTime'].should.be.a('datetime.datetime') + esd['State'].should.equal('WAITING') + attrs = jf['Instances'] + attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) + attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) + attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion']) + attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount']) + for ig in attrs['InstanceGroups']: + # ig['BidPrice'] + ig['CreationDateTime'].should.be.a('datetime.datetime') + # ig['EndDateTime'].should.be.a('datetime.datetime') + ig['InstanceGroupId'].should.be.a(six.string_types) + ig['InstanceRequestCount'].should.be.a(int) + ig['InstanceRole'].should.be.within(['MASTER', 'CORE']) + ig['InstanceRunningCount'].should.be.a(int) + ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge']) + # ig['LastStateChangeReason'].should.be.a(six.string_types) + ig['Market'].should.equal('ON_DEMAND') + ig['Name'].should.be.a(six.string_types) + ig['ReadyDateTime'].should.be.a('datetime.datetime') + ig['StartDateTime'].should.be.a('datetime.datetime') + ig['State'].should.equal('RUNNING') + attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) + # attrs['MasterInstanceId'].should.be.a(six.string_types) + attrs['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + attrs['MasterPublicDnsName'].should.be.a(six.string_types) + attrs['NormalizedInstanceHours'].should.equal(0) + attrs['Placement']['AvailabilityZone'].should.equal( + args['Instances']['Placement']['AvailabilityZone']) + attrs['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) + attrs['TerminationProtected'].should.equal(False) + jf['JobFlowId'].should.equal(cluster_id) + jf['JobFlowRole'].should.equal(args['JobFlowRole']) + jf['LogUri'].should.equal(args['LogUri']) + jf['Name'].should.equal(args['Name']) + jf['ServiceRole'].should.equal(args['ServiceRole']) + jf['Steps'].should.equal([]) + jf['SupportedProducts'].should.equal([]) + jf['VisibleToAllUsers'].should.equal(True) + + +@mock_emr +def test_list_clusters(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + expected = {} + + for idx in range(40): + cluster_name = 'jobflow' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'NormalizedInstanceHours': 0, + 'State': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(40, 70): + cluster_name = 'jobflow' + str(idx) + args['Name'] = cluster_name + cluster_id = 
client.run_job_flow(**args)['JobFlowId']
+        client.terminate_job_flows(JobFlowIds=[cluster_id])
+        expected[cluster_id] = {
+            'Id': cluster_id,
+            'Name': cluster_name,
+            'NormalizedInstanceHours': 0,
+            'State': 'TERMINATED'
+        }
+
+    args = {}
+    while True:
+        resp = client.list_clusters(**args)
+        clusters = resp['Clusters']
+        len(clusters).should.be.lower_than_or_equal_to(50)
+        for x in clusters:
+            y = expected[x['Id']]
+            x['Id'].should.equal(y['Id'])
+            x['Name'].should.equal(y['Name'])
+            x['NormalizedInstanceHours'].should.equal(
+                y['NormalizedInstanceHours'])
+            x['Status']['State'].should.equal(y['State'])
+            x['Status']['Timeline'][
+                'CreationDateTime'].should.be.a('datetime.datetime')
+            if y['State'] == 'TERMINATED':
+                x['Status']['Timeline'][
+                    'EndDateTime'].should.be.a('datetime.datetime')
+            else:
+                x['Status']['Timeline'].shouldnt.have.key('EndDateTime')
+            x['Status']['Timeline'][
+                'ReadyDateTime'].should.be.a('datetime.datetime')
+        marker = resp.get('Marker')
+        if marker is None:
+            break
+        args = {'Marker': marker}
+
+    resp = client.list_clusters(ClusterStates=['TERMINATED'])
+    resp['Clusters'].should.have.length_of(30)
+    for x in resp['Clusters']:
+        x['Status']['State'].should.equal('TERMINATED')
+
+    resp = client.list_clusters(CreatedBefore=timestamp)
+    resp['Clusters'].should.have.length_of(40)
+
+    resp = client.list_clusters(CreatedAfter=timestamp)
+    resp['Clusters'].should.have.length_of(30)
+
+
+@mock_emr
+def test_run_job_flow():
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+    resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    resp['ExecutionStatusDetail']['State'].should.equal('WAITING')
+    resp['JobFlowId'].should.equal(cluster_id)
+    resp['Name'].should.equal(args['Name'])
+    resp['Instances']['MasterInstanceType'].should.equal(
+        args['Instances']['MasterInstanceType'])
+    resp['Instances']['SlaveInstanceType'].should.equal(
+        args['Instances']['SlaveInstanceType'])
+    resp['LogUri'].should.equal(args['LogUri'])
+    resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers'])
+    resp['Instances']['NormalizedInstanceHours'].should.equal(0)
+    resp['Steps'].should.equal([])
+
+
+@mock_emr
+def test_run_job_flow_with_invalid_params():
+    client = boto3.client('emr', region_name='us-east-1')
+    with assert_raises(ClientError) as ex:
+        # cannot set both AmiVersion and ReleaseLabel
+        args = deepcopy(run_job_flow_args)
+        args['AmiVersion'] = '2.4'
+        args['ReleaseLabel'] = 'emr-5.0.0'
+        client.run_job_flow(**args)
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+
+
+@mock_emr
+def test_run_job_flow_in_multiple_regions():
+    regions = {}
+    for region in ['us-east-1', 'eu-west-1']:
+        client = boto3.client('emr', region_name=region)
+        args = deepcopy(run_job_flow_args)
+        args['Name'] = region
+        cluster_id = client.run_job_flow(**args)['JobFlowId']
+        regions[region] = {'client': client, 'cluster_id': cluster_id}
+
+    for region in regions.keys():
+        client = regions[region]['client']
+        resp = client.describe_cluster(ClusterId=regions[region]['cluster_id'])
+        resp['Cluster']['Name'].should.equal(region)
+
+
+@mock_emr
+def test_run_job_flow_with_new_params():
+    client = boto3.client('emr', region_name='us-east-1')
+    resp = client.run_job_flow(**run_job_flow_args)
+    resp.should.have.key('JobFlowId')
+
+
+@mock_emr
+def test_run_job_flow_with_visible_to_all_users():
+    client = boto3.client('emr', region_name='us-east-1')
+    for expected in (True, False):
+        args = deepcopy(run_job_flow_args)
+        args['VisibleToAllUsers'] = expected
+        resp = client.run_job_flow(**args)
+        cluster_id = resp['JobFlowId']
+        resp = client.describe_cluster(ClusterId=cluster_id)
+        resp['Cluster']['VisibleToAllUsers'].should.equal(expected)
+
+
+@mock_emr
+def test_run_job_flow_with_instance_groups():
+    input_groups = dict((g['Name'], g) for g in input_instance_groups)
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['Instances'] = {'InstanceGroups': input_instance_groups}
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+    groups = client.list_instance_groups(ClusterId=cluster_id)[
+        'InstanceGroups']
+    for x in groups:
+        y = input_groups[x['Name']]
+        x.should.have.key('Id')
+        x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
+        x['InstanceGroupType'].should.equal(y['InstanceRole'])
+        x['InstanceType'].should.equal(y['InstanceType'])
+        x['Market'].should.equal(y['Market'])
+        if 'BidPrice' in y:
+            x['BidPrice'].should.equal(y['BidPrice'])
+
+
+@mock_emr
+def test_run_job_flow_with_custom_ami():
+    client = boto3.client('emr', region_name='us-east-1')
+
+    with assert_raises(ClientError) as ex:
+        # CustomAmiId available in Amazon EMR 5.7.0 and later
+        args = deepcopy(run_job_flow_args)
+        args['CustomAmiId'] = 'MyEmrCustomId'
+        args['ReleaseLabel'] = 'emr-5.6.0'
+        client.run_job_flow(**args)
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+    ex.exception.response['Error']['Message'].should.equal('Custom AMI is not allowed')
+
+    with assert_raises(ClientError) as ex:
+        args = deepcopy(run_job_flow_args)
+        args['CustomAmiId'] = 'MyEmrCustomId'
+        args['AmiVersion'] = '3.8.1'
+        client.run_job_flow(**args)
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+    ex.exception.response['Error']['Message'].should.equal(
+        'Custom AMI is not supported in this version of EMR')
+
+    with assert_raises(ClientError) as ex:
+        # the AmiVersion/ReleaseLabel conflict is raised before the CustomAmiId check
+        args = deepcopy(run_job_flow_args)
+        args['CustomAmiId'] = 'MyEmrCustomId'
+        args['ReleaseLabel'] = 'emr-5.6.0'
+        args['AmiVersion'] = '3.8.1'
+        client.run_job_flow(**args)
+    ex.exception.response['Error']['Code'].should.equal('ValidationException')
+    ex.exception.response['Error']['Message'].should.contain(
+        'Only one AMI version and release label may be specified.')
+
+    args = deepcopy(run_job_flow_args)
+    args['CustomAmiId'] = 'MyEmrCustomAmi'
+    args['ReleaseLabel'] = 'emr-5.7.0'
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['CustomAmiId'].should.equal('MyEmrCustomAmi')
+
+
+@mock_emr
+def test_set_termination_protection():
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['Instances']['TerminationProtected'] = False
+    resp = client.run_job_flow(**args)
+    cluster_id = resp['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['TerminationProtected'].should.equal(False)
+
+    for expected in (True, False):
+        resp = client.set_termination_protection(JobFlowIds=[cluster_id],
+                                                 TerminationProtected=expected)
+        resp = client.describe_cluster(ClusterId=cluster_id)
+        resp['Cluster']['TerminationProtected'].should.equal(expected)
+
+
+@mock_emr
+def test_set_visible_to_all_users():
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['VisibleToAllUsers'] = False
+    resp = client.run_job_flow(**args)
+    cluster_id = resp['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['VisibleToAllUsers'].should.equal(False)
+
+    for expected in (True, False):
+        resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id],
+                                               VisibleToAllUsers=expected)
+        resp = client.describe_cluster(ClusterId=cluster_id)
+        resp['Cluster']['VisibleToAllUsers'].should.equal(expected)
+
+
+@mock_emr
+def test_terminate_job_flows():
+    client = boto3.client('emr', region_name='us-east-1')
+
+    resp = client.run_job_flow(**run_job_flow_args)
+    cluster_id = resp['JobFlowId']
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['Status']['State'].should.equal('WAITING')
+
+    resp = client.terminate_job_flows(JobFlowIds=[cluster_id])
+    resp = client.describe_cluster(ClusterId=cluster_id)
+    resp['Cluster']['Status']['State'].should.equal('TERMINATED')
+
+
+# testing multiple endpoints for each feature
+
+@mock_emr
+def test_bootstrap_actions():
+    bootstrap_actions = [
+        {'Name': 'bs1',
+         'ScriptBootstrapAction': {
+             'Args': ['arg1', 'arg2'],
+             'Path': 's3://path/to/script'}},
+        {'Name': 'bs2',
+         'ScriptBootstrapAction': {
+             'Args': [],
+             'Path': 's3://path/to/anotherscript'}}
+    ]
+
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['BootstrapActions'] = bootstrap_actions
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+
+    cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    for x, y in zip(cl['BootstrapActions'], bootstrap_actions):
+        x['BootstrapActionConfig'].should.equal(y)
+
+    resp = client.list_bootstrap_actions(ClusterId=cluster_id)
+    for x, y in zip(resp['BootstrapActions'], bootstrap_actions):
+        x['Name'].should.equal(y['Name'])
+        if 'Args' in y['ScriptBootstrapAction']:
+            x['Args'].should.equal(y['ScriptBootstrapAction']['Args'])
+        x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path'])
+
+
+@mock_emr
+def test_instance_groups():
+    input_groups = dict((g['Name'], g) for g in input_instance_groups)
+
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']:
+        del args['Instances'][key]
+    args['Instances']['InstanceGroups'] = input_instance_groups[:2]
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    base_instance_count = jf['Instances']['InstanceCount']
+
+    client.add_instance_groups(
+        JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:])
+
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    jf['Instances']['InstanceCount'].should.equal(
+        sum(g['InstanceCount'] for g in input_instance_groups))
+    for x in jf['Instances']['InstanceGroups']:
+        y = input_groups[x['Name']]
+        if 'BidPrice' in y:
+            x['BidPrice'].should.equal(y['BidPrice'])
+        x['CreationDateTime'].should.be.a('datetime.datetime')
+        # x['EndDateTime'].should.be.a('datetime.datetime')
+        x.should.have.key('InstanceGroupId')
+        x['InstanceRequestCount'].should.equal(y['InstanceCount'])
+        x['InstanceRole'].should.equal(y['InstanceRole'])
+        x['InstanceRunningCount'].should.equal(y['InstanceCount'])
+        x['InstanceType'].should.equal(y['InstanceType'])
+        # x['LastStateChangeReason'].should.equal(y['LastStateChangeReason'])
+        x['Market'].should.equal(y['Market'])
+        x['Name'].should.equal(y['Name'])
+        x['ReadyDateTime'].should.be.a('datetime.datetime')
+        x['StartDateTime'].should.be.a('datetime.datetime')
+        x['State'].should.equal('RUNNING')
+
+    groups = client.list_instance_groups(ClusterId=cluster_id)[
+        'InstanceGroups']
+    for x in groups:
+        y = input_groups[x['Name']]
+        if 'BidPrice' in y:
+            x['BidPrice'].should.equal(y['BidPrice'])
+        # Configurations
+        # EbsBlockDevices
+        # EbsOptimized
+        x.should.have.key('Id')
+        x['InstanceGroupType'].should.equal(y['InstanceRole'])
+        x['InstanceType'].should.equal(y['InstanceType'])
+        x['Market'].should.equal(y['Market'])
+        x['Name'].should.equal(y['Name'])
+        x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
+        x['RunningInstanceCount'].should.equal(y['InstanceCount'])
+        # ShrinkPolicy
+        x['Status']['State'].should.equal('RUNNING')
+        x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types)
+        # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types)
+        x['Status']['Timeline'][
+            'CreationDateTime'].should.be.a('datetime.datetime')
+        # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
+        x['Status']['Timeline'][
+            'ReadyDateTime'].should.be.a('datetime.datetime')
+
+    igs = dict((g['Name'], g) for g in groups)
+    client.modify_instance_groups(
+        InstanceGroups=[
+            {'InstanceGroupId': igs['task-1']['Id'],
+             'InstanceCount': 2},
+            {'InstanceGroupId': igs['task-2']['Id'],
+             'InstanceCount': 3}])
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5)
+    igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups'])
+    igs['task-1']['InstanceRunningCount'].should.equal(2)
+    igs['task-2']['InstanceRunningCount'].should.equal(3)
+
+
+@mock_emr
+def test_steps():
+    input_steps = [{
+        'HadoopJarStep': {
+            'Args': [
+                'hadoop-streaming',
+                '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py',
+                '-mapper', 'python wordSplitter.py',
+                '-input', 's3://elasticmapreduce/samples/wordcount/input',
+                '-output', 's3://output_bucket/output/wordcount_output',
+                '-reducer', 'aggregate'
+            ],
+            'Jar': 'command-runner.jar',
+        },
+        'Name': 'My wordcount example',
+    }, {
+        'HadoopJarStep': {
+            'Args': [
+                'hadoop-streaming',
+                '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py',
+                '-mapper', 'python wordSplitter2.py',
+                '-input', 's3://elasticmapreduce/samples/wordcount/input2',
+                '-output', 's3://output_bucket/output/wordcount_output2',
+                '-reducer', 'aggregate'
+            ],
+            'Jar': 'command-runner.jar',
+        },
+        'Name': 'My wordcount example2',
+    }]
+
+    # TODO: implementation and test for cancel_steps
+
+    client = boto3.client('emr', region_name='us-east-1')
+    args = deepcopy(run_job_flow_args)
+    args['Steps'] = [input_steps[0]]
+    cluster_id = client.run_job_flow(**args)['JobFlowId']
+
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    jf['Steps'].should.have.length_of(1)
+
+    client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]])
+
+    jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+    jf['Steps'].should.have.length_of(2)
+    for idx, (x, y) in enumerate(zip(jf['Steps'], input_steps)):
+        x['ExecutionStatusDetail'].should.have.key('CreationDateTime')
+        # x['ExecutionStatusDetail'].should.have.key('EndDateTime')
+        # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason')
+        # x['ExecutionStatusDetail'].should.have.key('StartDateTime')
+        x['ExecutionStatusDetail']['State'].should.equal(
+            'STARTING' if idx == 0 else 'PENDING')
+
x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['StepConfig']['HadoopJarStep'][ + 'Args'].should.equal(y['HadoopJarStep']['Args']) + x['StepConfig']['HadoopJarStep'][ + 'Jar'].should.equal(y['HadoopJarStep']['Jar']) + if 'MainClass' in y['HadoopJarStep']: + x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( + y['HadoopJarStep']['MainClass']) + if 'Properties' in y['HadoopJarStep']: + x['StepConfig']['HadoopJarStep']['Properties'].should.equal( + y['HadoopJarStep']['Properties']) + x['StepConfig']['Name'].should.equal(y['Name']) + + expected = dict((s['Name'], s) for s in input_steps) + + steps = client.list_steps(ClusterId=cluster_id)['Steps'] + steps.should.have.length_of(2) + for x in steps: + y = expected[x['Name']] + x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) + x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + # Properties + x['Id'].should.be.a(six.string_types) + x['Name'].should.equal(y['Name']) + x['Status']['State'].should.be.within(['STARTING', 'PENDING']) + # StateChangeReason + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + + x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step'] + x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) + x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + # Properties + x['Id'].should.be.a(six.string_types) + x['Name'].should.equal(y['Name']) + x['Status']['State'].should.be.within(['STARTING', 'PENDING']) + # StateChangeReason + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + + step_id = steps[0]['Id'] + steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps'] + steps.should.have.length_of(1) + steps[0]['Id'].should.equal(step_id) + + steps = client.list_steps(ClusterId=cluster_id, + StepStates=['STARTING'])['Steps'] + steps.should.have.length_of(1) + steps[0]['Id'].should.equal(step_id) + + +@mock_emr +def test_tags(): + input_tags = [{'Key': 'newkey1', 'Value': 'newval1'}, + {'Key': 'newkey2', 'Value': 'newval2'}] + + client = boto3.client('emr', region_name='us-east-1') + cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId'] + + client.add_tags(ResourceId=cluster_id, Tags=input_tags) + resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + resp['Tags'].should.have.length_of(2) + dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( + dict((t['Key'], t['Value']) for t in input_tags)) + + client.remove_tags(ResourceId=cluster_id, TagKeys=[ + t['Key'] for t in input_tags]) + resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + resp['Tags'].should.equal([]) diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 72daed28d..e4891f307 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -1,426 +1,447 @@ -from __future__ import unicode_literals - -import sure # noqa -import re -from 
nose.tools import assert_raises -import boto3 -from botocore.client import ClientError - - -from datetime import datetime -import pytz - -from moto import mock_glue -from . import helpers - - -@mock_glue -def test_create_database(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - response = helpers.get_database(client, database_name) - database = response['Database'] - - database.should.equal({'Name': database_name}) - - -@mock_glue -def test_create_database_already_exists(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'cantcreatethisdatabasetwice' - helpers.create_database(client, database_name) - - with assert_raises(ClientError) as exc: - helpers.create_database(client, database_name) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_database_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'nosuchdatabase' - - with assert_raises(ClientError) as exc: - helpers.get_database(client, database_name) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') - - -@mock_glue -def test_create_table(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'myspecialtable' - table_input = helpers.create_table_input(database_name, table_name) - helpers.create_table(client, database_name, table_name, table_input) - - response = helpers.get_table(client, database_name, table_name) - table = response['Table'] - - table['Name'].should.equal(table_input['Name']) - table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) - table['PartitionKeys'].should.equal(table_input['PartitionKeys']) - - -@mock_glue -def test_create_table_already_exists(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'cantcreatethistabletwice' - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.create_table(client, database_name, table_name) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_tables(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] - table_inputs = {} - - for table_name in table_names: - table_input = helpers.create_table_input(database_name, table_name) - table_inputs[table_name] = table_input - helpers.create_table(client, database_name, table_name, table_input) - - response = helpers.get_tables(client, database_name) - - tables = response['TableList'] - - tables.should.have.length_of(3) - - for table in tables: - table_name = table['Name'] - table_name.should.equal(table_inputs[table_name]['Name']) - table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) - table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) - - -@mock_glue -def test_get_table_versions(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, 
database_name) - - table_name = 'myfirsttable' - version_inputs = {} - - table_input = helpers.create_table_input(database_name, table_name) - helpers.create_table(client, database_name, table_name, table_input) - version_inputs["1"] = table_input - - columns = [{'Name': 'country', 'Type': 'string'}] - table_input = helpers.create_table_input(database_name, table_name, columns=columns) - helpers.update_table(client, database_name, table_name, table_input) - version_inputs["2"] = table_input - - # Updateing with an indentical input should still create a new version - helpers.update_table(client, database_name, table_name, table_input) - version_inputs["3"] = table_input - - response = helpers.get_table_versions(client, database_name, table_name) - - vers = response['TableVersions'] - - vers.should.have.length_of(3) - vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([]) - vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns) - - for n, ver in enumerate(vers): - n = str(n + 1) - ver['VersionId'].should.equal(n) - ver['Table']['Name'].should.equal(table_name) - ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor']) - ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys']) - - response = helpers.get_table_version(client, database_name, table_name, "3") - ver = response['TableVersion'] - - ver['VersionId'].should.equal("3") - ver['Table']['Name'].should.equal(table_name) - ver['Table']['StorageDescriptor']['Columns'].should.equal(columns) - - -@mock_glue -def test_get_table_version_not_found(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_table_version(client, database_name, 'myfirsttable', "20") - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('version', re.I) - - -@mock_glue -def test_get_table_version_invalid_input(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int") - - exc.exception.response['Error']['Code'].should.equal('InvalidInputException') - - -@mock_glue -def test_get_table_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - with assert_raises(ClientError) as exc: - helpers.get_table(client, database_name, 'myfirsttable') - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found') - - -@mock_glue -def test_get_table_when_database_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'nosuchdatabase' - - with assert_raises(ClientError) as exc: - helpers.get_table(client, database_name, 'myfirsttable') - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') - - -@mock_glue -def 
test_get_partitions_empty(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - response = client.get_partitions(DatabaseName=database_name, TableName=table_name) - - response['Partitions'].should.have.length_of(0) - - -@mock_glue -def test_create_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - before = datetime.now(pytz.utc) - - part_input = helpers.create_partition_input(database_name, table_name, values=values) - helpers.create_partition(client, database_name, table_name, part_input) - - after = datetime.now(pytz.utc) - - response = client.get_partitions(DatabaseName=database_name, TableName=table_name) - - partitions = response['Partitions'] - - partitions.should.have.length_of(1) - - partition = partitions[0] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) - partition['Values'].should.equal(values) - partition['CreationTime'].should.be.greater_than(before) - partition['CreationTime'].should.be.lower_than(after) - - -@mock_glue -def test_create_partition_already_exist(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - helpers.create_partition(client, database_name, table_name, values=values) - - with assert_raises(ClientError) as exc: - helpers.create_partition(client, database_name, table_name, values=values) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_partition_not_found(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_partition(client, database_name, table_name, values) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_get_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - values = [['2018-10-01'], ['2018-09-01']] - - helpers.create_partition(client, database_name, table_name, values=values[0]) - helpers.create_partition(client, database_name, table_name, values=values[1]) - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) - - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['Values'].should.equal(values[1]) - - -@mock_glue -def test_update_partition_not_found_moving(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - - 
helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_update_partition_not_found_change_in_place(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=values, values=values) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_update_partition_cannot_overwrite(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - values = [['2018-10-01'], ['2018-09-01']] - - helpers.create_partition(client, database_name, table_name, values=values[0]) - helpers.create_partition(client, database_name, table_name, values=values[1]) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_update_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - helpers.create_partition(client, database_name, table_name, values=values) - - response = helpers.update_partition( - client, - database_name, - table_name, - old_values=values, - values=values, - columns=[{'Name': 'country', 'Type': 'string'}], - ) - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) - - -@mock_glue -def test_update_partition_move(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - new_values = ['2018-09-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - helpers.create_partition(client, database_name, table_name, values=values) - - response = helpers.update_partition( - client, - database_name, - table_name, - old_values=values, - values=new_values, - columns=[{'Name': 'country', 'Type': 'string'}], - ) - - with assert_raises(ClientError) as exc: - helpers.get_partition(client, database_name, table_name, values) - - # Old partition shouldn't exist anymore - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, 
PartitionValues=new_values) - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) +from __future__ import unicode_literals + +import sure # noqa +import re +from nose.tools import assert_raises +import boto3 +from botocore.client import ClientError + + +from datetime import datetime +import pytz + +from moto import mock_glue +from . import helpers + + +@mock_glue +def test_create_database(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + response = helpers.get_database(client, database_name) + database = response['Database'] + + database.should.equal({'Name': database_name}) + + +@mock_glue +def test_create_database_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'cantcreatethisdatabasetwice' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.create_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_create_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_table(client, database_name, table_name) + table = response['Table'] + + table['Name'].should.equal(table_input['Name']) + table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_input['PartitionKeys']) + + +@mock_glue +def test_create_table_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'cantcreatethistabletwice' + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.create_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_tables(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] + table_inputs = {} + + for table_name in table_names: + table_input = helpers.create_table_input(database_name, table_name) + table_inputs[table_name] = table_input + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_tables(client, database_name) + + tables = response['TableList'] + + tables.should.have.length_of(3) + + for table in tables: + table_name = table['Name'] + table_name.should.equal(table_inputs[table_name]['Name']) + 
table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor'])
+        table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys'])
+
+
+@mock_glue
+def test_get_table_versions():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    helpers.create_database(client, database_name)
+
+    table_name = 'myfirsttable'
+    version_inputs = {}
+
+    table_input = helpers.create_table_input(database_name, table_name)
+    helpers.create_table(client, database_name, table_name, table_input)
+    version_inputs["1"] = table_input
+
+    columns = [{'Name': 'country', 'Type': 'string'}]
+    table_input = helpers.create_table_input(database_name, table_name, columns=columns)
+    helpers.update_table(client, database_name, table_name, table_input)
+    version_inputs["2"] = table_input
+
+    # Updating with an identical input should still create a new version
+    helpers.update_table(client, database_name, table_name, table_input)
+    version_inputs["3"] = table_input
+
+    response = helpers.get_table_versions(client, database_name, table_name)
+
+    vers = response['TableVersions']
+
+    vers.should.have.length_of(3)
+    vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([])
+    vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns)
+
+    for n, ver in enumerate(vers):
+        n = str(n + 1)
+        ver['VersionId'].should.equal(n)
+        ver['Table']['Name'].should.equal(table_name)
+        ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor'])
+        ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys'])
+
+    response = helpers.get_table_version(client, database_name, table_name, "3")
+    ver = response['TableVersion']
+
+    ver['VersionId'].should.equal("3")
+    ver['Table']['Name'].should.equal(table_name)
+    ver['Table']['StorageDescriptor']['Columns'].should.equal(columns)
+
+
+@mock_glue
+def test_get_table_version_not_found():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+    helpers.create_table(client, database_name, table_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table_version(client, database_name, 'myfirsttable', "20")
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('version', re.I)
+
+
+@mock_glue
+def test_get_table_version_invalid_input():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+    helpers.create_table(client, database_name, table_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int")
+
+    exc.exception.response['Error']['Code'].should.equal('InvalidInputException')
+
+
+@mock_glue
+def test_get_table_not_exits():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    helpers.create_database(client, database_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table(client, database_name, 'myfirsttable')
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found')
+
+
+@mock_glue
+def test_get_table_when_database_not_exits():
+    client = boto3.client('glue', region_name='us-east-1')
+
database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_delete_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + result = client.delete_table(DatabaseName=database_name, Name=table_name) + result['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # confirm table is deleted + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found') + + +@mock_glue +def test_get_partitions_empty(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + response['Partitions'].should.have.length_of(0) + + +@mock_glue +def test_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(1) + + partition = partitions[0] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) + partition['Values'].should.equal(values) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + with assert_raises(ClientError) as exc: + helpers.create_partition(client, database_name, table_name, values=values) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + 
helpers.get_partition(client, database_name, table_name, values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) + + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_update_partition_not_found_moving(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_not_found_change_in_place(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values, values=values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_cannot_overwrite(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_update_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + response = client.get_partition(DatabaseName=database_name, 
TableName=table_name, PartitionValues=values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + + +@mock_glue +def test_update_partition_move(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + new_values = ['2018-09-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=new_values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + # Old partition shouldn't exist anymore + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 1cd6f9e62..01f52af12 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -128,7 +128,6 @@ def test_create_role_and_instance_profile(): profile = conn.create_instance_profile('my-other-profile') profile.path.should.equal('/') - @mock_iam_deprecated() def test_remove_role_from_instance_profile(): conn = boto.connect_iam() @@ -358,7 +357,7 @@ def test_list_policy_versions(): versions = conn.list_policy_versions( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") versions.get('Versions')[0].get('VersionId').should.equal('v1') - + conn.create_policy_version( PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", PolicyDocument='{"second":"policy"}') @@ -1292,4 +1291,22 @@ def test_create_role_no_path(): conn = boto3.client('iam', region_name='us-east-1') resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test') resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role') + resp.get('Role').should_not.have.key('PermissionsBoundary') +@mock_iam() +def test_create_role_with_permissions_boundary(): + conn = boto3.client('iam', region_name='us-east-1') + boundary = 'arn:aws:iam::123456789012:policy/boundary' + resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=boundary) + expected = { + 'PermissionsBoundaryType': 'PermissionsBoundaryPolicy', + 'PermissionsBoundaryArn': boundary + } + resp.get('Role').get('PermissionsBoundary').should.equal(expected) + + invalid_boundary_arn = 'arn:aws:iam::123456789:not_a_boundary' + with assert_raises(ClientError): + conn.create_role(RoleName='bad-boundary', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=invalid_boundary_arn) + + # Ensure the PermissionsBoundary is included in role listing as well + conn.list_roles().get('Roles')[0].get('PermissionsBoundary').should.equal(expected) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 33497a382..8f11912b0 100644 --- a/tests/test_iot/test_iot.py +++ 
b/tests/test_iot/test_iot.py @@ -350,7 +350,7 @@ def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): @mock_iot def test_certs(): - client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot', region_name='us-east-1') cert = client.create_keys_and_certificate(setAsActive=True) cert.should.have.key('certificateArn').which.should_not.be.none cert.should.have.key('certificateId').which.should_not.be.none @@ -367,6 +367,29 @@ def test_certs(): cert_desc.should.have.key('certificateId').which.should_not.be.none cert_desc.should.have.key('certificatePem').which.should_not.be.none cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_pem = cert_desc['certificatePem'] + + res = client.list_certificates() + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + # Test register_certificate flow + cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificateArn').which.should_not.be.none + cert_id = cert['certificateId'] res = client.list_certificates() res.should.have.key('certificates').which.should.have.length_of(1) @@ -378,11 +401,12 @@ def test_certs(): client.update_certificate(certificateId=cert_id, newStatus='REVOKED') cert = client.describe_certificate(certificateId=cert_id) - cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') client.delete_certificate(certificateId=cert_id) res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(0) + res.should.have.key('certificates') @mock_iot diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index 736dc05c3..6986f79fc 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -1,624 +1,647 @@ -from __future__ import unicode_literals - -import boto.kinesis -from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException -import boto3 -import sure # noqa -import datetime -import time - -from moto import mock_kinesis, mock_kinesis_deprecated - - -@mock_kinesis_deprecated -def test_create_cluster(): - conn = boto.kinesis.connect_to_region("us-west-2") - - conn.create_stream("my_stream", 2) - - stream_response = conn.describe_stream("my_stream") - - stream = stream_response["StreamDescription"] - stream["StreamName"].should.equal("my_stream") - stream["HasMoreShards"].should.equal(False) - stream["StreamARN"].should.equal( - "arn:aws:kinesis:us-west-2:123456789012:my_stream") - stream["StreamStatus"].should.equal("ACTIVE") - - shards = stream['Shards'] - shards.should.have.length_of(2) - - -@mock_kinesis_deprecated -def test_describe_non_existant_stream(): - conn = boto.kinesis.connect_to_region("us-east-1") - 
conn.describe_stream.when.called_with( - "not-a-stream").should.throw(ResourceNotFoundException) - - -@mock_kinesis_deprecated -def test_list_and_delete_stream(): - conn = boto.kinesis.connect_to_region("us-west-2") - - conn.create_stream("stream1", 1) - conn.create_stream("stream2", 1) - - conn.list_streams()['StreamNames'].should.have.length_of(2) - - conn.delete_stream("stream2") - - conn.list_streams()['StreamNames'].should.have.length_of(1) - - # Delete invalid id - conn.delete_stream.when.called_with( - "not-a-stream").should.throw(ResourceNotFoundException) - - -@mock_kinesis -def test_list_many_streams(): - conn = boto3.client('kinesis', region_name="us-west-2") - - for i in range(11): - conn.create_stream(StreamName="stream%d" % i, ShardCount=1) - - resp = conn.list_streams() - stream_names = resp["StreamNames"] - has_more_streams = resp["HasMoreStreams"] - stream_names.should.have.length_of(10) - has_more_streams.should.be(True) - resp2 = conn.list_streams(ExclusiveStartStreamName=stream_names[-1]) - stream_names = resp2["StreamNames"] - has_more_streams = resp2["HasMoreStreams"] - stream_names.should.have.length_of(1) - has_more_streams.should.equal(False) - - -@mock_kinesis_deprecated -def test_basic_shard_iterator(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - shard_iterator = response['NextShardIterator'] - response['Records'].should.equal([]) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_get_invalid_shard_iterator(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.get_shard_iterator.when.called_with( - stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) - - -@mock_kinesis_deprecated -def test_put_records(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - data = "hello world" - partition_key = "1234" - - conn.put_record.when.called_with( - stream_name, data, 1234).should.throw(InvalidArgumentException) - - conn.put_record(stream_name, data, partition_key) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - shard_iterator = response['NextShardIterator'] - response['Records'].should.have.length_of(1) - record = response['Records'][0] - - record["Data"].should.equal("hello world") - record["PartitionKey"].should.equal("1234") - record["SequenceNumber"].should.equal("1") - - -@mock_kinesis_deprecated -def test_get_records_limit(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - data = "hello world" - - for index in range(5): - conn.put_record(stream_name, data, str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 
'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Retrieve only 3 records - response = conn.get_records(shard_iterator, limit=3) - response['Records'].should.have.length_of(3) - - # Then get the rest of the results - next_shard_iterator = response['NextShardIterator'] - response = conn.get_records(next_shard_iterator) - response['Records'].should.have.length_of(2) - - -@mock_kinesis_deprecated -def test_get_records_at_sequence_number(): - # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by - # a specific sequence number. - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting at that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - # And the first result returned should be the second item - response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id) - response['Records'][0]['Data'].should.equal('2') - - -@mock_kinesis_deprecated -def test_get_records_after_sequence_number(): - # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted - # by a specific sequence number. - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting after that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - # And the first result returned should be the third item - response['Records'][0]['Data'].should.equal('3') - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_get_records_latest(): - # LATEST - Start reading just after the most recent record in the shard, - # so that you always read the most recent data in the shard. 
- conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting after that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'LATEST', second_sequence_id) - shard_iterator = response['ShardIterator'] - - # Write some more data - conn.put_record(stream_name, "last_record", "last_record") - - response = conn.get_records(shard_iterator) - # And the only result returned should be the new item - response['Records'].should.have.length_of(1) - response['Records'][0]['PartitionKey'].should.equal('last_record') - response['Records'][0]['Data'].should.equal('last_record') - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_at_timestamp(): - # AT_TIMESTAMP - Read the first record at or after the specified timestamp - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - for index in range(1, 5): - conn.put_record(StreamName=stream_name, - Data=str(index), - PartitionKey=str(index)) - - # When boto3 floors the timestamp that we pass to get_shard_iterator to - # second precision even though AWS supports ms precision: - # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html - # To test around this limitation we wait until we well into the next second - # before capturing the time and storing the records we expect to retrieve. 
- time.sleep(1.0) - timestamp = datetime.datetime.utcnow() - - keys = [str(i) for i in range(5, 10)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(len(keys)) - partition_keys = [r['PartitionKey'] for r in response['Records']] - partition_keys.should.equal(keys) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_at_very_old_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - keys = [str(i) for i in range(1, 5)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=1) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - response['Records'].should.have.length_of(len(keys)) - partition_keys = [r['PartitionKey'] for r in response['Records']] - partition_keys.should.equal(keys) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_timestamp_filtering(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - conn.put_record(StreamName=stream_name, - Data='0', - PartitionKey='0') - - time.sleep(1.0) - timestamp = datetime.datetime.utcnow() - - conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') - - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - response['Records'].should.have.length_of(1) - response['Records'][0]['PartitionKey'].should.equal('1') - response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ - greater_than(timestamp) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_millis_behind_latest(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - conn.put_record(StreamName=stream_name, - Data='0', - PartitionKey='0') - time.sleep(1.0) - conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') - - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator, Limit=1) - 
response['Records'].should.have.length_of(1) - response['MillisBehindLatest'].should.be.greater_than(0) - - -@mock_kinesis -def test_get_records_at_very_new_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - keys = [str(i) for i in range(1, 5)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(0) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_from_empty_stream_at_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - timestamp = datetime.datetime.utcnow() - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(0) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_invalid_shard_iterator_type(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator.when.called_with( - stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) - - -@mock_kinesis_deprecated -def test_add_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) - conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) - - -@mock_kinesis_deprecated -def test_list_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val1') - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val2') - conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val3') - conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) - tags = 
dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val4') - - -@mock_kinesis_deprecated -def test_remove_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val1') - conn.remove_tags_from_stream(stream_name, ['tag1']) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal(None) - - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val2') - conn.remove_tags_from_stream(stream_name, ['tag2']) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal(None) - - -@mock_kinesis_deprecated -def test_split_shard(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = 'my_stream' - - conn.create_stream(stream_name, 2) - - # Create some data - for index in range(1, 100): - conn.put_record(stream_name, str(index), str(index)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - shard_range = shards[0]['HashKeyRange'] - new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 - conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - shard_range = shards[2]['HashKeyRange'] - new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 - conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - -@mock_kinesis_deprecated -def test_merge_shards(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = 'my_stream' - - conn.create_stream(stream_name, 4) - - # Create some data - for index in range(1, 100): - conn.put_record(stream_name, str(index), str(index)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - - conn.merge_shards.when.called_with( - stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - 
conn.merge_shards(stream_name, 'shardId-000000000000', - 'shardId-000000000001') - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 'shardId-000000000002', - 'shardId-000000000000') - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) +from __future__ import unicode_literals + +import datetime +import time + +import boto.kinesis +import boto3 +from boto.kinesis.exceptions import ResourceNotFoundException, \ + InvalidArgumentException + +from moto import mock_kinesis, mock_kinesis_deprecated + + +@mock_kinesis_deprecated +def test_create_stream(): + conn = boto.kinesis.connect_to_region("us-west-2") + + conn.create_stream("my_stream", 2) + + stream_response = conn.describe_stream("my_stream") + + stream = stream_response["StreamDescription"] + stream["StreamName"].should.equal("my_stream") + stream["HasMoreShards"].should.equal(False) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:my_stream") + stream["StreamStatus"].should.equal("ACTIVE") + + shards = stream['Shards'] + shards.should.have.length_of(2) + + +@mock_kinesis_deprecated +def test_describe_non_existent_stream(): + conn = boto.kinesis.connect_to_region("us-east-1") + conn.describe_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) + + +@mock_kinesis_deprecated +def test_list_and_delete_stream(): + conn = boto.kinesis.connect_to_region("us-west-2") + + conn.create_stream("stream1", 1) + conn.create_stream("stream2", 1) + + conn.list_streams()['StreamNames'].should.have.length_of(2) + + conn.delete_stream("stream2") + + conn.list_streams()['StreamNames'].should.have.length_of(1) + + # Delete invalid id + conn.delete_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) + + +@mock_kinesis +def test_list_many_streams(): + conn = boto3.client('kinesis', region_name="us-west-2") + + for i in range(11): + conn.create_stream(StreamName="stream%d" % i, ShardCount=1) + + resp = conn.list_streams() + stream_names = resp["StreamNames"] + has_more_streams = resp["HasMoreStreams"] + stream_names.should.have.length_of(10) + has_more_streams.should.be(True) + resp2 = conn.list_streams(ExclusiveStartStreamName=stream_names[-1]) + stream_names = resp2["StreamNames"] + has_more_streams = resp2["HasMoreStreams"] + stream_names.should.have.length_of(1) + has_more_streams.should.equal(False) + + +@mock_kinesis +def test_describe_stream_summary(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = 'my_stream_summary' + shard_count = 5 + conn.create_stream(StreamName=stream_name, ShardCount=shard_count) + + resp = conn.describe_stream_summary(StreamName=stream_name) + stream = resp["StreamDescriptionSummary"] + + stream["StreamName"].should.equal(stream_name) + stream["OpenShardCount"].should.equal(shard_count) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:{}".format(stream_name)) + stream["StreamStatus"].should.equal("ACTIVE") + + +@mock_kinesis_deprecated +def test_basic_shard_iterator(): + conn = boto.kinesis.connect_to_region("us-west-2") + + 
stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + response['Records'].should.equal([]) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_get_invalid_shard_iterator(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.get_shard_iterator.when.called_with( + stream_name, "123", 'TRIM_HORIZON').should.throw( + ResourceNotFoundException) + + +@mock_kinesis_deprecated +def test_put_records(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + data = "hello world" + partition_key = "1234" + + conn.put_record.when.called_with( + stream_name, data, 1234).should.throw(InvalidArgumentException) + + conn.put_record(stream_name, data, partition_key) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + response['Records'].should.have.length_of(1) + record = response['Records'][0] + + record["Data"].should.equal("hello world") + record["PartitionKey"].should.equal("1234") + record["SequenceNumber"].should.equal("1") + + +@mock_kinesis_deprecated +def test_get_records_limit(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + data = "hello world" + + for index in range(5): + conn.put_record(stream_name, data, str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Retrieve only 3 records + response = conn.get_records(shard_iterator, limit=3) + response['Records'].should.have.length_of(3) + + # Then get the rest of the results + next_shard_iterator = response['NextShardIterator'] + response = conn.get_records(next_shard_iterator) + response['Records'].should.have.length_of(2) + + +@mock_kinesis_deprecated +def test_get_records_at_sequence_number(): + # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + # a specific sequence number. 
+ conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + for index in range(1, 5): + conn.put_record(stream_name, str(index), str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Get the second record + response = conn.get_records(shard_iterator, limit=2) + second_sequence_id = response['Records'][1]['SequenceNumber'] + + # Then get a new iterator starting at that id + response = conn.get_shard_iterator( + stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + # And the first result returned should be the second item + response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id) + response['Records'][0]['Data'].should.equal('2') + + +@mock_kinesis_deprecated +def test_get_records_after_sequence_number(): + # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + # by a specific sequence number. + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + for index in range(1, 5): + conn.put_record(stream_name, str(index), str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Get the second record + response = conn.get_records(shard_iterator, limit=2) + second_sequence_id = response['Records'][1]['SequenceNumber'] + + # Then get a new iterator starting after that id + response = conn.get_shard_iterator( + stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + # And the first result returned should be the third item + response['Records'][0]['Data'].should.equal('3') + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_get_records_latest(): + # LATEST - Start reading just after the most recent record in the shard, + # so that you always read the most recent data in the shard. 
+ conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + for index in range(1, 5): + conn.put_record(stream_name, str(index), str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Get the second record + response = conn.get_records(shard_iterator, limit=2) + second_sequence_id = response['Records'][1]['SequenceNumber'] + + # Then get a new iterator starting after that id + response = conn.get_shard_iterator( + stream_name, shard_id, 'LATEST', second_sequence_id) + shard_iterator = response['ShardIterator'] + + # Write some more data + conn.put_record(stream_name, "last_record", "last_record") + + response = conn.get_records(shard_iterator) + # And the only result returned should be the new item + response['Records'].should.have.length_of(1) + response['Records'][0]['PartitionKey'].should.equal('last_record') + response['Records'][0]['Data'].should.equal('last_record') + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_at_timestamp(): + # AT_TIMESTAMP - Read the first record at or after the specified timestamp + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + for index in range(1, 5): + conn.put_record(StreamName=stream_name, + Data=str(index), + PartitionKey=str(index)) + + # boto3 floors the timestamp that we pass to get_shard_iterator to + # second precision even though AWS supports ms precision: + # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html + # To work around this limitation we wait until we are well into the next second + # before capturing the time and storing the records we expect to retrieve. 
+ time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + keys = [str(i) for i in range(5, 10)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_at_very_old_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=1) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_timestamp_filtering(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + + time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(1) + response['Records'][0]['PartitionKey'].should.equal('1') + response['Records'][0]['ApproximateArrivalTimestamp'].should.be. 
\ + greater_than(timestamp) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_millis_behind_latest(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + time.sleep(1.0) + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator, Limit=1) + response['Records'].should.have.length_of(1) + response['MillisBehindLatest'].should.be.greater_than(0) + + +@mock_kinesis +def test_get_records_at_very_new_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_from_empty_stream_at_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + timestamp = datetime.datetime.utcnow() + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_invalid_shard_iterator_type(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator.when.called_with( + stream_name, shard_id, 'invalid-type').should.throw( + InvalidArgumentException) + + +@mock_kinesis_deprecated +def test_add_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + + +@mock_kinesis_deprecated +def test_list_tags(): + conn = 
boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val1') + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val2') + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val3') + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val4') + + +@mock_kinesis_deprecated +def test_remove_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val1') + conn.remove_tags_from_stream(stream_name, ['tag1']) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal(None) + + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val2') + conn.remove_tags_from_stream(stream_name, ['tag2']) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal(None) + + +@mock_kinesis_deprecated +def test_split_shard(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = 'my_stream' + + conn.create_stream(stream_name, 2) + + # Create some data + for index in range(1, 100): + conn.put_record(stream_name, str(index), str(index)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(2) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + shard_range = shards[0]['HashKeyRange'] + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 + conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(3) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + shard_range = shards[2]['HashKeyRange'] + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 + conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + +@mock_kinesis_deprecated +def 
test_merge_shards(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = 'my_stream' + + conn.create_stream(stream_name, 4) + + # Create some data + for index in range(1, 100): + conn.put_record(stream_name, str(index), str(index)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + + conn.merge_shards.when.called_with( + stream_name, 'shardId-000000000000', + 'shardId-000000000002').should.throw(InvalidArgumentException) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + conn.merge_shards(stream_name, 'shardId-000000000000', + 'shardId-000000000001') + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(3) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + conn.merge_shards(stream_name, 'shardId-000000000002', + 'shardId-000000000000') + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(2) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index e7ce9f74b..f0d77d3e9 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -18,13 +18,14 @@ from dateutil.tz import tzutc @mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") + with freeze_time("2015-01-01 00:00:00"): + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['CreationDate'].should.equal("1420070400") @mock_kms_deprecated @@ -980,5 +981,3 @@ def test_put_key_policy_key_not_found(): PolicyName='default', Policy='new policy' ) - - diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 443bc8c2f..7048061f0 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,128 +1,164 @@ -import boto3 -import sure # noqa -import six -from botocore.exceptions import ClientError - -from moto import mock_logs, settings -from nose.tools import assert_raises - -_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' - - -@mock_logs -def test_log_group_create(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - response = conn.create_log_group(logGroupName=log_group_name) - - response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) - assert len(response['logGroups']) == 1 - - response = conn.delete_log_group(logGroupName=log_group_name) - - -@mock_logs -def test_exceptions(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - 
log_stream_name = 'dummp-stream' - conn.create_log_group(logGroupName=log_group_name) - with assert_raises(ClientError): - conn.create_log_group(logGroupName=log_group_name) - - # descrine_log_groups is not implemented yet - - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - with assert_raises(ClientError): - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - - conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=[ - { - 'timestamp': 0, - 'message': 'line' - }, - ], - ) - - with assert_raises(ClientError): - conn.put_log_events( - logGroupName=log_group_name, - logStreamName="invalid-stream", - logEvents=[ - { - 'timestamp': 0, - 'message': 'line' - }, - ], - ) - - -@mock_logs -def test_put_logs(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'stream' - conn.create_log_group(logGroupName=log_group_name) - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - messages = [ - {'timestamp': 0, 'message': 'hello'}, - {'timestamp': 0, 'message': 'world'} - ] - putRes = conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=messages - ) - res = conn.get_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - events = res['events'] - nextSequenceToken = putRes['nextSequenceToken'] - assert isinstance(nextSequenceToken, six.string_types) == True - assert len(nextSequenceToken) == 56 - events.should.have.length_of(2) - - -@mock_logs -def test_filter_logs_interleaved(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'stream' - conn.create_log_group(logGroupName=log_group_name) - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - messages = [ - {'timestamp': 0, 'message': 'hello'}, - {'timestamp': 0, 'message': 'world'} - ] - conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=messages - ) - res = conn.filter_log_events( - logGroupName=log_group_name, - logStreamNames=[log_stream_name], - interleaved=True, - ) - events = res['events'] - for original_message, resulting_event in zip(messages, events): - resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) - resulting_event['timestamp'].should.equal(original_message['timestamp']) - resulting_event['message'].should.equal(original_message['message']) - +import boto3 +import sure # noqa +import six +from botocore.exceptions import ClientError + +from moto import mock_logs, settings +from nose.tools import assert_raises + +_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' + + +@mock_logs +def test_log_group_create(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + # AWS defaults to Never Expire for log group retention + assert response['logGroups'][0].get('retentionInDays') is None + + response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_exceptions(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'dummy-stream' + conn.create_log_group(logGroupName=log_group_name) + with assert_raises(ClientError): + 
conn.create_log_group(logGroupName=log_group_name) + + # descrine_log_groups is not implemented yet + + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + with assert_raises(ClientError): + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + with assert_raises(ClientError): + conn.put_log_events( + logGroupName=log_group_name, + logStreamName="invalid-stream", + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + +@mock_logs +def test_put_logs(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + putRes = conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.get_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + events = res['events'] + nextSequenceToken = putRes['nextSequenceToken'] + assert isinstance(nextSequenceToken, six.string_types) == True + assert len(nextSequenceToken) == 56 + events.should.have.length_of(2) + + +@mock_logs +def test_filter_logs_interleaved(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.filter_log_events( + logGroupName=log_group_name, + logStreamNames=[log_stream_name], + interleaved=True, + ) + events = res['events'] + for original_message, resulting_event in zip(messages, events): + resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) + resulting_event['timestamp'].should.equal(original_message['timestamp']) + resulting_event['message'].should.equal(original_message['message']) + +@mock_logs +def test_put_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_log_group(logGroupName=log_group_name) + +@mock_logs +def test_delete_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_retention_policy(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert 
len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == None + + response = conn.delete_log_group(logGroupName=log_group_name) + diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 1f21eee74..36933d41a 100644 --- a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -1,136 +1,152 @@ -from __future__ import unicode_literals - -import six -import sure # noqa -import datetime -from moto.organizations import utils - -EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" -ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE -ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE -OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) -ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE -CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE - - -def test_make_random_org_id(): - org_id = utils.make_random_org_id() - org_id.should.match(ORG_ID_REGEX) - - -def test_make_random_root_id(): - root_id = utils.make_random_root_id() - root_id.should.match(ROOT_ID_REGEX) - - -def test_make_random_ou_id(): - root_id = utils.make_random_root_id() - ou_id = utils.make_random_ou_id(root_id) - ou_id.should.match(OU_ID_REGEX) - - -def test_make_random_account_id(): - account_id = utils.make_random_account_id() - account_id.should.match(ACCOUNT_ID_REGEX) - - -def test_make_random_create_account_status_id(): - create_account_status_id = utils.make_random_create_account_status_id() - create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - - -def validate_organization(response): - org = response['Organization'] - sorted(org.keys()).should.equal([ - 'Arn', - 'AvailablePolicyTypes', - 'FeatureSet', - 'Id', - 'MasterAccountArn', - 'MasterAccountEmail', - 'MasterAccountId', - ]) - org['Id'].should.match(ORG_ID_REGEX) - org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) - org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - )) - org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - )) - org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) - org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) - org['AvailablePolicyTypes'].should.equal([{ - 'Type': 'SERVICE_CONTROL_POLICY', - 'Status': 'ENABLED' - }]) - - -def validate_roots(org, response): - response.should.have.key('Roots').should.be.a(list) - response['Roots'].should_not.be.empty - root = response['Roots'][0] - root.should.have.key('Id').should.match(ROOT_ID_REGEX) - root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - root['Id'], - )) - root.should.have.key('Name').should.be.a(six.string_types) - root.should.have.key('PolicyTypes').should.be.a(list) - root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') - root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') - - -def validate_organizational_unit(org, response): - response.should.have.key('OrganizationalUnit').should.be.a(dict) - ou = response['OrganizationalUnit'] - ou.should.have.key('Id').should.match(OU_ID_REGEX) - ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - ou['Id'], - )) - 
ou.should.have.key('Name').should.be.a(six.string_types) - - -def validate_account(org, account): - sorted(account.keys()).should.equal([ - 'Arn', - 'Email', - 'Id', - 'JoinedMethod', - 'JoinedTimestamp', - 'Name', - 'Status', - ]) - account['Id'].should.match(ACCOUNT_ID_REGEX) - account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - account['Id'], - )) - account['Email'].should.match(EMAIL_REGEX) - account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) - account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) - account['Name'].should.be.a(six.string_types) - account['JoinedTimestamp'].should.be.a(datetime.datetime) - - -def validate_create_account_status(create_status): - sorted(create_status.keys()).should.equal([ - 'AccountId', - 'AccountName', - 'CompletedTimestamp', - 'Id', - 'RequestedTimestamp', - 'State', - ]) - create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) - create_status['AccountName'].should.be.a(six.string_types) - create_status['State'].should.equal('SUCCEEDED') - create_status['RequestedTimestamp'].should.be.a(datetime.datetime) - create_status['CompletedTimestamp'].should.be.a(datetime.datetime) +from __future__ import unicode_literals + +import six +import sure # noqa +import datetime +from moto.organizations import utils + + +def test_make_random_org_id(): + org_id = utils.make_random_org_id() + org_id.should.match(utils.ORG_ID_REGEX) + + +def test_make_random_root_id(): + root_id = utils.make_random_root_id() + root_id.should.match(utils.ROOT_ID_REGEX) + + +def test_make_random_ou_id(): + root_id = utils.make_random_root_id() + ou_id = utils.make_random_ou_id(root_id) + ou_id.should.match(utils.OU_ID_REGEX) + + +def test_make_random_account_id(): + account_id = utils.make_random_account_id() + account_id.should.match(utils.ACCOUNT_ID_REGEX) + + +def test_make_random_create_account_status_id(): + create_account_status_id = utils.make_random_create_account_status_id() + create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def test_make_random_service_control_policy_id(): + service_control_policy_id = utils.make_random_service_control_policy_id() + service_control_policy_id.should.match(utils.SCP_ID_REGEX) + + +def validate_organization(response): + org = response['Organization'] + sorted(org.keys()).should.equal([ + 'Arn', + 'AvailablePolicyTypes', + 'FeatureSet', + 'Id', + 'MasterAccountArn', + 'MasterAccountEmail', + 'MasterAccountId', + ]) + org['Id'].should.match(utils.ORG_ID_REGEX) + org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) + org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) + org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) + org['AvailablePolicyTypes'].should.equal([{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }]) + + +def validate_roots(org, response): + response.should.have.key('Roots').should.be.a(list) + response['Roots'].should_not.be.empty + root = response['Roots'][0] + root.should.have.key('Id').should.match(utils.ROOT_ID_REGEX) + root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + root['Id'], + )) + 
root.should.have.key('Name').should.be.a(six.string_types) + root.should.have.key('PolicyTypes').should.be.a(list) + root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') + + +def validate_organizational_unit(org, response): + response.should.have.key('OrganizationalUnit').should.be.a(dict) + ou = response['OrganizationalUnit'] + ou.should.have.key('Id').should.match(utils.OU_ID_REGEX) + ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + ou['Id'], + )) + ou.should.have.key('Name').should.be.a(six.string_types) + + +def validate_account(org, account): + sorted(account.keys()).should.equal([ + 'Arn', + 'Email', + 'Id', + 'JoinedMethod', + 'JoinedTimestamp', + 'Name', + 'Status', + ]) + account['Id'].should.match(utils.ACCOUNT_ID_REGEX) + account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + account['Id'], + )) + account['Email'].should.match(utils.EMAIL_REGEX) + account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) + account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) + account['Name'].should.be.a(six.string_types) + account['JoinedTimestamp'].should.be.a(datetime.datetime) + + +def validate_create_account_status(create_status): + sorted(create_status.keys()).should.equal([ + 'AccountId', + 'AccountName', + 'CompletedTimestamp', + 'Id', + 'RequestedTimestamp', + 'State', + ]) + create_status['Id'].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(utils.ACCOUNT_ID_REGEX) + create_status['AccountName'].should.be.a(six.string_types) + create_status['State'].should.equal('SUCCEEDED') + create_status['RequestedTimestamp'].should.be.a(datetime.datetime) + create_status['CompletedTimestamp'].should.be.a(datetime.datetime) + +def validate_policy_summary(org, summary): + summary.should.be.a(dict) + summary.should.have.key('Id').should.match(utils.SCP_ID_REGEX) + summary.should.have.key('Arn').should.equal(utils.SCP_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + summary['Id'], + )) + summary.should.have.key('Name').should.be.a(six.string_types) + summary.should.have.key('Description').should.be.a(six.string_types) + summary.should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + summary.should.have.key('AwsManaged').should.be.a(bool) + +def validate_service_control_policy(org, response): + response.should.have.key('PolicySummary').should.be.a(dict) + response.should.have.key('Content').should.be.a(six.string_types) + validate_policy_summary(org, response['PolicySummary']) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index ea3e17962..05f831e62 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -1,322 +1,594 @@ -from __future__ import unicode_literals - -import boto3 -import sure # noqa -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_organizations -from moto.organizations import utils -from .organizations_test_utils import ( - validate_organization, - validate_roots, - validate_organizational_unit, - validate_account, - validate_create_account_status, -) - - -@mock_organizations -def test_create_organization(): - client = boto3.client('organizations', region_name='us-east-1') - response = 
client.create_organization(FeatureSet='ALL') - validate_organization(response) - response['Organization']['FeatureSet'].should.equal('ALL') - - -@mock_organizations -def test_describe_organization(): - client = boto3.client('organizations', region_name='us-east-1') - client.create_organization(FeatureSet='ALL') - response = client.describe_organization() - validate_organization(response) - - -@mock_organizations -def test_describe_organization_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.describe_organization() - ex = e.exception - ex.operation_name.should.equal('DescribeOrganization') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') - - -# Organizational Units - -@mock_organizations -def test_list_roots(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - response = client.list_roots() - validate_roots(org, response) - - -@mock_organizations -def test_create_organizational_unit(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou_name = 'ou01' - response = client.create_organizational_unit( - ParentId=root_id, - Name=ou_name, - ) - validate_organizational_unit(org, response) - response['OrganizationalUnit']['Name'].should.equal(ou_name) - - -@mock_organizations -def test_describe_organizational_unit(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou_id = client.create_organizational_unit( - ParentId=root_id, - Name='ou01', - )['OrganizationalUnit']['Id'] - response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) - validate_organizational_unit(org, response) - - -@mock_organizations -def test_describe_organizational_unit_exception(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - with assert_raises(ClientError) as e: - response = client.describe_organizational_unit( - OrganizationalUnitId=utils.make_random_root_id() - ) - ex = e.exception - ex.operation_name.should.equal('DescribeOrganizationalUnit') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') - - -@mock_organizations -def test_list_organizational_units_for_parent(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - client.create_organizational_unit(ParentId=root_id, Name='ou01') - client.create_organizational_unit(ParentId=root_id, Name='ou02') - client.create_organizational_unit(ParentId=root_id, Name='ou03') - response = client.list_organizational_units_for_parent(ParentId=root_id) - response.should.have.key('OrganizationalUnits').should.be.a(list) - for ou in response['OrganizationalUnits']: - validate_organizational_unit(org, dict(OrganizationalUnit=ou)) - - -@mock_organizations -def test_list_organizational_units_for_parent_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = 
client.list_organizational_units_for_parent( - ParentId=utils.make_random_root_id() - ) - ex = e.exception - ex.operation_name.should.equal('ListOrganizationalUnitsForParent') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('ParentNotFoundException') - - -# Accounts -mockname = 'mock-account' -mockdomain = 'moto-example.org' -mockemail = '@'.join([mockname, mockdomain]) - - -@mock_organizations -def test_create_account(): - client = boto3.client('organizations', region_name='us-east-1') - client.create_organization(FeatureSet='ALL') - create_status = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus'] - validate_create_account_status(create_status) - create_status['AccountName'].should.equal(mockname) - - -@mock_organizations -def test_describe_account(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - account_id = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus']['AccountId'] - response = client.describe_account(AccountId=account_id) - validate_account(org, response['Account']) - response['Account']['Name'].should.equal(mockname) - response['Account']['Email'].should.equal(mockemail) - - -@mock_organizations -def test_describe_account_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.describe_account(AccountId=utils.make_random_account_id()) - ex = e.exception - ex.operation_name.should.equal('DescribeAccount') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('AccountNotFoundException') - - -@mock_organizations -def test_list_accounts(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - for i in range(5): - name = mockname + str(i) - email = name + '@' + mockdomain - client.create_account(AccountName=name, Email=email) - response = client.list_accounts() - response.should.have.key('Accounts') - accounts = response['Accounts'] - len(accounts).should.equal(5) - for account in accounts: - validate_account(org, account) - accounts[3]['Name'].should.equal(mockname + '3') - accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) - - -@mock_organizations -def test_list_accounts_for_parent(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - account_id = client.create_account( - AccountName=mockname, - Email=mockemail, - )['CreateAccountStatus']['AccountId'] - response = client.list_accounts_for_parent(ParentId=root_id) - account_id.should.be.within([account['Id'] for account in response['Accounts']]) - - -@mock_organizations -def test_move_account(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - account_id = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus']['AccountId'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - client.move_account( - AccountId=account_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response = 
client.list_accounts_for_parent(ParentId=ou01_id) - account_id.should.be.within([account['Id'] for account in response['Accounts']]) - - -@mock_organizations -def test_list_parents_for_ou(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - response01 = client.list_parents(ChildId=ou01_id) - response01.should.have.key('Parents').should.be.a(list) - response01['Parents'][0].should.have.key('Id').should.equal(root_id) - response01['Parents'][0].should.have.key('Type').should.equal('ROOT') - ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') - ou02_id = ou02['OrganizationalUnit']['Id'] - response02 = client.list_parents(ChildId=ou02_id) - response02.should.have.key('Parents').should.be.a(list) - response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) - response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_parents_for_accounts(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - account01_id = client.create_account( - AccountName='account01', - Email='account01@moto-example.org' - )['CreateAccountStatus']['AccountId'] - account02_id = client.create_account( - AccountName='account02', - Email='account02@moto-example.org' - )['CreateAccountStatus']['AccountId'] - client.move_account( - AccountId=account02_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response01 = client.list_parents(ChildId=account01_id) - response01.should.have.key('Parents').should.be.a(list) - response01['Parents'][0].should.have.key('Id').should.equal(root_id) - response01['Parents'][0].should.have.key('Type').should.equal('ROOT') - response02 = client.list_parents(ChildId=account02_id) - response02.should.have.key('Parents').should.be.a(list) - response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) - response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_children(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') - ou02_id = ou02['OrganizationalUnit']['Id'] - account01_id = client.create_account( - AccountName='account01', - Email='account01@moto-example.org' - )['CreateAccountStatus']['AccountId'] - account02_id = client.create_account( - AccountName='account02', - Email='account02@moto-example.org' - )['CreateAccountStatus']['AccountId'] - client.move_account( - AccountId=account02_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') - response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') - response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') - response04 
= client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') - response01['Children'][0]['Id'].should.equal(account01_id) - response01['Children'][0]['Type'].should.equal('ACCOUNT') - response02['Children'][0]['Id'].should.equal(ou01_id) - response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') - response03['Children'][0]['Id'].should.equal(account02_id) - response03['Children'][0]['Type'].should.equal('ACCOUNT') - response04['Children'][0]['Id'].should.equal(ou02_id) - response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_children_exception(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - with assert_raises(ClientError) as e: - response = client.list_children( - ParentId=utils.make_random_root_id(), - ChildType='ACCOUNT' - ) - ex = e.exception - ex.operation_name.should.equal('ListChildren') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('ParentNotFoundException') - with assert_raises(ClientError) as e: - response = client.list_children( - ParentId=root_id, - ChildType='BLEE' - ) - ex = e.exception - ex.operation_name.should.equal('ListChildren') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('InvalidInputException') +from __future__ import unicode_literals + +import boto3 +import json +import six +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_organizations +from moto.organizations import utils +from .organizations_test_utils import ( + validate_organization, + validate_roots, + validate_organizational_unit, + validate_account, + validate_create_account_status, + validate_service_control_policy, + validate_policy_summary, +) + + +@mock_organizations +def test_create_organization(): + client = boto3.client('organizations', region_name='us-east-1') + response = client.create_organization(FeatureSet='ALL') + validate_organization(response) + response['Organization']['FeatureSet'].should.equal('ALL') + + +@mock_organizations +def test_describe_organization(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + response = client.describe_organization() + validate_organization(response) + + +@mock_organizations +def test_describe_organization_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_organization() + ex = e.exception + ex.operation_name.should.equal('DescribeOrganization') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') + + +# Organizational Units + +@mock_organizations +def test_list_roots(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + response = client.list_roots() + validate_roots(org, response) + + +@mock_organizations +def test_create_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_name = 'ou01' + response = client.create_organizational_unit( + ParentId=root_id, + Name=ou_name, + 
) + validate_organizational_unit(org, response) + response['OrganizationalUnit']['Name'].should.equal(ou_name) + + +@mock_organizations +def test_describe_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) + validate_organizational_unit(org, response) + + +@mock_organizations +def test_describe_organizational_unit_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + with assert_raises(ClientError) as e: + response = client.describe_organizational_unit( + OrganizationalUnitId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('DescribeOrganizationalUnit') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + + +@mock_organizations +def test_list_organizational_units_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + client.create_organizational_unit(ParentId=root_id, Name='ou01') + client.create_organizational_unit(ParentId=root_id, Name='ou02') + client.create_organizational_unit(ParentId=root_id, Name='ou03') + response = client.list_organizational_units_for_parent(ParentId=root_id) + response.should.have.key('OrganizationalUnits').should.be.a(list) + for ou in response['OrganizationalUnits']: + validate_organizational_unit(org, dict(OrganizationalUnit=ou)) + + +@mock_organizations +def test_list_organizational_units_for_parent_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.list_organizational_units_for_parent( + ParentId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('ListOrganizationalUnitsForParent') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + + +# Accounts +mockname = 'mock-account' +mockdomain = 'moto-example.org' +mockemail = '@'.join([mockname, mockdomain]) + + +@mock_organizations +def test_create_account(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + create_status = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus'] + validate_create_account_status(create_status) + create_status['AccountName'].should.equal(mockname) + + +@mock_organizations +def test_describe_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + response = client.describe_account(AccountId=account_id) + validate_account(org, response['Account']) + response['Account']['Name'].should.equal(mockname) + response['Account']['Email'].should.equal(mockemail) + + +@mock_organizations +def test_describe_account_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with 
assert_raises(ClientError) as e: + response = client.describe_account(AccountId=utils.make_random_account_id()) + ex = e.exception + ex.operation_name.should.equal('DescribeAccount') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + + +@mock_organizations +def test_list_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(5): + name = mockname + str(i) + email = name + '@' + mockdomain + client.create_account(AccountName=name, Email=email) + response = client.list_accounts() + response.should.have.key('Accounts') + accounts = response['Accounts'] + len(accounts).should.equal(5) + for account in accounts: + validate_account(org, account) + accounts[3]['Name'].should.equal(mockname + '3') + accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) + + +@mock_organizations +def test_list_accounts_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + response = client.list_accounts_for_parent(ParentId=root_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_move_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + client.move_account( + AccountId=account_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response = client.list_accounts_for_parent(ParentId=ou01_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_list_parents_for_ou(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + response01 = client.list_parents(ChildId=ou01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + response02 = client.list_parents(ChildId=ou02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_parents_for_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = 
ou01['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_parents(ChildId=account01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + response02 = client.list_parents(ChildId=account02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') + response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') + response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') + response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') + response01['Children'][0]['Id'].should.equal(account01_id) + response01['Children'][0]['Type'].should.equal('ACCOUNT') + response02['Children'][0]['Id'].should.equal(ou01_id) + response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + response03['Children'][0]['Id'].should.equal(account02_id) + response03['Children'][0]['Type'].should.equal('ACCOUNT') + response04['Children'][0]['Id'].should.equal(ou02_id) + response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=utils.make_random_root_id(), + ChildType='ACCOUNT' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=root_id, + ChildType='BLEE' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + 
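Aside: every Organizations test above follows the same setup pattern: create the organization, read back the id of its single root, then nest OUs and accounts beneath it. Below is a condensed, runnable sketch of that pattern, built only from calls that appear in these tests; the function and OU names are illustrative and not part of the change.

import boto3
import sure  # noqa
from moto import mock_organizations

@mock_organizations
def example_org_tree_setup():
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    # a fresh organization has exactly one root
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id,
        Name='example-ou',
    )['OrganizationalUnit']['Id']
    # the new OU shows up as a child of the root
    response = client.list_children(ParentId=root_id,
                                    ChildType='ORGANIZATIONAL_UNIT')
    response['Children'][0]['Id'].should.equal(ou_id)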
+# Service Control Policies +policy_doc01 = dict( + Version='2012-10-17', + Statement=[dict( + Sid='MockPolicyStatement', + Effect='Allow', + Action='s3:*', + Resource='*', + )] +) + +@mock_organizations +def test_create_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + policy = client.describe_policy(PolicyId=policy_id)['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_attach_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_organizations +def 
test_attach_policy_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    client.create_organization(FeatureSet='ALL')['Organization']
+    root_id='r-dj873'
+    ou_id='ou-gi99-i7r8eh2i2'
+    account_id='126644886543'
+    policy_id = client.create_policy(
+        Content=json.dumps(policy_doc01),
+        Description='A dummy service control policy',
+        Name='MockServiceControlPolicy',
+        Type='SERVICE_CONTROL_POLICY'
+    )['Policy']['PolicySummary']['Id']
+    with assert_raises(ClientError) as e:
+        response = client.attach_policy(PolicyId=policy_id, TargetId=root_id)
+    ex = e.exception
+    ex.operation_name.should.equal('AttachPolicy')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
+    ex = e.exception
+    ex.operation_name.should.equal('AttachPolicy')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.attach_policy(PolicyId=policy_id, TargetId=account_id)
+    ex = e.exception
+    ex.operation_name.should.equal('AttachPolicy')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.attach_policy(PolicyId=policy_id, TargetId='meaninglessstring')
+    ex = e.exception
+    ex.operation_name.should.equal('AttachPolicy')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('InvalidInputException')
+
+
+@mock_organizations
+def test_list_policies():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    for i in range(0,4):
+        client.create_policy(
+            Content=json.dumps(policy_doc01),
+            Description='A dummy service control policy',
+            Name='MockServiceControlPolicy' + str(i),
+            Type='SERVICE_CONTROL_POLICY'
+        )
+    response = client.list_policies(Filter='SERVICE_CONTROL_POLICY')
+    for policy in response['Policies']:
+        validate_policy_summary(org, policy)
+
+
+@mock_organizations
+def test_list_policies_for_target():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou_id = client.create_organizational_unit(
+        ParentId=root_id,
+        Name='ou01',
+    )['OrganizationalUnit']['Id']
+    account_id = client.create_account(
+        AccountName=mockname,
+        Email=mockemail,
+    )['CreateAccountStatus']['AccountId']
+    policy_id = client.create_policy(
+        Content=json.dumps(policy_doc01),
+        Description='A dummy service control policy',
+        Name='MockServiceControlPolicy',
+        Type='SERVICE_CONTROL_POLICY'
+    )['Policy']['PolicySummary']['Id']
+    client.attach_policy(PolicyId=policy_id, TargetId=ou_id)
+    response = client.list_policies_for_target(
+        TargetId=ou_id,
+        Filter='SERVICE_CONTROL_POLICY',
+    )
+    for policy in response['Policies']:
+        validate_policy_summary(org, policy)
+    client.attach_policy(PolicyId=policy_id, TargetId=account_id)
+    response = client.list_policies_for_target(
+        TargetId=account_id,
+        Filter='SERVICE_CONTROL_POLICY',
+    )
+    for policy in response['Policies']:
+        validate_policy_summary(org, policy)
+
+
+@mock_organizations
+def test_list_policies_for_target_exception():
+    client =
boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + ou_id='ou-gi99-i7r8eh2i2' + account_id='126644886543' + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=account_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId='meaninglessstring', + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_list_targets_for_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.list_targets_for_policy(PolicyId=policy_id) + for target in response['Targets']: + target.should.be.a(dict) + target.should.have.key('Name').should.be.a(six.string_types) + target.should.have.key('Arn').should.be.a(six.string_types) + target.should.have.key('TargetId').should.be.a(six.string_types) + target.should.have.key('Type').should.be.within( + ['ROOT', 'ORGANIZATIONAL_UNIT', 'ACCOUNT'] + ) + + +@mock_organizations +def test_list_targets_for_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.list_targets_for_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('ListTargetsForPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_targets_for_policy(PolicyId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('ListTargetsForPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 064598012..af330e672 100644 --- a/tests/test_rds/test_rds.py +++ 
b/tests/test_rds/test_rds.py @@ -1,324 +1,324 @@ -from __future__ import unicode_literals - -import boto3 -import boto.rds -import boto.vpc -from boto.exception import BotoServerError -import sure # noqa - -from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds -from tests.helpers import disable_on_py3 - - -@mock_rds_deprecated -def test_create_database(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) - - database.status.should.equal('available') - database.id.should.equal("db-master-1") - database.allocated_storage.should.equal(10) - database.instance_class.should.equal("db.m1.small") - database.master_username.should.equal("root") - database.endpoint.should.equal( - ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) - database.security_groups[0].name.should.equal('my_sg') - - -@mock_rds_deprecated -def test_get_databases(): - conn = boto.rds.connect_to_region("us-west-2") - - list(conn.get_all_dbinstances()).should.have.length_of(0) - - conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2') - - list(conn.get_all_dbinstances()).should.have.length_of(2) - - databases = conn.get_all_dbinstances("db-master-1") - list(databases).should.have.length_of(1) - - databases[0].id.should.equal("db-master-1") - - -@mock_rds -def test_get_databases_paginated(): - conn = boto3.client('rds', region_name="us-west-2") - - for i in range(51): - conn.create_db_instance(AllocatedStorage=5, - Port=5432, - DBInstanceIdentifier='rds%d' % i, - DBInstanceClass='db.t1.micro', - Engine='postgres') - - resp = conn.describe_db_instances() - resp["DBInstances"].should.have.length_of(50) - resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) - - resp2 = conn.describe_db_instances(Marker=resp["Marker"]) - resp2["DBInstances"].should.have.length_of(1) - - -@mock_rds_deprecated -def test_describe_non_existant_database(): - conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbinstances.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_delete_database(): - conn = boto.rds.connect_to_region("us-west-2") - list(conn.get_all_dbinstances()).should.have.length_of(0) - - conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - list(conn.get_all_dbinstances()).should.have.length_of(1) - - conn.delete_dbinstance("db-master-1") - list(conn.get_all_dbinstances()).should.have.length_of(0) - - -@mock_rds_deprecated -def test_delete_non_existant_database(): - conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbinstance.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_create_database_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - security_group.name.should.equal('db_sg') - security_group.description.should.equal("DB Security Group") - list(security_group.ip_ranges).should.equal([]) - - -@mock_rds_deprecated -def test_get_security_groups(): - conn = boto.rds.connect_to_region("us-west-2") - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) - - conn.create_dbsecurity_group('db_sg1', 'DB Security Group') - conn.create_dbsecurity_group('db_sg2', 'DB Security Group') - - 
list(conn.get_all_dbsecurity_groups()).should.have.length_of(2) - - databases = conn.get_all_dbsecurity_groups("db_sg1") - list(databases).should.have.length_of(1) - - databases[0].name.should.equal("db_sg1") - - -@mock_rds_deprecated -def test_get_non_existant_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbsecurity_groups.when.called_with( - "not-a-sg").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_delete_database_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.create_dbsecurity_group('db_sg', 'DB Security Group') - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(1) - - conn.delete_dbsecurity_group("db_sg") - list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) - - -@mock_rds_deprecated -def test_delete_non_existant_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbsecurity_group.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@disable_on_py3() -@mock_rds_deprecated -def test_security_group_authorize(): - conn = boto.rds.connect_to_region("us-west-2") - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - list(security_group.ip_ranges).should.equal([]) - - security_group.authorize(cidr_ip='10.3.2.45/32') - security_group = conn.get_all_dbsecurity_groups()[0] - list(security_group.ip_ranges).should.have.length_of(1) - security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') - - -@mock_rds_deprecated -def test_add_security_group_to_database(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - database.modify(security_groups=[security_group]) - - database = conn.get_all_dbinstances()[0] - list(database.security_groups).should.have.length_of(1) - - database.security_groups[0].name.should.equal("db_sg") - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_add_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") - - subnet_ids = [subnet1.id, subnet2.id] - conn = boto.rds.connect_to_region("us-west-2") - subnet_group = conn.create_db_subnet_group( - "db_subnet", "my db subnet", subnet_ids) - subnet_group.name.should.equal('db_subnet') - subnet_group.description.should.equal("my db subnet") - list(subnet_group.subnet_ids).should.equal(subnet_ids) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_describe_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) - - list(conn.get_all_db_subnet_groups()).should.have.length_of(2) - list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) - - conn.get_all_db_subnet_groups.when.called_with( - "not-a-subnet").should.throw(BotoServerError) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_delete_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, 
"10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - list(conn.get_all_db_subnet_groups()).should.have.length_of(1) - - conn.delete_db_subnet_group("db_subnet1") - list(conn.get_all_db_subnet_groups()).should.have.length_of(0) - - conn.delete_db_subnet_group.when.called_with( - "db_subnet1").should.throw(BotoServerError) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_create_database_in_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', - 'root', 'hunter2', db_subnet_group_name="db_subnet1") - - database = conn.get_all_dbinstances("db-master-1")[0] - database.subnet_group.name.should.equal("db_subnet1") - - -@mock_rds_deprecated -def test_create_database_replica(): - conn = boto.rds.connect_to_region("us-west-2") - - primary = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - - replica = conn.create_dbinstance_read_replica( - "replica", "db-master-1", "db.m1.small") - replica.id.should.equal("replica") - replica.instance_class.should.equal("db.m1.small") - status_info = replica.status_infos[0] - status_info.normal.should.equal(True) - status_info.status_type.should.equal('read replication') - status_info.status.should.equal('replicating') - - primary = conn.get_all_dbinstances("db-master-1")[0] - primary.read_replica_dbinstance_identifiers[0].should.equal("replica") - - conn.delete_dbinstance("replica") - - primary = conn.get_all_dbinstances("db-master-1")[0] - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) - - -@mock_rds_deprecated -def test_create_cross_region_database_replica(): - west_1_conn = boto.rds.connect_to_region("us-west-1") - west_2_conn = boto.rds.connect_to_region("us-west-2") - - primary = west_1_conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - - primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" - replica = west_2_conn.create_dbinstance_read_replica( - "replica", - primary_arn, - "db.m1.small", - ) - - primary = west_1_conn.get_all_dbinstances("db-master-1")[0] - primary.read_replica_dbinstance_identifiers[0].should.equal("replica") - - replica = west_2_conn.get_all_dbinstances("replica")[0] - replica.instance_class.should.equal("db.m1.small") - - west_2_conn.delete_dbinstance("replica") - - primary = west_1_conn.get_all_dbinstances("db-master-1")[0] - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) - - -@mock_rds_deprecated -def test_connecting_to_us_east_1(): - # boto does not use us-east-1 in the URL for RDS, - # and that broke moto in the past: - # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285 - conn = boto.rds.connect_to_region("us-east-1") - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) - - database.status.should.equal('available') - database.id.should.equal("db-master-1") - database.allocated_storage.should.equal(10) - database.instance_class.should.equal("db.m1.small") - database.master_username.should.equal("root") - database.endpoint.should.equal( - ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 
3306)) - database.security_groups[0].name.should.equal('my_sg') - - -@mock_rds_deprecated -def test_create_database_with_iops(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) - - database.status.should.equal('available') - database.iops.should.equal(6000) - # boto>2.36.0 may change the following property name to `storage_type` - database.StorageType.should.equal('io1') +from __future__ import unicode_literals + +import boto3 +import boto.rds +import boto.vpc +from boto.exception import BotoServerError +import sure # noqa + +from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds +from tests.helpers import disable_on_py3 + + +@mock_rds_deprecated +def test_create_database(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', + security_groups=["my_sg"]) + + database.status.should.equal('available') + database.id.should.equal("db-master-1") + database.allocated_storage.should.equal(10) + database.instance_class.should.equal("db.m1.small") + database.master_username.should.equal("root") + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) + database.security_groups[0].name.should.equal('my_sg') + + +@mock_rds_deprecated +def test_get_databases(): + conn = boto.rds.connect_to_region("us-west-2") + + list(conn.get_all_dbinstances()).should.have.length_of(0) + + conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2') + + list(conn.get_all_dbinstances()).should.have.length_of(2) + + databases = conn.get_all_dbinstances("db-master-1") + list(databases).should.have.length_of(1) + + databases[0].id.should.equal("db-master-1") + + +@mock_rds +def test_get_databases_paginated(): + conn = boto3.client('rds', region_name="us-west-2") + + for i in range(51): + conn.create_db_instance(AllocatedStorage=5, + Port=5432, + DBInstanceIdentifier='rds%d' % i, + DBInstanceClass='db.t1.micro', + Engine='postgres') + + resp = conn.describe_db_instances() + resp["DBInstances"].should.have.length_of(50) + resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) + + resp2 = conn.describe_db_instances(Marker=resp["Marker"]) + resp2["DBInstances"].should.have.length_of(1) + + +@mock_rds_deprecated +def test_describe_non_existant_database(): + conn = boto.rds.connect_to_region("us-west-2") + conn.get_all_dbinstances.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_delete_database(): + conn = boto.rds.connect_to_region("us-west-2") + list(conn.get_all_dbinstances()).should.have.length_of(0) + + conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + list(conn.get_all_dbinstances()).should.have.length_of(1) + + conn.delete_dbinstance("db-master-1") + list(conn.get_all_dbinstances()).should.have.length_of(0) + + +@mock_rds_deprecated +def test_delete_non_existant_database(): + conn = boto.rds.connect_to_region("us-west-2") + conn.delete_dbinstance.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_create_database_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + security_group.name.should.equal('db_sg') + 
security_group.description.should.equal("DB Security Group") + list(security_group.ip_ranges).should.equal([]) + + +@mock_rds_deprecated +def test_get_security_groups(): + conn = boto.rds.connect_to_region("us-west-2") + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) + + conn.create_dbsecurity_group('db_sg1', 'DB Security Group') + conn.create_dbsecurity_group('db_sg2', 'DB Security Group') + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(2) + + databases = conn.get_all_dbsecurity_groups("db_sg1") + list(databases).should.have.length_of(1) + + databases[0].name.should.equal("db_sg1") + + +@mock_rds_deprecated +def test_get_non_existant_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.get_all_dbsecurity_groups.when.called_with( + "not-a-sg").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_delete_database_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.create_dbsecurity_group('db_sg', 'DB Security Group') + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(1) + + conn.delete_dbsecurity_group("db_sg") + list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) + + +@mock_rds_deprecated +def test_delete_non_existant_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.delete_dbsecurity_group.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@disable_on_py3() +@mock_rds_deprecated +def test_security_group_authorize(): + conn = boto.rds.connect_to_region("us-west-2") + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + list(security_group.ip_ranges).should.equal([]) + + security_group.authorize(cidr_ip='10.3.2.45/32') + security_group = conn.get_all_dbsecurity_groups()[0] + list(security_group.ip_ranges).should.have.length_of(1) + security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') + + +@mock_rds_deprecated +def test_add_security_group_to_database(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + database.modify(security_groups=[security_group]) + + database = conn.get_all_dbinstances()[0] + list(database.security_groups).should.have.length_of(1) + + database.security_groups[0].name.should.equal("db_sg") + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_add_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.2.0/24") + + subnet_ids = [subnet1.id, subnet2.id] + conn = boto.rds.connect_to_region("us-west-2") + subnet_group = conn.create_db_subnet_group( + "db_subnet", "my db subnet", subnet_ids) + subnet_group.name.should.equal('db_subnet') + subnet_group.description.should.equal("my db subnet") + list(subnet_group.subnet_ids).should.equal(subnet_ids) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_describe_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) + + 
list(conn.get_all_db_subnet_groups()).should.have.length_of(2) + list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) + + conn.get_all_db_subnet_groups.when.called_with( + "not-a-subnet").should.throw(BotoServerError) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_delete_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + list(conn.get_all_db_subnet_groups()).should.have.length_of(1) + + conn.delete_db_subnet_group("db_subnet1") + list(conn.get_all_db_subnet_groups()).should.have.length_of(0) + + conn.delete_db_subnet_group.when.called_with( + "db_subnet1").should.throw(BotoServerError) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_create_database_in_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', + 'root', 'hunter2', db_subnet_group_name="db_subnet1") + + database = conn.get_all_dbinstances("db-master-1")[0] + database.subnet_group.name.should.equal("db_subnet1") + + +@mock_rds_deprecated +def test_create_database_replica(): + conn = boto.rds.connect_to_region("us-west-2") + + primary = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + replica = conn.create_dbinstance_read_replica( + "replica", "db-master-1", "db.m1.small") + replica.id.should.equal("replica") + replica.instance_class.should.equal("db.m1.small") + status_info = replica.status_infos[0] + status_info.normal.should.equal(True) + status_info.status_type.should.equal('read replication') + status_info.status.should.equal('replicating') + + primary = conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + conn.delete_dbinstance("replica") + + primary = conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + + +@mock_rds_deprecated +def test_create_cross_region_database_replica(): + west_1_conn = boto.rds.connect_to_region("us-west-1") + west_2_conn = boto.rds.connect_to_region("us-west-2") + + primary = west_1_conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" + replica = west_2_conn.create_dbinstance_read_replica( + "replica", + primary_arn, + "db.m1.small", + ) + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + replica = west_2_conn.get_all_dbinstances("replica")[0] + replica.instance_class.should.equal("db.m1.small") + + west_2_conn.delete_dbinstance("replica") + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + + +@mock_rds_deprecated +def test_connecting_to_us_east_1(): + # boto does not use us-east-1 in the URL for RDS, + # and that broke moto in the past: + # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285 + conn = 
boto.rds.connect_to_region("us-east-1") + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', + security_groups=["my_sg"]) + + database.status.should.equal('available') + database.id.should.equal("db-master-1") + database.allocated_storage.should.equal(10) + database.instance_class.should.equal("db.m1.small") + database.master_username.should.equal("root") + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) + database.security_groups[0].name.should.equal('my_sg') + + +@mock_rds_deprecated +def test_create_database_with_iops(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) + + database.status.should.equal('available') + database.iops.should.equal(6000) + # boto>2.36.0 may change the following property name to `storage_type` + database.StorageType.should.equal('io1') diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 311cd7fd7..a25b53196 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1,1472 +1,1472 @@ -from __future__ import unicode_literals - -from botocore.exceptions import ClientError, ParamValidationError -import boto3 -import sure # noqa -from moto import mock_ec2, mock_kms, mock_rds2 - - -@mock_rds2 -def test_create_database(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - db_instance = database['DBInstance'] - db_instance['AllocatedStorage'].should.equal(10) - db_instance['DBInstanceClass'].should.equal("db.m1.small") - db_instance['LicenseModel'].should.equal("license-included") - db_instance['MasterUsername'].should.equal("root") - db_instance['DBSecurityGroups'][0][ - 'DBSecurityGroupName'].should.equal('my_sg') - db_instance['DBInstanceArn'].should.equal( - 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - db_instance['DBInstanceStatus'].should.equal('available') - db_instance['DBName'].should.equal('staging-postgres') - db_instance['DBInstanceIdentifier'].should.equal("db-master-1") - db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) - db_instance['DbiResourceId'].should.contain("db-") - db_instance['CopyTagsToSnapshot'].should.equal(False) - - -@mock_rds2 -def test_stop_database(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # test stopping database should shutdown - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('stopped') - # test rdsclient error when trying to stop an already stopped database - 
conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # test stopping a stopped database with snapshot should error and no snapshot should exist for that call - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) - response = conn.describe_db_snapshots() - response['DBSnapshots'].should.equal([]) - - -@mock_rds2 -def test_start_database(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # test starting an already started database should error - conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # stop and test start - should go from stopped to available, create snapshot and check snapshot - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('stopped') - response = conn.describe_db_snapshots() - response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') - response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('available') - # starting database should not remove snapshot - response = conn.describe_db_snapshots() - response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') - # test stopping database, create snapshot with existing snapshot already created should throw error - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) - # test stopping database not invoking snapshot should succeed. - response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('stopped') - - -@mock_rds2 -def test_fail_to_stop_multi_az(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - MultiAZ=True) - - mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # multi-az databases aren't allowed to be shut down at this time. - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # multi-az databases aren't allowed to be started up at this time. 
- conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - - -@mock_rds2 -def test_fail_to_stop_readreplica(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", - SourceDBInstanceIdentifier="db-master-1", - DBInstanceClass="db.m1.small") - - mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # read-replicas are not allowed to be stopped at this time. - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # read-replicas are not allowed to be started at this time. - conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - - -@mock_rds2 -def test_get_databases(): - conn = boto3.client('rds', region_name='us-west-2') - - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - conn.create_db_instance(DBInstanceIdentifier='db-master-2', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(2) - - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - list(instances['DBInstances']).should.have.length_of(1) - instances['DBInstances'][0][ - 'DBInstanceIdentifier'].should.equal("db-master-1") - instances['DBInstances'][0]['DBInstanceArn'].should.equal( - 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - - -@mock_rds2 -def test_get_databases_paginated(): - conn = boto3.client('rds', region_name="us-west-2") - - for i in range(51): - conn.create_db_instance(AllocatedStorage=5, - Port=5432, - DBInstanceIdentifier='rds%d' % i, - DBInstanceClass='db.t1.micro', - Engine='postgres') - - resp = conn.describe_db_instances() - resp["DBInstances"].should.have.length_of(50) - resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) - - resp2 = conn.describe_db_instances(Marker=resp["Marker"]) - resp2["DBInstances"].should.have.length_of(1) - - resp3 = conn.describe_db_instances(MaxRecords=100) - resp3["DBInstances"].should.have.length_of(51) - -@mock_rds2 -def test_describe_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_instances.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_modify_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - 
Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=20, - ApplyImmediately=True) - instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) - - -@mock_rds2 -def test_rename_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - list(instances['DBInstances']).should.have.length_of(1) - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - NewDBInstanceIdentifier='db-master-2', - ApplyImmediately=True) - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") - list(instances['DBInstances']).should.have.length_of(1) - - -@mock_rds2 -def test_modify_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db', - AllocatedStorage=20, - ApplyImmediately=True).should.throw(ClientError) - - -@mock_rds2 -def test_reboot_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") - - -@mock_rds2 -def test_reboot_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.reboot_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_delete_database(): - conn = boto3.client('rds', region_name='us-west-2') - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(1) - - conn.delete_db_instance(DBInstanceIdentifier="db-primary-1", - FinalDBSnapshotIdentifier='primary-1-snapshot') - - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - - # Saved the snapshot - snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots') - snapshots[0].get('Engine').should.equal('postgres') - - -@mock_rds2 -def test_delete_non_existant_database(): - conn = boto3.client('rds2', region_name="us-west-2") - conn.delete_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def 
test_create_db_snapshots(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_snapshot.when.called_with( - DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='g-1').get('DBSnapshot') - - snapshot.get('Engine').should.equal('postgres') - snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') - snapshot.get('DBSnapshotIdentifier').should.equal('g-1') - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) - result['TagList'].should.equal([]) - - -@mock_rds2 -def test_create_db_snapshots_copy_tags(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_snapshot.when.called_with( - DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - CopyTagsToSnapshot=True, - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='g-1').get('DBSnapshot') - - snapshot.get('Engine').should.equal('postgres') - snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') - snapshot.get('DBSnapshotIdentifier').should.equal('g-1') - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_describe_db_snapshots(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') - - created.get('Engine').should.equal('postgres') - - by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') - by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') - by_snapshot_id.should.equal(by_database_id) - - snapshot = by_snapshot_id[0] - snapshot.should.equal(created) - snapshot.get('Engine').should.equal('postgres') - - conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-2') - snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') - snapshots.should.have.length_of(2) - - -@mock_rds2 -def test_delete_db_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - 
Port=1234, - DBSecurityGroups=["my_sg"]) - conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1') - - conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] - conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') - conn.describe_db_snapshots.when.called_with( - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - option_group = conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_group['OptionGroup']['OptionGroupName'].should.equal('test') - option_group['OptionGroup']['EngineName'].should.equal('mysql') - option_group['OptionGroup'][ - 'OptionGroupDescription'].should.equal('test option group') - option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') - - -@mock_rds2 -def test_create_option_group_bad_engine_name(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='invalid_engine', - MajorEngineVersion='5.6', - OptionGroupDescription='test invalid engine').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_bad_engine_major_version(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='6.6.6', - OptionGroupDescription='test invalid engine version').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_empty_description(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_duplicate(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group').should.throw(ClientError) - - -@mock_rds2 -def test_describe_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0][ - 'OptionGroupName'].should.equal('test') - - -@mock_rds2 -def test_describe_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_option_groups.when.called_with( - OptionGroupName="not-a-option-group").should.throw(ClientError) - - -@mock_rds2 -def test_delete_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0][ - 'OptionGroupName'].should.equal('test') - conn.delete_option_group(OptionGroupName='test') - conn.describe_option_groups.when.called_with( - OptionGroupName='test').should.throw(ClientError) - - 
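# Note: the `.when.called_with(...).should.throw(...)` calls used throughout these
# tests are the `sure` library's idiom for asserting that a call raises. As a
# minimal sketch, the same negative-path assertion can be written in plain pytest
# (hypothetical test name; same moto decorator and boto3 client as the tests here):
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_rds2

@mock_rds2
def test_delete_missing_option_group_raises():
    # moto's RDS backend, like the real service, raises ClientError
    # when the named option group does not exist
    conn = boto3.client('rds', region_name='us-west-2')
    with pytest.raises(ClientError):
        conn.delete_option_group(OptionGroupName='not-a-real-option-group')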
-@mock_rds2 -def test_delete_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_option_group.when.called_with( - OptionGroupName='non-existant').should.throw(ClientError) - - -@mock_rds2 -def test_describe_option_group_options(): - conn = boto3.client('rds', region_name='us-west-2') - option_group_options = conn.describe_option_group_options( - EngineName='sqlserver-ee') - len(option_group_options['OptionGroupOptions']).should.equal(4) - option_group_options = conn.describe_option_group_options( - EngineName='sqlserver-ee', MajorEngineVersion='11.00') - len(option_group_options['OptionGroupOptions']).should.equal(2) - option_group_options = conn.describe_option_group_options( - EngineName='mysql', MajorEngineVersion='5.6') - len(option_group_options['OptionGroupOptions']).should.equal(1) - conn.describe_option_group_options.when.called_with( - EngineName='non-existent').should.throw(ClientError) - conn.describe_option_group_options.when.called_with( - EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) - - -@mock_rds2 -def test_modify_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', - MajorEngineVersion='5.6', OptionGroupDescription='test option group') - # TODO: create an option and validate it before deleting. - # It is not clear how to use this function to add options to an - # option group; finish this test once that is understood. - result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[ - ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) - result['OptionGroup']['EngineName'].should.equal('mysql') - result['OptionGroup']['Options'].should.equal([]) - result['OptionGroup']['OptionGroupName'].should.equal('test') - - -@mock_rds2 -def test_modify_option_group_no_options(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', - MajorEngineVersion='5.6', OptionGroupDescription='test option group') - conn.modify_option_group.when.called_with( - OptionGroupName='test').should.throw(ClientError) - - -@mock_rds2 -def test_modify_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[( - 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) - - -@mock_rds2 -def test_delete_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_list_tags_invalid_arn(): - conn = boto3.client('rds', region_name='us-west-2') - conn.list_tags_for_resource.when.called_with( - ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) - - -@mock_rds2 -def test_list_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') - result['TagList'].should.equal([]) - test_instance = conn.create_db_instance( - DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', 
- }, - ]) - result = conn.list_tags_for_resource( - ResourceName=test_instance['DBInstance']['DBInstanceArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-without-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['TagList']).should.have.length_of(2) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['TagList']).should.have.length_of(3) - - -@mock_rds2 -def test_remove_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - len(result['TagList']).should.equal(1) - - -@mock_rds2 -def test_list_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo') - result['TagList'].should.equal([]) - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-with-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 
'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') - list(result['TagList']).should.have.length_of(2) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') - list(result['TagList']).should.have.length_of(3) - - -@mock_rds2 -def test_remove_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-with-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') - len(result['TagList']).should.equal(1) - - -@mock_rds2 -def test_add_tags_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(0) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(2) - - -@mock_rds2 -def test_remove_tags_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(1) - - -@mock_rds2 -def test_create_database_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - - result = conn.create_db_security_group( - DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') - 
result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") - result['DBSecurityGroup'][ - 'DBSecurityGroupDescription'].should.equal("DB Security Group") - result['DBSecurityGroup']['IPRanges'].should.equal([]) - - -@mock_rds2 -def test_get_security_groups(): - conn = boto3.client('rds', region_name='us-west-2') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(0) - - conn.create_db_security_group( - DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') - conn.create_db_security_group( - DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(2) - - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") - result['DBSecurityGroups'].should.have.length_of(1) - result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") - - -@mock_rds2 -def test_get_non_existant_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_security_groups.when.called_with( - DBSecurityGroupName="not-a-sg").should.throw(ClientError) - - -@mock_rds2 -def test_delete_database_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_security_group( - DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(1) - - conn.delete_db_security_group(DBSecurityGroupName="db_sg") - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(0) - - -@mock_rds2 -def test_delete_non_existant_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_security_group.when.called_with( - DBSecurityGroupName="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_security_group_authorize(): - conn = boto3.client('rds', region_name='us-west-2') - security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg', - DBSecurityGroupDescription='DB Security Group') - security_group['DBSecurityGroup']['IPRanges'].should.equal([]) - - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', - CIDRIP='10.3.2.45/32') - - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") - result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) - result['DBSecurityGroups'][0]['IPRanges'].should.equal( - [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) - - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', - CIDRIP='10.3.2.46/32') - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") - result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2) - result['DBSecurityGroups'][0]['IPRanges'].should.equal([ - {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}, - {'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'}, - ]) - - -@mock_rds2 -def test_add_security_group_to_database(): - conn = boto3.client('rds', region_name='us-west-2') - - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'].should.equal([]) - conn.create_db_security_group(DBSecurityGroupName='db_sg', - DBSecurityGroupDescription='DB Security Group') - 
conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBSecurityGroups=['db_sg']) - result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'][0][ - 'DBSecurityGroupName'].should.equal('db_sg') - - -@mock_rds2 -def test_list_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group', - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] - - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - conn.add_tags_to_resource(ResourceName=resource, - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_remove_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group', - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_create_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet1 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - subnet2 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] - - subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] - conn = boto3.client('rds', region_name='us-west-2') - result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', - DBSubnetGroupDescription='my db subnet', - SubnetIds=subnet_ids) - result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") - result['DBSubnetGroup'][ - 'DBSubnetGroupDescription'].should.equal("my db subnet") - subnets = result['DBSubnetGroup']['Subnets'] - subnet_group_ids = [subnets[0]['SubnetIdentifier'], - subnets[1]['SubnetIdentifier']] - list(subnet_group_ids).should.equal(subnet_ids) - - -@mock_ec2 -@mock_rds2 -def test_create_database_in_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = 
vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSubnetGroupName='db_subnet1') - result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - result['DBInstances'][0]['DBSubnetGroup'][ - 'DBSubnetGroupName'].should.equal('db_subnet1') - - -@mock_ec2 -@mock_rds2 -def test_describe_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - - resp = conn.describe_db_subnet_groups() - resp['DBSubnetGroups'].should.have.length_of(2) - - subnets = resp['DBSubnetGroups'][0]['Subnets'] - subnets.should.have.length_of(1) - - list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") - ['DBSubnetGroups']).should.have.length_of(1) - - conn.describe_db_subnet_groups.when.called_with( - DBSubnetGroupName="not-a-subnet").should.throw(ClientError) - - -@mock_ec2 -@mock_rds2 -def test_delete_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(1) - - conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - conn.delete_db_subnet_group.when.called_with( - DBSubnetGroupName="db_subnet1").should.throw(ClientError) - - -@mock_ec2 -@mock_rds2 -def test_list_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) - result['TagList'].should.equal([{'Value': 'bar', - 
'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_add_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) - - conn.add_tags_to_resource(ResourceName=resource, - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_remove_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) - - conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) - - -@mock_rds2 -def test_create_database_replica(): - conn = boto3.client('rds', region_name='us-west-2') - - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", - SourceDBInstanceIdentifier="db-master-1", - DBInstanceClass="db.m1.small") - replica['DBInstance'][ - 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') - replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') - replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') - - master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ - 'db-replica-1']) - - conn.delete_db_instance( - DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) - - master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0][ - 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) - - -@mock_rds2 -@mock_kms -def test_create_database_with_encrypted_storage(): - kms_conn = boto3.client('kms', region_name='us-west-2') - key = kms_conn.create_key(Policy='my RDS encryption policy', - Description='RDS encryption key', - KeyUsage='ENCRYPT_DECRYPT') - - conn = boto3.client('rds', region_name='us-west-2') - database = 
conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - StorageEncrypted=True, - KmsKeyId=key['KeyMetadata']['KeyId']) - - database['DBInstance']['StorageEncrypted'].should.equal(True) - database['DBInstance']['KmsKeyId'].should.equal( - key['KeyMetadata']['KeyId']) - - -@mock_rds2 -def test_create_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - db_parameter_group['DBParameterGroup'][ - 'DBParameterGroupName'].should.equal('test') - db_parameter_group['DBParameterGroup'][ - 'DBParameterGroupFamily'].should.equal('mysql5.6') - db_parameter_group['DBParameterGroup'][ - 'Description'].should.equal('test parameter group') - - -@mock_rds2 -def test_create_db_instance_with_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='mysql', - DBInstanceClass='db.m1.small', - DBParameterGroupName='test', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - database['DBInstance']['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - -@mock_rds2 -def test_create_database_with_default_port(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - DBSecurityGroups=["my_sg"]) - database['DBInstance']['Endpoint']['Port'].should.equal(5432) - - -@mock_rds2 -def test_modify_db_instance_with_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='mysql', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('default.mysql5.6') - database['DBInstance']['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBParameterGroupName='test', - ApplyImmediately=True) - - database = conn.describe_db_instances( - DBInstanceIdentifier='db-master-1')['DBInstances'][0] - len(database['DBParameterGroups']).should.equal(1) - database['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - database['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - -@mock_rds2 -def test_create_db_parameter_group_empty_description(): - conn = boto3.client('rds', 
region_name='us-west-2') - conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='').should.throw(ClientError) - - -@mock_rds2 -def test_create_db_parameter_group_duplicate(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group').should.throw(ClientError) - - -@mock_rds2 -def test_describe_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - - -@mock_rds2 -def test_describe_non_existant_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - len(db_parameter_groups['DBParameterGroups']).should.equal(0) - - -@mock_rds2 -def test_delete_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - conn.delete_db_parameter_group(DBParameterGroupName='test') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - len(db_parameter_groups['DBParameterGroups']).should.equal(0) - - -@mock_rds2 -def test_modify_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', - Parameters=[{ - 'ParameterName': 'foo', - 'ParameterValue': 'foo_val', - 'Description': 'test param', - 'ApplyMethod': 'immediate' - }] - ) - - modify_result['DBParameterGroupName'].should.equal('test') - - db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') - db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') - db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') - db_parameters['Parameters'][0]['Description'].should.equal('test param') - db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') - - -@mock_rds2 -def test_delete_non_existant_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_parameter_group.when.called_with( - DBParameterGroupName='non-existant').should.throw(ClientError) - - -@mock_rds2 -def test_create_parameter_group_with_tags(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group', - Tags=[{ - 'Key': 'foo', - 'Value': 'bar', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') - 
result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) +from __future__ import unicode_literals + +from botocore.exceptions import ClientError, ParamValidationError +import boto3 +import sure  # noqa +from moto import mock_ec2, mock_kms, mock_rds2 + + +@mock_rds2 +def test_create_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       Engine='postgres', +                                       DBName='staging-postgres', +                                       DBInstanceClass='db.m1.small', +                                       LicenseModel='license-included', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=["my_sg"]) +    db_instance = database['DBInstance'] +    db_instance['AllocatedStorage'].should.equal(10) +    db_instance['DBInstanceClass'].should.equal("db.m1.small") +    db_instance['LicenseModel'].should.equal("license-included") +    db_instance['MasterUsername'].should.equal("root") +    db_instance['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('my_sg') +    db_instance['DBInstanceArn'].should.equal( 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') +    db_instance['DBInstanceStatus'].should.equal('available') +    db_instance['DBName'].should.equal('staging-postgres') +    db_instance['DBInstanceIdentifier'].should.equal("db-master-1") +    db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) +    db_instance['DbiResourceId'].should.contain("db-") +    db_instance['CopyTagsToSnapshot'].should.equal(False) + + +@mock_rds2 +def test_stop_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       Engine='postgres', +                                       DBName='staging-postgres', +                                       DBInstanceClass='db.m1.small', +                                       LicenseModel='license-included', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=["my_sg"]) +    mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] +    mydb['DBInstanceStatus'].should.equal('available') +    # stopping the database should shut it down +    response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) +    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +    response['DBInstance']['DBInstanceStatus'].should.equal('stopped') +    # the RDS client should raise an error when stopping an already stopped database +    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) +    # stopping a stopped database with a snapshot should raise an error, and no snapshot should be created by that call +    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) +    response = conn.describe_db_snapshots() +    response['DBSnapshots'].should.equal([]) + + +@mock_rds2 +def test_start_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       Engine='postgres', +                                       DBName='staging-postgres', +                                       DBInstanceClass='db.m1.small', +                                       LicenseModel='license-included', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=["my_sg"]) +    mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] +    mydb['DBInstanceStatus'].should.equal('available') +    # starting an already started database should raise an error + 
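# The `.when.called_with(...).should.throw(ClientError)` idiom used throughout this
# module comes from the `sure` assertion library: it invokes the bound client method
# and asserts that it raises. A minimal pytest-flavoured equivalent, offered only as
# an illustrative sketch (the test name and the 'no-such-db' identifier below are
# hypothetical, not part of this patch):
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_rds2

@mock_rds2
def test_stop_unknown_instance_raises():
    conn = boto3.client('rds', region_name='us-west-2')
    # No instance exists in this mocked account, so stopping must fail.
    with pytest.raises(ClientError):
        conn.stop_db_instance(DBInstanceIdentifier='no-such-db')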
conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) +    # stop the database, then test start - it should go from stopped to available, creating a snapshot along the way +    response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') +    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +    response['DBInstance']['DBInstanceStatus'].should.equal('stopped') +    response = conn.describe_db_snapshots() +    response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') +    response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) +    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +    response['DBInstance']['DBInstanceStatus'].should.equal('available') +    # starting the database should not remove the snapshot +    response = conn.describe_db_snapshots() +    response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') +    # stopping the database with a snapshot identifier that already exists should raise an error +    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) +    # stopping the database without requesting a snapshot should succeed +    response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) +    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +    response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + + +@mock_rds2 +def test_fail_to_stop_multi_az(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       Engine='postgres', +                                       DBName='staging-postgres', +                                       DBInstanceClass='db.m1.small', +                                       LicenseModel='license-included', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=["my_sg"], +                                       MultiAZ=True) + +    mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] +    mydb['DBInstanceStatus'].should.equal('available') +    # multi-AZ databases aren't allowed to be shut down at this time. +    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) +    # multi-AZ databases aren't allowed to be started up at this time. +    conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + + +@mock_rds2 +def test_fail_to_stop_readreplica(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       Engine='postgres', +                                       DBName='staging-postgres', +                                       DBInstanceClass='db.m1.small', +                                       LicenseModel='license-included', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=["my_sg"]) + +    replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", +                                                   SourceDBInstanceIdentifier="db-master-1", +                                                   DBInstanceClass="db.m1.small") + +    mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] +    mydb['DBInstanceStatus'].should.equal('available') +    # read-replicas are not allowed to be stopped at this time. +    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) +    # read-replicas are not allowed to be started at this time. 
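# When one of the guarded stop/start calls above fails, botocore attaches a structured
# error payload to the ClientError. A short sketch of inspecting it; the specific error
# code strings mentioned in the comments are assumptions drawn from the AWS RDS API,
# not verified against this moto version:
import boto3
from botocore.exceptions import ClientError
from moto import mock_rds2

@mock_rds2
def show_stop_error_code():
    conn = boto3.client('rds', region_name='us-west-2')
    try:
        conn.stop_db_instance(DBInstanceIdentifier='db-that-does-not-exist')
    except ClientError as err:
        # e.g. a not-found code for a missing instance, versus an
        # invalid-state code when stopping an already stopped one.
        print(err.response['Error']['Code'], err.response['Error']['Message'])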
+    conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + + +@mock_rds2 +def test_get_databases(): +    conn = boto3.client('rds', region_name='us-west-2') + +    instances = conn.describe_db_instances() +    list(instances['DBInstances']).should.have.length_of(0) + +    conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                            AllocatedStorage=10, +                            DBInstanceClass='db.m1.small', +                            Engine='postgres', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=['my_sg']) +    conn.create_db_instance(DBInstanceIdentifier='db-master-2', +                            AllocatedStorage=10, +                            DBInstanceClass='db.m1.small', +                            Engine='postgres', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=['my_sg']) +    instances = conn.describe_db_instances() +    list(instances['DBInstances']).should.have.length_of(2) + +    instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") +    list(instances['DBInstances']).should.have.length_of(1) +    instances['DBInstances'][0][ 'DBInstanceIdentifier'].should.equal("db-master-1") +    instances['DBInstances'][0]['DBInstanceArn'].should.equal( 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') + + +@mock_rds2 +def test_get_databases_paginated(): +    conn = boto3.client('rds', region_name="us-west-2") + +    for i in range(51): +        conn.create_db_instance(AllocatedStorage=5, +                                Port=5432, +                                DBInstanceIdentifier='rds%d' % i, +                                DBInstanceClass='db.t1.micro', +                                Engine='postgres') + +    resp = conn.describe_db_instances() +    resp["DBInstances"].should.have.length_of(50) +    resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) + +    resp2 = conn.describe_db_instances(Marker=resp["Marker"]) +    resp2["DBInstances"].should.have.length_of(1) + +    resp3 = conn.describe_db_instances(MaxRecords=100) +    resp3["DBInstances"].should.have.length_of(51) + + +@mock_rds2 +def test_describe_non_existant_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.describe_db_instances.when.called_with( +        DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_modify_db_instance(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       DBInstanceClass='db.m1.small', +                                       Engine='postgres', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=['my_sg']) +    instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') +    instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) +    conn.modify_db_instance(DBInstanceIdentifier='db-master-1', +                            AllocatedStorage=20, +                            ApplyImmediately=True) +    instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') +    instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) + + +@mock_rds2 +def test_rename_db_instance(): +    conn = boto3.client('rds', region_name='us-west-2') +    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                                       AllocatedStorage=10, +                                       DBInstanceClass='db.m1.small', +                                       Engine='postgres', +                                       MasterUsername='root', +                                       MasterUserPassword='hunter2', +                                       Port=1234, +                                       DBSecurityGroups=['my_sg']) +    instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") +    list(instances['DBInstances']).should.have.length_of(1) +    conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) +    conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + 
NewDBInstanceIdentifier='db-master-2', +                            ApplyImmediately=True) +    conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) +    instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") +    list(instances['DBInstances']).should.have.length_of(1) + + +@mock_rds2 +def test_modify_non_existant_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db', +                                             AllocatedStorage=20, +                                             ApplyImmediately=True).should.throw(ClientError) + + +@mock_rds2 +def test_reboot_db_instance(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                            AllocatedStorage=10, +                            DBInstanceClass='db.m1.small', +                            Engine='postgres', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=['my_sg']) +    database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1') +    database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + + +@mock_rds2 +def test_reboot_non_existant_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.reboot_db_instance.when.called_with( +        DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_delete_database(): +    conn = boto3.client('rds', region_name='us-west-2') +    instances = conn.describe_db_instances() +    list(instances['DBInstances']).should.have.length_of(0) +    conn.create_db_instance(DBInstanceIdentifier='db-primary-1', +                            AllocatedStorage=10, +                            Engine='postgres', +                            DBInstanceClass='db.m1.small', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=['my_sg']) +    instances = conn.describe_db_instances() +    list(instances['DBInstances']).should.have.length_of(1) + +    conn.delete_db_instance(DBInstanceIdentifier="db-primary-1", +                            FinalDBSnapshotIdentifier='primary-1-snapshot') + +    instances = conn.describe_db_instances() +    list(instances['DBInstances']).should.have.length_of(0) + +    # the final snapshot should have been saved +    snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots') +    snapshots[0].get('Engine').should.equal('postgres') + + +@mock_rds2 +def test_delete_non_existant_database(): +    conn = boto3.client('rds', region_name="us-west-2") +    conn.delete_db_instance.when.called_with( +        DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_create_db_snapshots(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_snapshot.when.called_with( +        DBInstanceIdentifier='db-primary-1', +        DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + +    conn.create_db_instance(DBInstanceIdentifier='db-primary-1', +                            AllocatedStorage=10, +                            Engine='postgres', +                            DBName='staging-postgres', +                            DBInstanceClass='db.m1.small', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=["my_sg"]) + +    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', +                                       DBSnapshotIdentifier='g-1').get('DBSnapshot') + +    snapshot.get('Engine').should.equal('postgres') +    snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') +    snapshot.get('DBSnapshotIdentifier').should.equal('g-1') +    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) +    result['TagList'].should.equal([]) + + +@mock_rds2 +def test_create_db_snapshots_copy_tags(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_snapshot.when.called_with( + 
DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + CopyTagsToSnapshot=True, + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_describe_db_snapshots(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') + + created.get('Engine').should.equal('postgres') + + by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') + by_snapshot_id.should.equal(by_database_id) + + snapshot = by_snapshot_id[0] + snapshot.should.equal(created) + snapshot.get('Engine').should.equal('postgres') + + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-2') + snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + snapshots.should.have.length_of(2) + + +@mock_rds2 +def test_delete_db_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1') + + conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] + conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') + conn.describe_db_snapshots.when.called_with( + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + option_group = conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_group['OptionGroup']['OptionGroupName'].should.equal('test') + option_group['OptionGroup']['EngineName'].should.equal('mysql') + option_group['OptionGroup'][ + 'OptionGroupDescription'].should.equal('test option group') + option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') + + +@mock_rds2 +def test_create_option_group_bad_engine_name(): + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='invalid_engine', + MajorEngineVersion='5.6', + OptionGroupDescription='test invalid engine').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_bad_engine_major_version(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='6.6.6', + OptionGroupDescription='test invalid engine version').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_empty_description(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group').should.throw(ClientError) + + +@mock_rds2 +def test_describe_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') + + +@mock_rds2 +def test_describe_non_existant_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_option_groups.when.called_with( + OptionGroupName="not-a-option-group").should.throw(ClientError) + + +@mock_rds2 +def test_delete_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') + conn.delete_option_group(OptionGroupName='test') + conn.describe_option_groups.when.called_with( + OptionGroupName='test').should.throw(ClientError) + + +@mock_rds2 +def test_delete_non_existant_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_option_group.when.called_with( + OptionGroupName='non-existant').should.throw(ClientError) + + +@mock_rds2 +def test_describe_option_group_options(): + conn = boto3.client('rds', region_name='us-west-2') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee') + len(option_group_options['OptionGroupOptions']).should.equal(4) + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee', MajorEngineVersion='11.00') + len(option_group_options['OptionGroupOptions']).should.equal(2) + option_group_options = conn.describe_option_group_options( + EngineName='mysql', MajorEngineVersion='5.6') + len(option_group_options['OptionGroupOptions']).should.equal(1) + conn.describe_option_group_options.when.called_with( + EngineName='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='mysql', 
MajorEngineVersion='non-existent').should.throw(ClientError) + + +@mock_rds2 +def test_modify_option_group(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_option_group(OptionGroupName='test', EngineName='mysql', +                             MajorEngineVersion='5.6', OptionGroupDescription='test option group') +    # TODO: create an option and validate it before deleting. +    # It is not yet clear how to use this API to add options to an +    # option group; once that is worked out, this test can be completed. +    result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[ ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) +    result['OptionGroup']['EngineName'].should.equal('mysql') +    result['OptionGroup']['Options'].should.equal([]) +    result['OptionGroup']['OptionGroupName'].should.equal('test') + + +@mock_rds2 +def test_modify_option_group_no_options(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_option_group(OptionGroupName='test', EngineName='mysql', +                             MajorEngineVersion='5.6', OptionGroupDescription='test option group') +    conn.modify_option_group.when.called_with( +        OptionGroupName='test').should.throw(ClientError) + + +@mock_rds2 +def test_modify_non_existant_option_group(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[( 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) + + +@mock_rds2 +def test_list_tags_invalid_arn(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.list_tags_for_resource.when.called_with( +        ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) + + +@mock_rds2 +def test_list_tags_db(): +    conn = boto3.client('rds', region_name='us-west-2') +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') +    result['TagList'].should.equal([]) +    test_instance = conn.create_db_instance( +        DBInstanceIdentifier='db-with-tags', +        AllocatedStorage=10, +        DBInstanceClass='db.m1.small', +        Engine='postgres', +        MasterUsername='root', +        MasterUserPassword='hunter2', +        Port=1234, +        DBSecurityGroups=['my_sg'], +        Tags=[ +            { +                'Key': 'foo', +                'Value': 'bar', +            }, +            { +                'Key': 'foo1', +                'Value': 'bar1', +            }, +        ]) +    result = conn.list_tags_for_resource( +        ResourceName=test_instance['DBInstance']['DBInstanceArn']) +    result['TagList'].should.equal([{'Value': 'bar', +                                     'Key': 'foo'}, +                                    {'Value': 'bar1', +                                     'Key': 'foo1'}]) + + +@mock_rds2 +def test_add_tags_db(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_instance(DBInstanceIdentifier='db-without-tags', +                            AllocatedStorage=10, +                            DBInstanceClass='db.m1.small', +                            Engine='postgres', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=['my_sg'], +                            Tags=[ +                                { +                                    'Key': 'foo', +                                    'Value': 'bar', +                                }, +                                { +                                    'Key': 'foo1', +                                    'Value': 'bar1', +                                }, +                            ]) +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') +    list(result['TagList']).should.have.length_of(2) +    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', +                              Tags=[ +                                  { +                                      'Key': 'foo', +                                      'Value': 'fish', +                                  }, +                                  { +                                      'Key': 'foo2', +                                      'Value': 'bar2', +                                  }, +                              ]) +    result = 
conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') +    list(result['TagList']).should.have.length_of(3) + + +@mock_rds2 +def test_remove_tags_db(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_instance(DBInstanceIdentifier='db-with-tags', +                            AllocatedStorage=10, +                            DBInstanceClass='db.m1.small', +                            Engine='postgres', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=['my_sg'], +                            Tags=[ +                                { +                                    'Key': 'foo', +                                    'Value': 'bar', +                                }, +                                { +                                    'Key': 'foo1', +                                    'Value': 'bar1', +                                }, +                            ]) +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') +    list(result['TagList']).should.have.length_of(2) +    conn.remove_tags_from_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') +    len(result['TagList']).should.equal(1) + + +@mock_rds2 +def test_list_tags_snapshot(): +    conn = boto3.client('rds', region_name='us-west-2') +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo') +    result['TagList'].should.equal([]) +    conn.create_db_instance(DBInstanceIdentifier='db-primary-1', +                            AllocatedStorage=10, +                            Engine='postgres', +                            DBName='staging-postgres', +                            DBInstanceClass='db.m1.small', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=["my_sg"]) +    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', +                                       DBSnapshotIdentifier='snapshot-with-tags', +                                       Tags=[ +                                           { +                                               'Key': 'foo', +                                               'Value': 'bar', +                                           }, +                                           { +                                               'Key': 'foo1', +                                               'Value': 'bar1', +                                           }, +                                       ]) +    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn']) +    result['TagList'].should.equal([{'Value': 'bar', +                                     'Key': 'foo'}, +                                    {'Value': 'bar1', +                                     'Key': 'foo1'}]) + + +@mock_rds2 +def test_add_tags_snapshot(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_instance(DBInstanceIdentifier='db-primary-1', +                            AllocatedStorage=10, +                            Engine='postgres', +                            DBName='staging-postgres', +                            DBInstanceClass='db.m1.small', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=["my_sg"]) +    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', +                                       DBSnapshotIdentifier='snapshot-without-tags', +                                       Tags=[ +                                           { +                                               'Key': 'foo', +                                               'Value': 'bar', +                                           }, +                                           { +                                               'Key': 'foo1', +                                               'Value': 'bar1', +                                           }, +                                       ]) +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') +    list(result['TagList']).should.have.length_of(2) +    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags', +                              Tags=[ +                                  { +                                      'Key': 'foo', +                                      'Value': 'fish', +                                  }, +                                  { +                                      'Key': 'foo2', +                                      'Value': 'bar2', +                                  }, +                              ]) +    result = conn.list_tags_for_resource( +        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') +    list(result['TagList']).should.have.length_of(3) + + +@mock_rds2 +def test_remove_tags_snapshot(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_instance(DBInstanceIdentifier='db-primary-1', +                            AllocatedStorage=10, +                            Engine='postgres', +                            DBName='staging-postgres', +                            DBInstanceClass='db.m1.small', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234, +                            DBSecurityGroups=["my_sg"]) +    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + 
DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + len(result['TagList']).should.equal(1) + + +@mock_rds2 +def test_add_tags_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(0) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + + +@mock_rds2 +def test_remove_tags_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(1) + + +@mock_rds2 +def test_create_database_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + + result = conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") + result['DBSecurityGroup'][ + 'DBSecurityGroupDescription'].should.equal("DB Security Group") + result['DBSecurityGroup']['IPRanges'].should.equal([]) + + +@mock_rds2 +def test_get_security_groups(): + conn = boto3.client('rds', region_name='us-west-2') + + result = conn.describe_db_security_groups() + result['DBSecurityGroups'].should.have.length_of(0) + + conn.create_db_security_group( + DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') + + result = conn.describe_db_security_groups() + result['DBSecurityGroups'].should.have.length_of(2) + + result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") + result['DBSecurityGroups'].should.have.length_of(1) + result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") + + +@mock_rds2 +def test_get_non_existant_security_group(): 
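# The tag assertions in this file hard-code ARNs such as
# 'arn:aws:rds:us-west-2:1234567890:og:test'. The shape is
# arn:aws:rds:{region}:{account}:{resource-type}:{name}, where '1234567890' is the
# fixed account id this moto version stamps on resources and the resource type is
# 'db', 'snapshot', 'og', 'pg', 'secgrp' or 'subgrp'. A tiny helper sketch
# (hypothetical, not part of the patch) that builds the same strings:
def rds_arn(resource_type, name, region='us-west-2', account='1234567890'):
    # e.g. rds_arn('og', 'test') -> 'arn:aws:rds:us-west-2:1234567890:og:test'
    return 'arn:aws:rds:{0}:{1}:{2}:{3}'.format(region, account, resource_type, name)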
+    conn = boto3.client('rds', region_name='us-west-2') +    conn.describe_db_security_groups.when.called_with( +        DBSecurityGroupName="not-a-sg").should.throw(ClientError) + + +@mock_rds2 +def test_delete_database_security_group(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.create_db_security_group( +        DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + +    result = conn.describe_db_security_groups() +    result['DBSecurityGroups'].should.have.length_of(1) + +    conn.delete_db_security_group(DBSecurityGroupName="db_sg") +    result = conn.describe_db_security_groups() +    result['DBSecurityGroups'].should.have.length_of(0) + + +@mock_rds2 +def test_delete_non_existant_security_group(): +    conn = boto3.client('rds', region_name='us-west-2') +    conn.delete_db_security_group.when.called_with( +        DBSecurityGroupName="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_security_group_authorize(): +    conn = boto3.client('rds', region_name='us-west-2') +    security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg', +                                                   DBSecurityGroupDescription='DB Security Group') +    security_group['DBSecurityGroup']['IPRanges'].should.equal([]) + +    conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', +                                             CIDRIP='10.3.2.45/32') + +    result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") +    result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) +    result['DBSecurityGroups'][0]['IPRanges'].should.equal( +        [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) + +    conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', +                                             CIDRIP='10.3.2.46/32') +    result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") +    result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2) +    result['DBSecurityGroups'][0]['IPRanges'].should.equal([ +        {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}, +        {'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'}, +    ]) + + +@mock_rds2 +def test_add_security_group_to_database(): +    conn = boto3.client('rds', region_name='us-west-2') + +    conn.create_db_instance(DBInstanceIdentifier='db-master-1', +                            AllocatedStorage=10, +                            DBInstanceClass='db.m1.small', +                            Engine='postgres', +                            MasterUsername='root', +                            MasterUserPassword='hunter2', +                            Port=1234) + +    result = conn.describe_db_instances() +    result['DBInstances'][0]['DBSecurityGroups'].should.equal([]) +    conn.create_db_security_group(DBSecurityGroupName='db_sg', +                                  DBSecurityGroupDescription='DB Security Group') +    conn.modify_db_instance(DBInstanceIdentifier='db-master-1', +                            DBSecurityGroups=['db_sg']) +    result = conn.describe_db_instances() +    result['DBInstances'][0]['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('db_sg') + + +@mock_rds2 +def test_list_tags_security_group(): +    conn = boto3.client('rds', region_name='us-west-2') +    result = conn.describe_db_subnet_groups() +    result['DBSubnetGroups'].should.have.length_of(0) + +    security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", +                                                   DBSecurityGroupDescription='DB Security Group', +                                                   Tags=[{'Value': 'bar', +                                                          'Key': 'foo'}, +                                                         {'Value': 'bar1', +                                                          'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] +    resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( +        security_group) +    result = conn.list_tags_for_resource(ResourceName=resource) +    result['TagList'].should.equal([{'Value': 'bar', +                                     'Key': 'foo'}, +                                    {'Value': 'bar1', +                                     'Key': 'foo1'}]) + + +@mock_rds2 +def test_add_tags_security_group(): +    conn = boto3.client('rds', region_name='us-west-2') + 
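# The tag tests around here repeat one add/list/remove cycle against different
# resource types. A condensed sketch of that round trip as a reusable helper; the
# helper name and the 'env' tag are illustrative, not part of this patch:
def tag_round_trip(conn, resource_arn):
    # Attach a tag, read the full tag list back, then detach the tag again.
    conn.add_tags_to_resource(ResourceName=resource_arn,
                              Tags=[{'Key': 'env', 'Value': 'test'}])
    tags = conn.list_tags_for_resource(ResourceName=resource_arn)['TagList']
    conn.remove_tags_from_resource(ResourceName=resource_arn, TagKeys=['env'])
    return tags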
result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_remove_tags_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group', + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_create_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet1 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + subnet2 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.2.0/24')['Subnet'] + + subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] + conn = boto3.client('rds', region_name='us-west-2') + result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', + DBSubnetGroupDescription='my db subnet', + SubnetIds=subnet_ids) + result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") + result['DBSubnetGroup'][ + 'DBSubnetGroupDescription'].should.equal("my db subnet") + subnets = result['DBSubnetGroup']['Subnets'] + subnet_group_ids = [subnets[0]['SubnetIdentifier'], + subnets[1]['SubnetIdentifier']] + list(subnet_group_ids).should.equal(subnet_ids) + + +@mock_ec2 +@mock_rds2 +def test_create_database_in_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSubnetGroupName='db_subnet1') + result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + result['DBInstances'][0]['DBSubnetGroup'][ + 'DBSubnetGroupName'].should.equal('db_subnet1') + + +@mock_ec2 +@mock_rds2 +def test_describe_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + + resp = conn.describe_db_subnet_groups() + resp['DBSubnetGroups'].should.have.length_of(2) + + subnets = resp['DBSubnetGroups'][0]['Subnets'] + subnets.should.have.length_of(1) + + list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") + ['DBSubnetGroups']).should.have.length_of(1) + + conn.describe_db_subnet_groups.when.called_with( + DBSubnetGroupName="not-a-subnet").should.throw(ClientError) + + +@mock_ec2 +@mock_rds2 +def test_delete_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(1) + + conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + conn.delete_db_subnet_group.when.called_with( + DBSubnetGroupName="db_subnet1").should.throw(ClientError) + + +@mock_ec2 +@mock_rds2 +def test_list_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_add_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 
'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_remove_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) + + +@mock_rds2 +def test_create_database_replica(): + conn = boto3.client('rds', region_name='us-west-2') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + replica['DBInstance'][ + 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') + replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') + replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') + + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ + 'db-replica-1']) + + conn.delete_db_instance( + DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) + + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0][ + 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) + + +@mock_rds2 +@mock_kms +def test_create_database_with_encrypted_storage(): + kms_conn = boto3.client('kms', region_name='us-west-2') + key = kms_conn.create_key(Policy='my RDS encryption policy', + Description='RDS encryption key', + KeyUsage='ENCRYPT_DECRYPT') + + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + StorageEncrypted=True, + KmsKeyId=key['KeyMetadata']['KeyId']) + + database['DBInstance']['StorageEncrypted'].should.equal(True) + database['DBInstance']['KmsKeyId'].should.equal( + key['KeyMetadata']['KeyId']) + + +@mock_rds2 +def test_create_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupName'].should.equal('test') + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupFamily'].should.equal('mysql5.6') + db_parameter_group['DBParameterGroup'][ + 'Description'].should.equal('test parameter group') + + +@mock_rds2 +def 
test_create_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + DBParameterGroupName='test', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + +@mock_rds2 +def test_create_database_with_default_port(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + DBSecurityGroups=["my_sg"]) + database['DBInstance']['Endpoint']['Port'].should.equal(5432) + + +@mock_rds2 +def test_modify_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('default.mysql5.6') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + DBParameterGroupName='test', + ApplyImmediately=True) + + database = conn.describe_db_instances( + DBInstanceIdentifier='db-master-1')['DBInstances'][0] + len(database['DBParameterGroups']).should.equal(1) + database['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + +@mock_rds2 +def test_create_db_parameter_group_empty_description(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='').should.throw(ClientError) + + +@mock_rds2 +def test_create_db_parameter_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group').should.throw(ClientError) + + +@mock_rds2 +def test_describe_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 
'DBParameterGroupName'].should.equal('test') + + +@mock_rds2 +def test_describe_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@mock_rds2 +def test_delete_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + conn.delete_db_parameter_group(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@mock_rds2 +def test_modify_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', + Parameters=[{ + 'ParameterName': 'foo', + 'ParameterValue': 'foo_val', + 'Description': 'test param', + 'ApplyMethod': 'immediate' + }] + ) + + modify_result['DBParameterGroupName'].should.equal('test') + + db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') + db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') + db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') + db_parameters['Parameters'][0]['Description'].should.equal('test param') + db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') + + +@mock_rds2 +def test_delete_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_db_parameter_group.when.called_with( + DBParameterGroupName='non-existant').should.throw(ClientError) + + +@mock_rds2 +def test_create_parameter_group_with_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group', + Tags=[{ + 'Key': 'foo', + 'Value': 'bar', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') + result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index f0e227a5d..541614788 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1,1242 +1,1263 @@ -from __future__ import unicode_literals - -import datetime - -import boto -import boto3 -from boto.redshift.exceptions import ( - ClusterNotFound, - ClusterParameterGroupNotFound, - ClusterSecurityGroupNotFound, - ClusterSubnetGroupNotFound, - InvalidSubnet, -) -from botocore.exceptions import ( - ClientError -) -import sure # noqa - -from moto import mock_ec2 -from moto import mock_ec2_deprecated -from moto import mock_redshift -from moto import mock_redshift_deprecated - - -@mock_redshift -def test_create_cluster_boto3(): - client = boto3.client('redshift', region_name='us-east-1') - response = client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - 
MasterUsername='user', - MasterUserPassword='password', - ) - response['Cluster']['NodeType'].should.equal('ds2.xlarge') - create_time = response['Cluster']['ClusterCreateTime'] - create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) - - -@mock_redshift -def test_create_snapshot_copy_grant(): - client = boto3.client('redshift', region_name='us-east-1') - grants = client.create_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1', - KmsKeyId='fake', - ) - grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') - grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') - - client.delete_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1', - ) - - client.describe_snapshot_copy_grants.when.called_with( - SnapshotCopyGrantName='test-us-east-1', - ).should.throw(Exception) - - -@mock_redshift -def test_create_many_snapshot_copy_grants(): - client = boto3.client('redshift', region_name='us-east-1') - - for i in range(10): - client.create_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), - KmsKeyId='fake', - ) - response = client.describe_snapshot_copy_grants() - len(response['SnapshotCopyGrants']).should.equal(10) - - -@mock_redshift -def test_no_snapshot_copy_grants(): - client = boto3.client('redshift', region_name='us-east-1') - response = client.describe_snapshot_copy_grants() - len(response['SnapshotCopyGrants']).should.equal(0) - - -@mock_redshift_deprecated -def test_create_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - cluster_response = conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - db_name="my_db", - cluster_type="multi-node", - availability_zone="us-east-1d", - preferred_maintenance_window="Mon:03:00-Mon:11:00", - automated_snapshot_retention_period=10, - port=1234, - cluster_version="1.0", - allow_version_upgrade=True, - number_of_nodes=3, - ) - cluster_response['CreateClusterResponse']['CreateClusterResult'][ - 'Cluster']['ClusterStatus'].should.equal('creating') - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal(cluster_identifier) - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['MasterUsername'].should.equal("username") - cluster['DBName'].should.equal("my_db") - cluster['ClusterSecurityGroups'][0][ - 'ClusterSecurityGroupName'].should.equal("Default") - cluster['VpcSecurityGroups'].should.equal([]) - cluster['ClusterSubnetGroupName'].should.equal(None) - cluster['AvailabilityZone'].should.equal("us-east-1d") - cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("default.redshift-1.0") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) - cluster['Port'].should.equal(1234) - cluster['ClusterVersion'].should.equal("1.0") - cluster['AllowVersionUpgrade'].should.equal(True) - cluster['NumberOfNodes'].should.equal(3) - - -@mock_redshift_deprecated -def test_create_single_node_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - db_name="my_db", - 
cluster_type="single-node", - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal(cluster_identifier) - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['MasterUsername'].should.equal("username") - cluster['DBName'].should.equal("my_db") - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -def test_default_cluster_attributes(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['DBName'].should.equal("dev") - cluster['ClusterSubnetGroupName'].should.equal(None) - assert "us-east-" in cluster['AvailabilityZone'] - cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("default.redshift-1.0") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) - cluster['Port'].should.equal(5439) - cluster['ClusterVersion'].should.equal("1.0") - cluster['AllowVersionUpgrade'].should.equal(True) - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_in_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group( - "my_subnet_group", - "This is my subnet group", - subnet_ids=[subnet.id], - ) - - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_subnet_group_name='my_subnet_group', - ) - - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') - - -@mock_redshift -@mock_ec2 -def test_create_cluster_in_subnet_group_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster_subnet_group( - ClusterSubnetGroupName='my_subnet_group', - Description='This is my subnet group', - SubnetIds=[subnet.id] - ) - - client.create_cluster( - ClusterIdentifier="my_cluster", - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - ClusterSubnetGroupName='my_subnet_group', - ) - - cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") - cluster = cluster_response['Clusters'][0] - cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') - - -@mock_redshift_deprecated -def test_create_cluster_with_security_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.create_cluster_security_group( - "security_group1", - "This is my security group", - ) - conn.create_cluster_security_group( - "security_group2", - "This is my security group", - ) - - cluster_identifier = 'my_cluster' - 
conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_security_groups=["security_group1", "security_group2"] - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] - for group in cluster['ClusterSecurityGroups']] - set(group_names).should.equal(set(["security_group1", "security_group2"])) - - -@mock_redshift -def test_create_cluster_with_security_group_boto3(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group1", - Description="This is my security group", - ) - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group2", - Description="This is my security group", - ) - - cluster_identifier = 'my_cluster' - client.create_cluster( - ClusterIdentifier=cluster_identifier, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - ClusterSecurityGroups=["security_group1", "security_group2"] - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] - for group in cluster['ClusterSecurityGroups']] - set(group_names).should.equal({"security_group1", "security_group2"}) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_with_vpc_security_groups(): - vpc_conn = boto.connect_vpc() - ec2_conn = boto.connect_ec2() - redshift_conn = boto.connect_redshift() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - security_group = ec2_conn.create_security_group( - "vpc_security_group", "a group", vpc_id=vpc.id) - - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - vpc_security_group_ids=[security_group.id], - ) - - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] - for group in cluster['VpcSecurityGroups']] - list(group_ids).should.equal([security_group.id]) - - -@mock_redshift -@mock_ec2 -def test_create_cluster_with_vpc_security_groups_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - client = boto3.client('redshift', region_name='us-east-1') - cluster_id = 'my_cluster' - security_group = ec2.create_security_group( - Description="vpc_security_group", - GroupName="a group", - VpcId=vpc.id) - client.create_cluster( - ClusterIdentifier=cluster_id, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - VpcSecurityGroupIds=[security_group.id], - ) - response = client.describe_clusters(ClusterIdentifier=cluster_id) - cluster = response['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] - for group in cluster['VpcSecurityGroups']] - list(group_ids).should.equal([security_group.id]) - - -@mock_redshift -def test_create_cluster_with_iam_roles(): - iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] - client = boto3.client('redshift', region_name='us-east-1') - cluster_id = 'my_cluster' - client.create_cluster( - ClusterIdentifier=cluster_id, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - 
IamRoles=iam_roles_arn - ) - response = client.describe_clusters(ClusterIdentifier=cluster_id) - cluster = response['Clusters'][0] - iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] - iam_roles_arn.should.equal(iam_roles) - - -@mock_redshift_deprecated -def test_create_cluster_with_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_parameter_group_name='my_parameter_group', - ) - - cluster_response = conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("my_parameter_group") - - -@mock_redshift_deprecated -def test_describe_non_existent_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_clusters.when.called_with( - "not-a-cluster").should.throw(ClusterNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster(): - conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type='single-node', - master_username="username", - master_user_password="password", - ) - - clusters = conn.describe_clusters()['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - list(clusters).should.have.length_of(1) - - conn.delete_cluster(cluster_identifier) - - clusters = conn.describe_clusters()['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - list(clusters).should.have.length_of(0) - - # Delete invalid id - conn.delete_cluster.when.called_with( - "not-a-cluster").should.throw(ClusterNotFound) - - -@mock_redshift_deprecated -def test_modify_cluster(): - conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' - conn.create_cluster_security_group( - "security_group", - "This is my security group", - ) - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - conn.create_cluster( - cluster_identifier, - node_type='single-node', - master_username="username", - master_user_password="password", - ) - - conn.modify_cluster( - cluster_identifier, - cluster_type="multi-node", - node_type="dw.hs1.xlarge", - cluster_security_groups="security_group", - master_user_password="new_password", - cluster_parameter_group_name="my_parameter_group", - automated_snapshot_retention_period=7, - preferred_maintenance_window="Tue:03:00-Tue:11:00", - allow_version_upgrade=False, - new_cluster_identifier="new_identifier", - ) - - cluster_response = conn.describe_clusters("new_identifier") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal("new_identifier") - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['ClusterSecurityGroups'][0][ - 'ClusterSecurityGroupName'].should.equal("security_group") - cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("my_parameter_group") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) - cluster['AllowVersionUpgrade'].should.equal(False) - # This one should remain unmodified. 
- cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") - - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet1.id, subnet2.id], - ) - - subnets_response = redshift_conn.describe_cluster_subnet_groups( - "my_subnet") - my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] - - my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") - my_subnet['Description'].should.equal("This is my subnet group") - subnet_ids = [subnet['SubnetIdentifier'] - for subnet in my_subnet['Subnets']] - set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_invalid_cluster_subnet_group(): - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group.when.called_with( - "my_subnet", - "This is my subnet group", - subnet_ids=["subnet-1234"], - ).should.throw(InvalidSubnet) - - -@mock_redshift_deprecated -def test_describe_non_existent_subnet_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_subnet_groups.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_delete_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet.id], - ) - - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] - subnets.should.have.length_of(1) - - redshift_conn.delete_cluster_subnet_group("my_subnet") - - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] - subnets.should.have.length_of(0) - - # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) - - -@mock_redshift_deprecated -def test_create_cluster_security_group(): - conn = boto.connect_redshift() - conn.create_cluster_security_group( - "my_security_group", - "This is my security group", - ) - - groups_response = conn.describe_cluster_security_groups( - "my_security_group") - my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] - - my_group['ClusterSecurityGroupName'].should.equal("my_security_group") - my_group['Description'].should.equal("This is my security group") - list(my_group['IPRanges']).should.equal([]) - - -@mock_redshift_deprecated -def test_describe_non_existent_security_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_security_groups.when.called_with( - "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) - - -@mock_redshift_deprecated -def 
test_delete_cluster_security_group(): - conn = boto.connect_redshift() - conn.create_cluster_security_group( - "my_security_group", - "This is my security group", - ) - - groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] - groups.should.have.length_of(2) # The default group already exists - - conn.delete_cluster_security_group("my_security_group") - - groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] - groups.should.have.length_of(1) - - # Delete invalid id - conn.delete_cluster_security_group.when.called_with( - "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) - - -@mock_redshift_deprecated -def test_create_cluster_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - groups_response = conn.describe_cluster_parameter_groups( - "my_parameter_group") - my_group = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] - - my_group['ParameterGroupName'].should.equal("my_parameter_group") - my_group['ParameterGroupFamily'].should.equal("redshift-1.0") - my_group['Description'].should.equal("This is my parameter group") - - -@mock_redshift_deprecated -def test_describe_non_existent_parameter_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_parameter_groups.when.called_with( - "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'] - groups.should.have.length_of(2) # The default group already exists - - conn.delete_cluster_parameter_group("my_parameter_group") - - groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'] - groups.should.have.length_of(1) - - # Delete invalid id - conn.delete_cluster_parameter_group.when.called_with( - "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - - - -@mock_redshift -def test_create_cluster_snapshot_of_non_existent_cluster(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'non-existent-cluster-id' - client.create_cluster_snapshot.when.called_with( - SnapshotIdentifier='snapshot-id', - ClusterIdentifier=cluster_identifier, - ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) - - -@mock_redshift -def test_create_cluster_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - cluster_response = client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - 
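The `.should.throw(ClientError, ...)` assertions used for the not-found cases above can also be written as a plain `try/except`, which makes the error payload easier to inspect. A minimal sketch for the snapshot-of-a-missing-cluster case; the identifiers are illustrative and the message check mirrors the assertion in the test:

```python
import boto3
from botocore.exceptions import ClientError
from moto import mock_redshift


@mock_redshift
def check_snapshot_of_missing_cluster():
    client = boto3.client('redshift', region_name='us-east-1')
    try:
        client.create_cluster_snapshot(
            SnapshotIdentifier='snapshot-id',
            ClusterIdentifier='non-existent-cluster-id',
        )
    except ClientError as err:
        # moto's message matches the test above: 'Cluster <id> not found.'
        assert 'not found' in err.response['Error']['Message']
    else:
        raise AssertionError('expected a ClientError for the missing cluster')
```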
cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') - - snapshot_response = client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': 'test-tag-key', - 'Value': 'test-tag-value'}] - ) - snapshot = snapshot_response['Snapshot'] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') - - -@mock_redshift -def test_describe_cluster_snapshots(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - ) - - resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) - resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) - resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) - snapshot = resp_snap['Snapshots'][0] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') - - -@mock_redshift -def test_describe_cluster_snapshots_not_found_error(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.describe_cluster_snapshots.when.called_with( - ClusterIdentifier=cluster_identifier, - ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) - - client.describe_cluster_snapshots.when.called_with( - SnapshotIdentifier=snapshot_identifier - ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) - - -@mock_redshift -def test_delete_cluster_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ) - - snapshots = client.describe_cluster_snapshots()['Snapshots'] - list(snapshots).should.have.length_of(1) - - client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ - 'Snapshot']['Status'].should.equal('deleted') - - snapshots = client.describe_cluster_snapshots()['Snapshots'] - list(snapshots).should.have.length_of(0) - - # Delete invalid id - client.delete_cluster_snapshot.when.called_with( - SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) - - -@mock_redshift -def test_cluster_snapshot_already_exists(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - 
NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ) - - client.create_cluster_snapshot.when.called_with( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ).should.throw(ClientError) - - -@mock_redshift -def test_create_cluster_from_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - original_cluster_identifier = 'original-cluster' - original_snapshot_identifier = 'original-snapshot' - new_cluster_identifier = 'new-cluster' - - client.create_cluster( - ClusterIdentifier=original_cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=original_snapshot_identifier, - ClusterIdentifier=original_cluster_identifier - ) - response = client.restore_from_cluster_snapshot( - ClusterIdentifier=new_cluster_identifier, - SnapshotIdentifier=original_snapshot_identifier, - Port=1234 - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - response = client.describe_clusters( - ClusterIdentifier=new_cluster_identifier - ) - new_cluster = response['Clusters'][0] - new_cluster['NodeType'].should.equal('ds2.xlarge') - new_cluster['MasterUsername'].should.equal('username') - new_cluster['Endpoint']['Port'].should.equal(1234) - - -@mock_redshift -def test_create_cluster_from_snapshot_with_waiter(): - client = boto3.client('redshift', region_name='us-east-1') - original_cluster_identifier = 'original-cluster' - original_snapshot_identifier = 'original-snapshot' - new_cluster_identifier = 'new-cluster' - - client.create_cluster( - ClusterIdentifier=original_cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=original_snapshot_identifier, - ClusterIdentifier=original_cluster_identifier - ) - response = client.restore_from_cluster_snapshot( - ClusterIdentifier=new_cluster_identifier, - SnapshotIdentifier=original_snapshot_identifier, - Port=1234 - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - client.get_waiter('cluster_restored').wait( - ClusterIdentifier=new_cluster_identifier, - WaiterConfig={ - 'Delay': 1, - 'MaxAttempts': 2, - } - ) - - response = client.describe_clusters( - ClusterIdentifier=new_cluster_identifier - ) - new_cluster = response['Clusters'][0] - new_cluster['NodeType'].should.equal('ds2.xlarge') - new_cluster['MasterUsername'].should.equal('username') - new_cluster['Endpoint']['Port'].should.equal(1234) - - -@mock_redshift -def test_create_cluster_from_non_existent_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - client.restore_from_cluster_snapshot.when.called_with( - ClusterIdentifier='cluster-id', - SnapshotIdentifier='non-existent-snapshot', - ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') - - -@mock_redshift -def test_create_cluster_status_update(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'test-cluster' - - response = client.create_cluster( - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - 
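Restoring from a snapshot returns a cluster that is still in the `creating` state; the `cluster_restored` waiter used in `test_create_cluster_from_snapshot_with_waiter` above polls the cluster description until the restore completes. A condensed sketch of that restore-and-wait flow under moto, with illustrative identifiers:

```python
import boto3
from moto import mock_redshift


@mock_redshift
def restore_and_wait():
    client = boto3.client('redshift', region_name='us-east-1')
    client.create_cluster(
        ClusterIdentifier='original-cluster',
        ClusterType='single-node',
        NodeType='ds2.xlarge',
        MasterUsername='username',
        MasterUserPassword='password',
    )
    client.create_cluster_snapshot(
        SnapshotIdentifier='original-snapshot',
        ClusterIdentifier='original-cluster',
    )
    # The restore call returns immediately, with the cluster still 'creating'
    response = client.restore_from_cluster_snapshot(
        ClusterIdentifier='new-cluster',
        SnapshotIdentifier='original-snapshot',
        Port=1234,
    )
    assert response['Cluster']['ClusterStatus'] == 'creating'
    # Tight waiter limits keep the test fast; moto finishes the restore quickly
    client.get_waiter('cluster_restored').wait(
        ClusterIdentifier='new-cluster',
        WaiterConfig={'Delay': 1, 'MaxAttempts': 2},
    )
    restored = client.describe_clusters(
        ClusterIdentifier='new-cluster')['Clusters'][0]
    assert restored['Endpoint']['Port'] == 1234
```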
response = client.describe_clusters( - ClusterIdentifier=cluster_identifier - ) - response['Clusters'][0]['ClusterStatus'].should.equal('available') - - -@mock_redshift -def test_describe_tags_with_resource_type(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - snapshot_identifier = 'my_snapshot' - snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'snapshot:{}/{}'.format(cluster_identifier, - snapshot_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceType='cluster') - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('cluster') - tagged_resources[0]['ResourceName'].should.equal(cluster_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceType='snapshot') - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('snapshot') - tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - -@mock_redshift -def test_describe_tags_cannot_specify_resource_type_and_resource_name(): - client = boto3.client('redshift', region_name='us-east-1') - resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' - resource_type = 'cluster' - client.describe_tags.when.called_with( - ResourceName=resource_name, - ResourceType=resource_type - ).should.throw(ClientError, 'using either an ARN or a resource type') - - -@mock_redshift -def test_describe_tags_with_resource_name(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - snapshot_identifier = 'snapshot-id' - snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'snapshot:{}/{}'.format(cluster_identifier, - snapshot_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceName=cluster_arn) - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('cluster') - tagged_resources[0]['ResourceName'].should.equal(cluster_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - 
ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceName=snapshot_arn) - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('snapshot') - tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - -@mock_redshift -def test_create_tags(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - num_tags = 5 - tags = [] - for i in range(0, num_tags): - tag = {'Key': '{}-{}'.format(tag_key, i), - 'Value': '{}-{}'.format(tag_value, i)} - tags.append(tag) - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_tags( - ResourceName=cluster_arn, - Tags=tags - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - list(cluster['Tags']).should.have.length_of(num_tags) - response = client.describe_tags(ResourceName=cluster_arn) - list(response['TaggedResources']).should.have.length_of(num_tags) - - -@mock_redshift -def test_delete_tags(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - tags = [] - for i in range(1, 2): - tag = {'Key': '{}-{}'.format(tag_key, i), - 'Value': '{}-{}'.format(tag_value, i)} - tags.append(tag) - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=tags - ) - client.delete_tags( - ResourceName=cluster_arn, - TagKeys=[tag['Key'] for tag in tags - if tag['Key'] != '{}-1'.format(tag_key)] - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - list(cluster['Tags']).should.have.length_of(1) - response = client.describe_tags(ResourceName=cluster_arn) - list(response['TaggedResources']).should.have.length_of(1) - - -@mock_ec2 -@mock_redshift -def test_describe_tags_all_resource_types(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') - client = boto3.client('redshift', region_name='us-east-1') - response = client.describe_tags() - list(response['TaggedResources']).should.have.length_of(0) - client.create_cluster_subnet_group( - ClusterSubnetGroupName='my_subnet_group', - Description='This is my subnet group', - SubnetIds=[subnet.id], - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group1", - Description="This is my security group", - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster( - DBName='test', - ClusterIdentifier='my_cluster', - ClusterType='single-node', - NodeType='ds2.xlarge', - 
MasterUsername='user', - MasterUserPassword='password', - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_snapshot( - SnapshotIdentifier='my_snapshot', - ClusterIdentifier='my_cluster', - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_parameter_group( - ParameterGroupName="my_parameter_group", - ParameterGroupFamily="redshift-1.0", - Description="This is my parameter group", - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - response = client.describe_tags() - expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] - tagged_resources = response['TaggedResources'] - returned_types = [resource['ResourceType'] for resource in tagged_resources] - list(tagged_resources).should.have.length_of(len(expected_types)) - set(returned_types).should.equal(set(expected_types)) - - -@mock_redshift -def test_tagged_resource_not_found_error(): - client = boto3.client('redshift', region_name='us-east-1') - - cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' - client.describe_tags.when.called_with( - ResourceName=cluster_arn - ).should.throw(ClientError, 'cluster (fake) not found.') - - snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' - client.delete_tags.when.called_with( - ResourceName=snapshot_arn, - TagKeys=['test'] - ).should.throw(ClientError, 'snapshot (snap-id) not found.') - - client.describe_tags.when.called_with( - ResourceType='cluster' - ).should.throw(ClientError, "resource of type 'cluster' not found.") - - client.describe_tags.when.called_with( - ResourceName='bad:arn' - ).should.throw(ClientError, "Tagging is not supported for this type of resource") - - -@mock_redshift -def test_enable_snapshot_copy(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - ClusterIdentifier='test', - ClusterType='single-node', - DBName='test', - Encrypted=True, - MasterUsername='user', - MasterUserPassword='password', - NodeType='ds2.xlarge', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) - cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') - cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') - - -@mock_redshift -def test_enable_snapshot_copy_unencrypted(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - ClusterIdentifier='test', - ClusterType='single-node', - DBName='test', - MasterUsername='user', - MasterUserPassword='password', - NodeType='ds2.xlarge', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) - cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') - - -@mock_redshift -def test_disable_snapshot_copy(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - 
MasterUsername='user', - MasterUserPassword='password', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', - ) - client.disable_snapshot_copy( - ClusterIdentifier='test', - ) - response = client.describe_clusters(ClusterIdentifier='test') - response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') - - -@mock_redshift -def test_modify_snapshot_copy_retention_period(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', - ) - client.modify_snapshot_copy_retention_period( - ClusterIdentifier='test', - RetentionPeriod=5, - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) +from __future__ import unicode_literals + +import datetime + +import boto +import boto3 +from boto.redshift.exceptions import ( + ClusterNotFound, + ClusterParameterGroupNotFound, + ClusterSecurityGroupNotFound, + ClusterSubnetGroupNotFound, + InvalidSubnet +) +from botocore.exceptions import ( + ClientError +) +import sure # noqa + +from moto import mock_ec2 +from moto import mock_ec2_deprecated +from moto import mock_redshift +from moto import mock_redshift_deprecated + + +@mock_redshift +def test_create_cluster_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + response['Cluster']['NodeType'].should.equal('ds2.xlarge') + create_time = response['Cluster']['ClusterCreateTime'] + create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) + + +@mock_redshift +def test_create_snapshot_copy_grant(): + client = boto3.client('redshift', region_name='us-east-1') + grants = client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + KmsKeyId='fake', + ) + grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') + grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') + + client.delete_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + ) + + client.describe_snapshot_copy_grants.when.called_with( + SnapshotCopyGrantName='test-us-east-1', + ).should.throw(Exception) + + +@mock_redshift +def test_create_many_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + + for i in range(10): + client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), + KmsKeyId='fake', + ) + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(10) + + +@mock_redshift +def test_no_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(0) + + +@mock_redshift_deprecated +def test_create_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 
'my_cluster' + + cluster_response = conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + db_name="my_db", + cluster_type="multi-node", + availability_zone="us-east-1d", + preferred_maintenance_window="Mon:03:00-Mon:11:00", + automated_snapshot_retention_period=10, + port=1234, + cluster_version="1.0", + allow_version_upgrade=True, + number_of_nodes=3, + ) + cluster_response['CreateClusterResponse']['CreateClusterResult'][ + 'Cluster']['ClusterStatus'].should.equal('creating') + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal(cluster_identifier) + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['MasterUsername'].should.equal("username") + cluster['DBName'].should.equal("my_db") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("Default") + cluster['VpcSecurityGroups'].should.equal([]) + cluster['ClusterSubnetGroupName'].should.equal(None) + cluster['AvailabilityZone'].should.equal("us-east-1d") + cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) + cluster['Port'].should.equal(1234) + cluster['ClusterVersion'].should.equal("1.0") + cluster['AllowVersionUpgrade'].should.equal(True) + cluster['NumberOfNodes'].should.equal(3) + + +@mock_redshift_deprecated +def test_create_single_node_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + db_name="my_db", + cluster_type="single-node", + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal(cluster_identifier) + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['MasterUsername'].should.equal("username") + cluster['DBName'].should.equal("my_db") + cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +def test_default_cluster_attributes(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['DBName'].should.equal("dev") + cluster['ClusterSubnetGroupName'].should.equal(None) + assert "us-east-" in cluster['AvailabilityZone'] + cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) + cluster['Port'].should.equal(5439) + cluster['ClusterVersion'].should.equal("1.0") + cluster['AllowVersionUpgrade'].should.equal(True) + cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift +@mock_ec2 +def test_create_cluster_in_subnet_group(): + ec2 = boto3.resource('ec2', 
region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName="my_subnet_group", + Description="This is my subnet group", + SubnetIds=[subnet.id], + ) + + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', + ) + + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + +@mock_redshift +@mock_ec2 +def test_create_cluster_in_subnet_group_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id] + ) + + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', + ) + + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + +@mock_redshift_deprecated +def test_create_cluster_with_security_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.create_cluster_security_group( + "security_group1", + "This is my security group", + ) + conn.create_cluster_security_group( + "security_group2", + "This is my security group", + ) + + cluster_identifier = 'my_cluster' + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_security_groups=["security_group1", "security_group2"] + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + set(group_names).should.equal(set(["security_group1", "security_group2"])) + + +@mock_redshift +def test_create_cluster_with_security_group_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group2", + Description="This is my security group", + ) + + cluster_identifier = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_identifier, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSecurityGroups=["security_group1", "security_group2"] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + set(group_names).should.equal({"security_group1", "security_group2"}) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def 
test_create_cluster_with_vpc_security_groups(): + vpc_conn = boto.connect_vpc() + ec2_conn = boto.connect_ec2() + redshift_conn = boto.connect_redshift() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + security_group = ec2_conn.create_security_group( + "vpc_security_group", "a group", vpc_id=vpc.id) + + redshift_conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + vpc_security_group_ids=[security_group.id], + ) + + cluster_response = redshift_conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + +@mock_redshift +@mock_ec2 +def test_create_cluster_with_vpc_security_groups_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + security_group = ec2.create_security_group( + Description="vpc_security_group", + GroupName="a group", + VpcId=vpc.id) + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + VpcSecurityGroupIds=[security_group.id], + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + +@mock_redshift +def test_create_cluster_with_iam_roles(): + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role', ] + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + IamRoles=iam_roles_arn + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] + iam_roles_arn.should.equal(iam_roles) + + +@mock_redshift_deprecated +def test_create_cluster_with_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_parameter_group_name='my_parameter_group', + ) + + cluster_response = conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") + + +@mock_redshift_deprecated +def test_describe_non_existent_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_clusters.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) + +@mock_redshift_deprecated +def test_delete_cluster(): + conn = boto.connect_redshift() + cluster_identifier = "my_cluster" + snapshot_identifier = "my_snapshot" + + conn.create_cluster( + cluster_identifier, + node_type="single-node", + master_username="username", + master_user_password="password", + ) + + conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw(AttributeError) + + 
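The rewritten `test_delete_cluster` in this hunk exercises the new final-snapshot behaviour through the deprecated boto client: with `skip_final_cluster_snapshot=False`, a `final_cluster_snapshot_identifier` must be supplied, and deleting the cluster leaves a final snapshot behind. A minimal boto3 counterpart, assuming the backend accepts the equivalent `SkipFinalClusterSnapshot`/`FinalClusterSnapshotIdentifier` parameters; the identifiers are illustrative:

```python
import boto3
from moto import mock_redshift


@mock_redshift
def check_delete_with_final_snapshot():
    client = boto3.client('redshift', region_name='us-east-1')
    client.create_cluster(
        ClusterIdentifier='my-cluster',
        ClusterType='single-node',
        NodeType='ds2.xlarge',
        MasterUsername='username',
        MasterUserPassword='password',
    )
    # When the final snapshot is not skipped, an identifier must be supplied
    client.delete_cluster(
        ClusterIdentifier='my-cluster',
        SkipFinalClusterSnapshot=False,
        FinalClusterSnapshotIdentifier='my-snapshot',
    )
    # The cluster is gone, but its final snapshot remains
    snapshots = client.describe_cluster_snapshots()['Snapshots']
    assert len(snapshots) == 1
    assert 'my-snapshot' in snapshots[0]['SnapshotIdentifier']
```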
clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + list(clusters).should.have.length_of(1) + + conn.delete_cluster( + cluster_identifier=cluster_identifier, + skip_final_cluster_snapshot=False, + final_cluster_snapshot_identifier=snapshot_identifier + ) + + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + list(clusters).should.have.length_of(0) + + snapshots = conn.describe_cluster_snapshots()["DescribeClusterSnapshotsResponse"][ + "DescribeClusterSnapshotsResult"]["Snapshots"] + list(snapshots).should.have.length_of(1) + + assert snapshot_identifier in snapshots[0]["SnapshotIdentifier"] + + # Delete invalid id + conn.delete_cluster.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) + + +@mock_redshift_deprecated +def test_modify_cluster(): + conn = boto.connect_redshift() + cluster_identifier = 'my_cluster' + conn.create_cluster_security_group( + "security_group", + "This is my security group", + ) + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + conn.create_cluster( + cluster_identifier, + node_type='single-node', + master_username="username", + master_user_password="password", + ) + + conn.modify_cluster( + cluster_identifier, + cluster_type="multi-node", + node_type="dw.hs1.xlarge", + cluster_security_groups="security_group", + master_user_password="new_password", + cluster_parameter_group_name="my_parameter_group", + automated_snapshot_retention_period=7, + preferred_maintenance_window="Tue:03:00-Tue:11:00", + allow_version_upgrade=False, + new_cluster_identifier="new_identifier", + ) + + cluster_response = conn.describe_clusters("new_identifier") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal("new_identifier") + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("security_group") + cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) + cluster['AllowVersionUpgrade'].should.equal(False) + # This one should remain unmodified. 
+ cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift +@mock_ec2 +def test_create_cluster_subnet_group(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.1.0/24") + client = boto3.client('redshift', region_name='us-east-1') + + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet1.id, subnet2.id], + ) + + subnets_response = client.describe_cluster_subnet_groups( + ClusterSubnetGroupName="my_subnet_group") + my_subnet = subnets_response['ClusterSubnetGroups'][0] + + my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet_group") + my_subnet['Description'].should.equal("This is my subnet group") + subnet_ids = [subnet['SubnetIdentifier'] + for subnet in my_subnet['Subnets']] + set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_invalid_cluster_subnet_group(): + redshift_conn = boto.connect_redshift() + redshift_conn.create_cluster_subnet_group.when.called_with( + "my_subnet", + "This is my subnet group", + subnet_ids=["subnet-1234"], + ).should.throw(InvalidSubnet) + + +@mock_redshift_deprecated +def test_describe_non_existent_subnet_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_subnet_groups.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + + +@mock_redshift +@mock_ec2 +def test_delete_cluster_subnet_group(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') + + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], + ) + + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] + subnets.should.have.length_of(1) + + client.delete_cluster_subnet_group(ClusterSubnetGroupName="my_subnet_group") + + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] + subnets.should.have.length_of(0) + + # Delete invalid id + client.delete_cluster_subnet_group.when.called_with( + ClusterSubnetGroupName="not-a-subnet-group").should.throw(ClientError) + + +@mock_redshift_deprecated +def test_create_cluster_security_group(): + conn = boto.connect_redshift() + conn.create_cluster_security_group( + "my_security_group", + "This is my security group", + ) + + groups_response = conn.describe_cluster_security_groups( + "my_security_group") + my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] + + my_group['ClusterSecurityGroupName'].should.equal("my_security_group") + my_group['Description'].should.equal("This is my security group") + list(my_group['IPRanges']).should.equal([]) + + +@mock_redshift_deprecated +def test_describe_non_existent_security_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_security_groups.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + + +@mock_redshift_deprecated +def test_delete_cluster_security_group(): + conn = 
boto.connect_redshift() + conn.create_cluster_security_group( + "my_security_group", + "This is my security group", + ) + + groups_response = conn.describe_cluster_security_groups() + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups.should.have.length_of(2) # The default group already exists + + conn.delete_cluster_security_group("my_security_group") + + groups_response = conn.describe_cluster_security_groups() + groups = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] + groups.should.have.length_of(1) + + # Delete invalid id + conn.delete_cluster_security_group.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + + +@mock_redshift_deprecated +def test_create_cluster_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + groups_response = conn.describe_cluster_parameter_groups( + "my_parameter_group") + my_group = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] + + my_group['ParameterGroupName'].should.equal("my_parameter_group") + my_group['ParameterGroupFamily'].should.equal("redshift-1.0") + my_group['Description'].should.equal("This is my parameter group") + + +@mock_redshift_deprecated +def test_describe_non_existent_parameter_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_parameter_groups.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + + +@mock_redshift_deprecated +def test_delete_cluster_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + groups_response = conn.describe_cluster_parameter_groups() + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups.should.have.length_of(2) # The default group already exists + + conn.delete_cluster_parameter_group("my_parameter_group") + + groups_response = conn.describe_cluster_parameter_groups() + groups = groups_response['DescribeClusterParameterGroupsResponse'][ + 'DescribeClusterParameterGroupsResult']['ParameterGroups'] + groups.should.have.length_of(1) + + # Delete invalid id + conn.delete_cluster_parameter_group.when.called_with( + "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) + + +@mock_redshift +def test_create_cluster_snapshot_of_non_existent_cluster(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'non-existent-cluster-id' + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier='snapshot-id', + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) + + +@mock_redshift +def test_create_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + cluster_response = client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + 
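The snapshot test continuing below tags the snapshot at creation time, and the `describe_tags` tests earlier in this diff pin down the ARN layout moto reports for tagged snapshots (`arn:aws:redshift:<region>:<account>:snapshot:<cluster>/<snapshot>`). A short sketch tying the two together, with illustrative identifiers:

```python
import boto3
from moto import mock_redshift


@mock_redshift
def check_snapshot_tags():
    client = boto3.client('redshift', region_name='us-east-1')
    client.create_cluster(
        ClusterIdentifier='my-cluster',
        ClusterType='single-node',
        NodeType='ds2.xlarge',
        MasterUsername='username',
        MasterUserPassword='password',
    )
    client.create_cluster_snapshot(
        SnapshotIdentifier='my-snapshot',
        ClusterIdentifier='my-cluster',
        Tags=[{'Key': 'test-tag-key', 'Value': 'test-tag-value'}],
    )
    # Filtering by resource type returns only the tagged snapshot
    tagged = client.describe_tags(ResourceType='snapshot')['TaggedResources']
    assert tagged[0]['Tag']['Key'] == 'test-tag-key'
    assert tagged[0]['ResourceName'].endswith('snapshot:my-cluster/my-snapshot')
```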
cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') + + snapshot_response = client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': 'test-tag-key', + 'Value': 'test-tag-value'}] + ) + snapshot = snapshot_response['Snapshot'] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_describe_cluster_snapshots(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier_1 = 'my_snapshot_1' + snapshot_identifier_2 = 'my_snapshot_2' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier_1, + ClusterIdentifier=cluster_identifier, + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier_2, + ClusterIdentifier=cluster_identifier, + ) + + resp_snap_1 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_1) + snapshot_1 = resp_snap_1['Snapshots'][0] + snapshot_1['SnapshotIdentifier'].should.equal(snapshot_identifier_1) + snapshot_1['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_1['NumberOfNodes'].should.equal(1) + snapshot_1['NodeType'].should.equal('ds2.xlarge') + snapshot_1['MasterUsername'].should.equal('username') + + resp_snap_2 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_2) + snapshot_2 = resp_snap_2['Snapshots'][0] + snapshot_2['SnapshotIdentifier'].should.equal(snapshot_identifier_2) + snapshot_2['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_2['NumberOfNodes'].should.equal(1) + snapshot_2['NodeType'].should.equal('ds2.xlarge') + snapshot_2['MasterUsername'].should.equal('username') + + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp_clust['Snapshots'][0].should.equal(resp_snap_1['Snapshots'][0]) + resp_clust['Snapshots'][1].should.equal(resp_snap_2['Snapshots'][0]) + + +@mock_redshift +def test_describe_cluster_snapshots_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.describe_cluster_snapshots.when.called_with( + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) + + client.describe_cluster_snapshots.when.called_with( + SnapshotIdentifier=snapshot_identifier + ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) + + +@mock_redshift +def test_delete_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(1) + 
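+ # Illustrative extra assertion (an editor's sketch, not load-bearing for the + # test): moto is assumed to report the mocked snapshot as 'available' + # before it is deleted, which the 'deleted' status below then replaces. + snapshots[0]['Status'].should.equal('available') + 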
+ client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ + 'Snapshot']['Status'].should.equal('deleted') + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(0) + + # Delete invalid id + client.delete_cluster_snapshot.when.called_with( + SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) + + +@mock_redshift +def test_cluster_snapshot_already_exists(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ).should.throw(ClientError) + + +@mock_redshift +def test_create_cluster_from_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_from_snapshot_with_waiter(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + client.get_waiter('cluster_restored').wait( + ClusterIdentifier=new_cluster_identifier, + WaiterConfig={ + 'Delay': 1, + 'MaxAttempts': 2, + } + ) + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_from_non_existent_snapshot(): + client = 
boto3.client('redshift', region_name='us-east-1') + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier='cluster-id', + SnapshotIdentifier='non-existent-snapshot', + ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') + + +@mock_redshift +def test_create_cluster_status_update(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'test-cluster' + + response = client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + response = client.describe_clusters( + ClusterIdentifier=cluster_identifier + ) + response['Clusters'][0]['ClusterStatus'].should.equal('available') + + +@mock_redshift +def test_describe_tags_with_resource_type(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'my_snapshot' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='cluster') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='snapshot') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_describe_tags_cannot_specify_resource_type_and_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' + resource_type = 'cluster' + client.describe_tags.when.called_with( + ResourceName=resource_name, + ResourceType=resource_type + ).should.throw(ClientError, 'using either an ARN or a resource type') + + +@mock_redshift +def test_describe_tags_with_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'snapshot-id' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + 
ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceName=cluster_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceName=snapshot_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_create_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + num_tags = 5 + tags = [] + for i in range(0, num_tags): + tag = {'Key': '{}-{}'.format(tag_key, i), + 'Value': '{}-{}'.format(tag_value, i)} + tags.append(tag) + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_tags( + ResourceName=cluster_arn, + Tags=tags + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + list(cluster['Tags']).should.have.length_of(num_tags) + response = client.describe_tags(ResourceName=cluster_arn) + list(response['TaggedResources']).should.have.length_of(num_tags) + + +@mock_redshift +def test_delete_tags(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + tags = [] + for i in range(1, 2): + tag = {'Key': '{}-{}'.format(tag_key, i), + 'Value': '{}-{}'.format(tag_value, i)} + tags.append(tag) + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=tags + ) + client.delete_tags( + ResourceName=cluster_arn, + TagKeys=[tag['Key'] for tag in tags + if tag['Key'] != '{}-1'.format(tag_key)] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + list(cluster['Tags']).should.have.length_of(1) + response = client.describe_tags(ResourceName=cluster_arn) + list(response['TaggedResources']).should.have.length_of(1) + + +@mock_ec2 +@mock_redshift +def test_describe_tags_all_resource_types(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + 
client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_tags() + list(response['TaggedResources']).should.have.length_of(0) + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster( + DBName='test', + ClusterIdentifier='my_cluster', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_snapshot( + SnapshotIdentifier='my_snapshot', + ClusterIdentifier='my_cluster', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_parameter_group( + ParameterGroupName="my_parameter_group", + ParameterGroupFamily="redshift-1.0", + Description="This is my parameter group", + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + response = client.describe_tags() + expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] + tagged_resources = response['TaggedResources'] + returned_types = [resource['ResourceType'] for resource in tagged_resources] + list(tagged_resources).should.have.length_of(len(expected_types)) + set(returned_types).should.equal(set(expected_types)) + + +@mock_redshift +def test_tagged_resource_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + + cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' + client.describe_tags.when.called_with( + ResourceName=cluster_arn + ).should.throw(ClientError, 'cluster (fake) not found.') + + snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' + client.delete_tags.when.called_with( + ResourceName=snapshot_arn, + TagKeys=['test'] + ).should.throw(ClientError, 'snapshot (snap-id) not found.') + + client.describe_tags.when.called_with( + ResourceType='cluster' + ).should.throw(ClientError, "resource of type 'cluster' not found.") + + client.describe_tags.when.called_with( + ResourceName='bad:arn' + ).should.throw(ClientError, "Tagging is not supported for this type of resource") + + +@mock_redshift +def test_enable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + Encrypted=True, + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') + + +@mock_redshift +def test_enable_snapshot_copy_unencrypted(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + MasterUsername='user', + MasterUserPassword='password', + 
NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + + +@mock_redshift +def test_disable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.disable_snapshot_copy( + ClusterIdentifier='test', + ) + response = client.describe_clusters(ClusterIdentifier='test') + response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + + +@mock_redshift +def test_modify_snapshot_copy_retention_period(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.modify_snapshot_copy_retention_period( + ClusterIdentifier='test', + RetentionPeriod=5, + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) diff --git a/tests/test_resourcegroups/__init__.py b/tests/test_resourcegroups/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_resourcegroups/test_resourcegroups.py b/tests/test_resourcegroups/test_resourcegroups.py new file mode 100644 index 000000000..bb3624413 --- /dev/null +++ b/tests/test_resourcegroups/test_resourcegroups.py @@ -0,0 +1,165 @@ +from __future__ import unicode_literals + +import boto3 +import json +import sure # noqa + +from moto import mock_resourcegroups + + +@mock_resourcegroups +def test_create_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = resource_groups.create_group( + Name="test_resource_group", + Description="description", + ResourceQuery={ + "Type": "TAG_FILTERS_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "TagFilters": [ + {"Key": "resources_tag_key", "Values": ["resources_tag_value"]} + ], + } + ), + }, + Tags={"resource_group_tag_key": "resource_group_tag_value"} + ) + response["Group"]["Name"].should.contain("test_resource_group") + response["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + +@mock_resourcegroups +def test_delete_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.delete_group(GroupName="test_resource_group") + response["Group"]["Name"].should.contain("test_resource_group") + + response = resource_groups.list_groups() + 
response["GroupIdentifiers"].should.have.length_of(0) + response["Groups"].should.have.length_of(0) + + +@mock_resourcegroups +def test_get_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description") + + return response + + +@mock_resourcegroups +def test_get_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + + +@mock_resourcegroups +def test_get_tags(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_group() + + response = resource_groups.get_tags(Arn=response["Group"]["GroupArn"]) + response["Tags"].should.have.length_of(1) + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + return response + + +@mock_resourcegroups +def test_list_groups(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(1) + response["Groups"].should.have.length_of(1) + + +@mock_resourcegroups +def test_tag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.tag( + Arn=response["Arn"], + Tags={"resource_group_tag_key_2": "resource_group_tag_value_2"} + ) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(2) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + +@mock_resourcegroups +def test_untag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.untag(Arn=response["Arn"], Keys=["resource_group_tag_key"]) + response["Keys"].should.contain("resource_group_tag_key") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(0) + + +@mock_resourcegroups +def test_update_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_get_group() + + response = resource_groups.update_group( + GroupName="test_resource_group", + Description="description_2", + ) + response["Group"]["Description"].should.contain("description_2") + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description_2") + + +@mock_resourcegroups +def test_update_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.update_group_query( + GroupName="test_resource_group", + ResourceQuery={ + "Type": "CLOUDFORMATION_STACK_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "StackIdentifier": ( + "arn:aws:cloudformation:eu-west-1:012345678912:stack/" + "test_stack/c223eca0-e744-11e8-8910-500c41f59083" + ) + } + ), + }, + ) + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") + + response = 
resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 3961d05bc..8015472bf 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -1,285 +1,285 @@ -from __future__ import unicode_literals - -import boto3 -import sure # noqa -from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 - - -@mock_s3 -@mock_resourcegroupstaggingapi -def test_get_resources_s3(): - # Tests pagination - s3_client = boto3.client('s3', region_name='eu-central-1') - - # Will end up having key1,key2,key3,key4 - response_keys = set() - - # Create 4 buckets - for i in range(1, 5): - i_str = str(i) - s3_client.create_bucket(Bucket='test_bucket' + i_str) - s3_client.put_bucket_tagging( - Bucket='test_bucket' + i_str, - Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} - ) - response_keys.add('key' + i_str) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_resources(ResourcesPerPage=2) - for resource in resp['ResourceTagMappingList']: - response_keys.remove(resource['Tags'][0]['Key']) - - response_keys.should.have.length_of(2) - - resp = rtapi.get_resources( - ResourcesPerPage=2, - PaginationToken=resp['PaginationToken'] - ) - for resource in resp['ResourceTagMappingList']: - response_keys.remove(resource['Tags'][0]['Key']) - - response_keys.should.have.length_of(0) - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_resources_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - instances = client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - instance_id = instances['Instances'][0]['InstanceId'] - image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId'] - - client.create_tags( - Resources=[image_id], - Tags=[{'Key': 'ami', 'Value': 'test'}] - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_resources() - # Check we have 1 entry for Instance, 1 Entry for AMI - resp['ResourceTagMappingList'].should.have.length_of(2) - - # 1 Entry for AMI - resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image']) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/') - - # As were iterating the same data, this rules out that the test above was a fluke - resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance']) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') - - # Basic test of tag filters - resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}]) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') - - -@mock_ec2 
-@mock_resourcegroupstaggingapi -def test_get_tag_keys_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_tag_keys() - - resp['TagKeys'].should.contain('MY_TAG1') - resp['TagKeys'].should.contain('MY_TAG2') - resp['TagKeys'].should.contain('MY_TAG3') - - # TODO test pagenation - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_tag_values_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE4', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE5', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE6', - }, - ] - }, - ], - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_tag_values(Key='MY_TAG1') - - resp['TagValues'].should.contain('MY_VALUE1') - resp['TagValues'].should.contain('MY_VALUE4') - -@mock_ec2 -@mock_elbv2 -@mock_resourcegroupstaggingapi -def test_get_resources_elbv2(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[ - { - 'Key': 'key_name', - 'Value': 'a_value' - }, - { - 'Key': 'key_2', - 'Value': 'val2' - } - ] - ) - - conn.create_load_balancer( - Name='my-other-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1') - - resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer']) - - resp['ResourceTagMappingList'].should.have.length_of(2) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/') - resp = rtapi.get_resources( - ResourceTypeFilters=['elasticloadbalancer:loadbalancer'], - TagFilters=[{ - 'Key': 'key_name' - }] - ) - - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 
'key_name', 'Value': 'a_value'}) - - # TODO test pagenation +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client('s3', region_name='eu-central-1') + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): + i_str = str(i) + s3_client.create_bucket(Bucket='test_bucket' + i_str) + s3_client.put_bucket_tagging( + Bucket='test_bucket' + i_str, + Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} + ) + response_keys.add('key' + i_str) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, + PaginationToken=resp['PaginationToken'] + ) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_resources_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + instances = client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + instance_id = instances['Instances'][0]['InstanceId'] + image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId'] + + client.create_tags( + Resources=[image_id], + Tags=[{'Key': 'ami', 'Value': 'test'}] + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources() + # Check we have 1 entry for Instance, 1 Entry for AMI + resp['ResourceTagMappingList'].should.have.length_of(2) + + # 1 Entry for AMI + resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image']) + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/') + + # As were iterating the same data, this rules out that the test above was a fluke + resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance']) + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') + + # Basic test of tag filters + resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}]) + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_tag_keys_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 
'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_tag_keys() + + resp['TagKeys'].should.contain('MY_TAG1') + resp['TagKeys'].should.contain('MY_TAG2') + resp['TagKeys'].should.contain('MY_TAG3') + + # TODO test pagenation + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_tag_values_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE4', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE5', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE6', + }, + ] + }, + ], + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_tag_values(Key='MY_TAG1') + + resp['TagValues'].should.contain('MY_VALUE1') + resp['TagValues'].should.contain('MY_VALUE4') + +@mock_ec2 +@mock_elbv2 +@mock_resourcegroupstaggingapi +def test_get_resources_elbv2(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[ + { + 'Key': 'key_name', + 'Value': 'a_value' + }, + { + 'Key': 'key_2', + 'Value': 'val2' + } + ] + ) + + conn.create_load_balancer( + Name='my-other-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1') + + resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer']) + + resp['ResourceTagMappingList'].should.have.length_of(2) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/') + resp = rtapi.get_resources( + ResourceTypeFilters=['elasticloadbalancer:loadbalancer'], + TagFilters=[{ + 'Key': 'key_name' + }] + ) + + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'}) + + # TODO test pagenation diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 6af23849c..f26964ab7 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1529,6 +1529,28 @@ def test_boto3_copy_object_with_versioning(): # Version should be different to previous version obj2_version_new.should_not.equal(obj2_version) + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test2', 'VersionId': obj2_version}, 
Bucket='blah', Key='test3') + obj3_version_new = client.get_object(Bucket='blah', Key='test3')['VersionId'] + obj3_version_new.should_not.equal(obj2_version_new) + + # Copy file that doesn't exist + with assert_raises(ClientError) as e: + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test4', 'VersionId': obj2_version}, Bucket='blah', Key='test5') + e.exception.response['Error']['Code'].should.equal('404') + + response = client.create_multipart_upload(Bucket='blah', Key='test4') + upload_id = response['UploadId'] + response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new}, + UploadId=upload_id, PartNumber=1) + etag = response["CopyPartResult"]["ETag"] + client.complete_multipart_upload( + Bucket='blah', Key='test4', UploadId=upload_id, + MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': 1}]}) + + response = client.get_object(Bucket='blah', Key='test4') + data = response["Body"].read() + data.should.equal(b'test2') + @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): @@ -2762,6 +2784,7 @@ def test_boto3_multiple_delete_markers(): latest['Key'].should.equal('key-with-versions-and-unicode-ó') oldest['Key'].should.equal('key-with-versions-and-unicode-ó') + @mock_s3 def test_get_stream_gzipped(): payload = b"this is some stuff here" @@ -2820,3 +2843,80 @@ def test_boto3_bucket_name_too_short(): with assert_raises(ClientError) as exc: s3.create_bucket(Bucket='x'*2) exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') + +@mock_s3 +def test_accelerated_none_when_unspecified(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_can_enable_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Enabled') + +@mock_s3 +def test_can_suspend_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Suspended') + +@mock_s3 +def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_accelerate_configuration_status_validation(): + bucket_name = 
'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'bad_status'}, + ) + exc.exception.response['Error']['Code'].should.equal('MalformedXML') + +@mock_s3 +def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): + bucket_name = 'some.bucket.with.dots' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + exc.exception.response['Error']['Code'].should.equal('InvalidRequest') diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index 81ce93cc3..6735924eb 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -4,13 +4,15 @@ import boto3 from moto import mock_secretsmanager from botocore.exceptions import ClientError -import sure # noqa import string +import unittest import pytz from datetime import datetime -import unittest from nose.tools import assert_raises +DEFAULT_SECRET_NAME = 'test-secret' + + @mock_secretsmanager def test_get_secret_value(): conn = boto3.client('secretsmanager', region_name='us-west-2') @@ -389,34 +391,32 @@ def test_restore_secret_that_does_not_exist(): @mock_secretsmanager def test_rotate_secret(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') - rotated_secret = conn.rotate_secret(SecretId=secret_name) + rotated_secret = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME) assert rotated_secret assert rotated_secret['ARN'] != '' # Test arn not empty - assert rotated_secret['Name'] == secret_name + assert rotated_secret['Name'] == DEFAULT_SECRET_NAME assert rotated_secret['VersionId'] != '' @mock_secretsmanager def test_rotate_secret_enable_rotation(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') - initial_description = conn.describe_secret(SecretId=secret_name) + initial_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert initial_description assert initial_description['RotationEnabled'] is False assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 - conn.rotate_secret(SecretId=secret_name, + conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationRules={'AutomaticallyAfterDays': 42}) - rotated_description = conn.describe_secret(SecretId=secret_name) + rotated_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) assert rotated_description assert rotated_description['RotationEnabled'] is True assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 @@ -460,9 +460,8 @@ def test_rotate_secret_client_request_token_too_short(): @mock_secretsmanager def test_rotate_secret_client_request_token_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') client_request_token = ( @@ -470,19 +469,18 @@ def test_rotate_secret_client_request_token_too_long(): 
'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' ) with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, ClientRequestToken=client_request_token) @mock_secretsmanager def test_rotate_secret_rotation_lambda_arn_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationLambdaARN=rotation_lambda_arn) @mock_secretsmanager @@ -494,12 +492,78 @@ def test_rotate_secret_rotation_period_zero(): @mock_secretsmanager def test_rotate_secret_rotation_period_too_long(): - secret_name = 'test-secret' conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, + conn.create_secret(Name=DEFAULT_SECRET_NAME, SecretString='foosecret') rotation_rules = {'AutomaticallyAfterDays': 1001} with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, RotationRules=rotation_rules) + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='foosecret', + VersionStages=['AWSCURRENT']) + version_id = put_secret_value_dict['VersionId'] + + get_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=version_id, + VersionStage='AWSCURRENT') + + assert get_secret_value_dict + assert get_secret_value_dict['SecretString'] == 'foosecret' + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='first_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='second_secret', + VersionStages=['AWSCURRENT']) + + first_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=first_version_id) + first_secret_value = first_secret_value_dict['SecretString'] + + assert first_secret_value == 'first_secret' + + +@mock_secretsmanager +def test_put_secret_value_versions_differ_if_same_secret_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + assert first_version_id != second_version_id + + +@mock_secretsmanager +def test_can_list_secret_version_ids(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = 
conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + versions_list = conn.list_secret_version_ids(SecretId=DEFAULT_SECRET_NAME) + + returned_version_ids = [v['VersionId'] for v in versions_list['Versions']] + + assert sorted([first_version_id, second_version_id]) == sorted(returned_version_ids) + diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py index d0f495f57..23d823239 100644 --- a/tests/test_secretsmanager/test_server.py +++ b/tests/test_secretsmanager/test_server.py @@ -10,6 +10,8 @@ from moto import mock_secretsmanager Test the different server responses for secretsmanager ''' +DEFAULT_SECRET_NAME = 'test-secret' + @mock_secretsmanager def test_get_secret_value(): @@ -18,19 +20,20 @@ def test_get_secret_value(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foo-secret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret"}, ) get_secret = test_client.post('/', - data={"SecretId": "test-secret", - "VersionStage": "AWSCURRENT"}, - headers={ - "X-Amz-Target": "secretsmanager.GetSecretValue"}, - ) + data={"SecretId": DEFAULT_SECRET_NAME, + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['SecretString'] == 'foo-secret' @mock_secretsmanager @@ -55,7 +58,7 @@ def test_get_secret_that_does_not_match(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foo-secret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret"}, @@ -165,7 +168,7 @@ def test_describe_secret_that_does_not_match(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -188,7 +191,7 @@ def test_rotate_secret(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -197,7 +200,7 @@ def test_rotate_secret(): client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2" rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "ClientRequestToken": client_request_token}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -207,7 +210,7 @@ def test_rotate_secret(): json_data = json.loads(rotate_secret.data.decode("utf-8")) assert json_data # Returned dict is not empty assert json_data['ARN'] != '' - assert json_data['Name'] == 'test-secret' + assert json_data['Name'] == DEFAULT_SECRET_NAME assert json_data['VersionId'] == client_request_token # @mock_secretsmanager @@ -289,7 +292,7 @@ def test_rotate_secret_that_does_not_match(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -313,7 +316,7 @@ def test_rotate_secret_client_request_token_too_short(): test_client = backend.test_client() create_secret = test_client.post('/', - 
data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -322,7 +325,7 @@ def test_rotate_secret_client_request_token_too_short(): client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C" rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "ClientRequestToken": client_request_token}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -339,7 +342,7 @@ def test_rotate_secret_client_request_token_too_long(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -351,7 +354,7 @@ def test_rotate_secret_client_request_token_too_long(): 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' ) rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "ClientRequestToken": client_request_token}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -368,7 +371,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): test_client = backend.test_client() create_secret = test_client.post('/', - data={"Name": "test-secret", + data={"Name": DEFAULT_SECRET_NAME, "SecretString": "foosecret"}, headers={ "X-Amz-Target": "secretsmanager.CreateSecret" @@ -377,7 +380,7 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", + data={"SecretId": DEFAULT_SECRET_NAME, "RotationLambdaARN": rotation_lambda_arn}, headers={ "X-Amz-Target": "secretsmanager.RotateSecret" @@ -389,7 +392,165 @@ def test_rotate_secret_rotation_lambda_arn_too_long(): assert json_data['__type'] == 'InvalidParameterException' -# + + + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "foosecret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "foosecret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + + version_id = second_secret_json_data['VersionId'] + + secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "VersionId": version_id, + "VersionStage": 'AWSCURRENT'}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + second_secret_json_data = json.loads(secret_value_json.data.decode("utf-8")) + + assert second_secret_json_data + assert second_secret_json_data['SecretString'] == 'foosecret' + + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + first_secret_string = 'first_secret' + second_secret_string = 'second_secret' + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": first_secret_string, + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": 
"secretsmanager.PutSecretValue"}, + ) + + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + + first_secret_version_id = first_secret_json_data['VersionId'] + + test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": second_secret_string, + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + get_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "VersionId": first_secret_version_id, + "VersionStage": 'AWSCURRENT'}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + get_first_secret_json_data = json.loads(get_first_secret_value_json.data.decode("utf-8")) + + assert get_first_secret_json_data + assert get_first_secret_json_data['SecretString'] == first_secret_string + + +@mock_secretsmanager +def test_put_secret_value_versions_differ_if_same_secret_put_twice(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + first_secret_version_id = first_secret_json_data['VersionId'] + + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + second_secret_version_id = second_secret_json_data['VersionId'] + + assert first_secret_version_id != second_secret_version_id + + +@mock_secretsmanager +def test_can_list_secret_version_ids(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + first_secret_version_id = first_secret_json_data['VersionId'] + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + second_secret_version_id = second_secret_json_data['VersionId'] + + list_secret_versions_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, }, + headers={ + "X-Amz-Target": "secretsmanager.ListSecretVersionIds"}, + ) + + versions_list = json.loads(list_secret_versions_json.data.decode("utf-8")) + + returned_version_ids = [v['VersionId'] for v in versions_list['Versions']] + + assert [first_secret_version_id, second_secret_version_id].sort() == returned_version_ids.sort() + +# # The following tests should work, but fail on the embedded dict in # RotationRules. The error message suggests a problem deeper in the code, which # needs further investigation. 
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index f070625c0..d53ae50f7 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1,1237 +1,1239 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import os - -import boto -import boto3 -import botocore.exceptions -from botocore.exceptions import ClientError -from boto.exception import SQSError -from boto.sqs.message import RawMessage, Message - -from freezegun import freeze_time -import base64 -import json -import sure # noqa -import time -import uuid - -from moto import settings, mock_sqs, mock_sqs_deprecated -from tests.helpers import requires_boto_gte -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises -from nose import SkipTest - - -@mock_sqs -def test_create_fifo_queue_fail(): - sqs = boto3.client('sqs', region_name='us-east-1') - - try: - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'FifoQueue': 'true', - } - ) - except botocore.exceptions.ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should of raised InvalidParameterValue Exception') - - -@mock_sqs -def test_create_queue_with_same_attributes(): - sqs = boto3.client('sqs', region_name='us-east-1') - - dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl'] - dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn'] - - attributes = { - 'DelaySeconds': '900', - 'MaximumMessageSize': '262144', - 'MessageRetentionPeriod': '1209600', - 'ReceiveMessageWaitTimeSeconds': '20', - 'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn), - 'VisibilityTimeout': '43200' - } - - sqs.create_queue( - QueueName='test-queue', - Attributes=attributes - ) - - sqs.create_queue( - QueueName='test-queue', - Attributes=attributes - ) - - -@mock_sqs -def test_create_queue_with_different_attributes_fail(): - sqs = boto3.client('sqs', region_name='us-east-1') - - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'VisibilityTimeout': '10', - } - ) - try: - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'VisibilityTimeout': '60', - } - ) - except botocore.exceptions.ClientError as err: - err.response['Error']['Code'].should.equal('QueueAlreadyExists') - else: - raise RuntimeError('Should of raised QueueAlreadyExists Exception') - - -@mock_sqs -def test_create_fifo_queue(): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - } - ) - queue_url = resp['QueueUrl'] - - response = sqs.get_queue_attributes(QueueUrl=queue_url) - response['Attributes'].should.contain('FifoQueue') - response['Attributes']['FifoQueue'].should.equal('true') - - -@mock_sqs -def test_create_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - new_queue = sqs.create_queue(QueueName='test-queue') - new_queue.should_not.be.none - new_queue.should.have.property('url').should.contain('test-queue') - - queue = sqs.get_queue_by_name(QueueName='test-queue') - queue.attributes.get('QueueArn').should_not.be.none - queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') - queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') - queue.attributes.get('VisibilityTimeout').should_not.be.none - queue.attributes.get('VisibilityTimeout').should.equal('30') - - -@mock_sqs -def test_create_queue_kms(): - sqs = boto3.resource('sqs', 
region_name='us-east-1') - - new_queue = sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'KmsMasterKeyId': 'master-key-id', - 'KmsDataKeyReusePeriodSeconds': '600' - }) - new_queue.should_not.be.none - - queue = sqs.get_queue_by_name(QueueName='test-queue') - - queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') - queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') - - -@mock_sqs -def test_get_nonexistent_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - with assert_raises(ClientError) as err: - sqs.get_queue_by_name(QueueName='nonexisting-queue') - ex = err.exception - ex.operation_name.should.equal('GetQueueUrl') - ex.response['Error']['Code'].should.equal( - 'AWS.SimpleQueueService.NonExistentQueue') - - with assert_raises(ClientError) as err: - sqs.Queue('http://whatever-incorrect-queue-address').load() - ex = err.exception - ex.operation_name.should.equal('GetQueueAttributes') - ex.response['Error']['Code'].should.equal( - 'AWS.SimpleQueueService.NonExistentQueue') - - -@mock_sqs -def test_message_send_without_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp" - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.shouldnt.have.key('MD5OfMessageAttributes') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_message_send_with_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp", - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359900', - 'DataType': 'Number', - } - } - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.get('MD5OfMessageAttributes').should.equal( - '235c5c510d26fb653d073faed50ae77c') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_message_with_complex_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp", - MessageAttributes={ - 'ccc': {'StringValue': 'testjunk', 'DataType': 'String'}, - 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, - 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, - 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} - } - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.get('MD5OfMessageAttributes').should.equal( - '8ae21a7957029ef04146b42aeaa18a22') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_send_message_with_message_group_id(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-group-id.fifo", - Attributes={'FifoQueue': 'true'}) - - sent = queue.send_message( - MessageBody="mydata", - MessageDeduplicationId="dedupe_id_1", - MessageGroupId="group_id_1", - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - message_attributes = messages[0].attributes - message_attributes.should.contain('MessageGroupId') - message_attributes['MessageGroupId'].should.equal('group_id_1') - 
message_attributes.should.contain('MessageDeduplicationId') - message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') - - -@mock_sqs -def test_send_message_with_unicode_characters(): - body_one = 'Héllo!😀' - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message(MessageBody=body_one) - - messages = queue.receive_messages() - message_body = messages[0].body - - message_body.should.equal(body_one) - - -@mock_sqs -def test_set_queue_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - queue.attributes['VisibilityTimeout'].should.equal("30") - - queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) - queue.attributes['VisibilityTimeout'].should.equal("45") - - -@mock_sqs -def test_create_queues_in_multiple_region(): - west1_conn = boto3.client('sqs', region_name='us-west-1') - west1_conn.create_queue(QueueName="blah") - - west2_conn = boto3.client('sqs', region_name='us-west-2') - west2_conn.create_queue(QueueName="test-queue") - - list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) - list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) - - if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:5000' - else: - base_url = 'https://us-west-1.queue.amazonaws.com' - - west1_conn.list_queues()['QueueUrls'][0].should.equal( - '{base_url}/123456789012/blah'.format(base_url=base_url)) - - -@mock_sqs -def test_get_queue_with_prefix(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="prefixa-queue") - conn.create_queue(QueueName="prefixb-queue") - conn.create_queue(QueueName="test-queue") - - conn.list_queues()['QueueUrls'].should.have.length_of(3) - - queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] - queue.should.have.length_of(1) - - if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:5000' - else: - base_url = 'https://us-west-1.queue.amazonaws.com' - - queue[0].should.equal( - "{base_url}/123456789012/test-queue".format(base_url=base_url)) - - -@mock_sqs -def test_delete_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": "3"}) - queue = sqs.Queue('test-queue') - - conn.list_queues()['QueueUrls'].should.have.length_of(1) - - queue.delete() - conn.list_queues().get('QueueUrls').should.equal(None) - - with assert_raises(botocore.exceptions.ClientError): - queue.delete() - - -@mock_sqs -def test_set_queue_attribute(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": '3'}) - - queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('3') - - queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) - queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('45') - - -@mock_sqs -def test_send_receive_message_without_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.send_message(MessageBody=body_one) - queue.send_message(MessageBody=body_two) - - messages = 
conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - - message1 = messages[0] - message2 = messages[1] - - message1['Body'].should.equal(body_one) - message2['Body'].should.equal(body_two) - - message1.shouldnt.have.key('MD5OfMessageAttributes') - message2.shouldnt.have.key('MD5OfMessageAttributes') - - -@mock_sqs -def test_send_receive_message_with_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.send_message( - MessageBody=body_one, - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359900', - 'DataType': 'Number', - } - } - ) - - queue.send_message( - MessageBody=body_two, - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359901', - 'DataType': 'Number', - } - } - ) - - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - - message1 = messages[0] - message2 = messages[1] - - message1.get('Body').should.equal(body_one) - message2.get('Body').should.equal(body_two) - - message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') - message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') - - -@mock_sqs -def test_send_receive_message_timestamps(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - queue.send_message(MessageBody="derp") - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] - - message = messages[0] - sent_timestamp = message.get('Attributes').get('SentTimestamp') - approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') - - int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) - int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) - - -@mock_sqs -def test_max_number_of_messages_invalid_param(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - with assert_raises(ClientError): - queue.receive_messages(MaxNumberOfMessages=11) - - with assert_raises(ClientError): - queue.receive_messages(MaxNumberOfMessages=0) - - # no error but also no messages returned - queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) - - -@mock_sqs -def test_wait_time_seconds_invalid_param(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - with assert_raises(ClientError): - queue.receive_messages(WaitTimeSeconds=-1) - - with assert_raises(ClientError): - queue.receive_messages(WaitTimeSeconds=21) - - # no error but also no messages returned - queue.receive_messages(WaitTimeSeconds=0) - - -@mock_sqs -def test_receive_messages_with_wait_seconds_timeout_of_zero(): - """ - test that zero messages is returned with a wait_seconds_timeout of zero, - previously this created an infinite loop and nothing was returned - :return: - """ - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.equal([]) - - -@mock_sqs_deprecated -def test_send_message_with_xml_characters(): - conn = boto.connect_sqs('the_key', 
'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = '< & >' - - queue.write(queue.new_message(body_one)) - - messages = conn.receive_message(queue, number_messages=1) - - messages[0].get_body().should.equal(body_one) - - -@requires_boto_gte("2.28") -@mock_sqs_deprecated -def test_send_message_with_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body = 'this is a test message' - message = queue.new_message(body) - BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8') - message_attributes = { - 'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'}, - 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY}, - 'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'} - } - message.message_attributes = message_attributes - - queue.write(message) - - messages = conn.receive_message(queue) - - messages[0].get_body().should.equal(body) - - for name, value in message_attributes.items(): - dict(messages[0].message_attributes[name]).should.equal(value) - - -@mock_sqs_deprecated -def test_send_message_with_delay(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.write(queue.new_message(body_one), delay_seconds=3) - queue.write(queue.new_message(body_two)) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=2) - assert len(messages) == 1 - message = messages[0] - assert message.get_body().should.equal(body_two) - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_send_large_message_fails(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'test message' * 200000 - huge_message = queue.new_message(body_one) - - queue.write.when.called_with(huge_message).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_message_becomes_inflight_when_received(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is a test message' - queue.write(queue.new_message(body_one)) - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - queue.count().should.equal(0) - - assert len(messages) == 1 - - # Wait - time.sleep(3) - - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_receive_message_with_explicit_visibility_timeout(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - messages = conn.receive_message( - queue, number_messages=1, visibility_timeout=0) - - assert len(messages) == 1 - - # Message should remain visible - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_change_message_visibility(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - 
body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - queue.count().should.equal(0) - - messages[0].change_visibility(2) - - # Wait - time.sleep(1) - - # Message is not visible - queue.count().should.equal(0) - - time.sleep(2) - - # Message now becomes visible - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - messages[0].delete() - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_message_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - queue.count().should.equal(0) - - assert len(messages) == 1 - - message_attributes = messages[0].attributes - - assert message_attributes.get('ApproximateFirstReceiveTimestamp') - assert int(message_attributes.get('ApproximateReceiveCount')) == 1 - assert message_attributes.get('SentTimestamp') - assert message_attributes.get('SenderId') - - -@mock_sqs_deprecated -def test_read_message_from_queue(): - conn = boto.connect_sqs() - queue = conn.create_queue('testqueue') - queue.set_message_class(RawMessage) - - body = 'foo bar baz' - queue.write(queue.new_message(body)) - message = queue.read(1) - message.get_body().should.equal(body) - - -@mock_sqs_deprecated -def test_queue_length(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is a test message')) - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(2) - - -@mock_sqs_deprecated -def test_delete_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is a test message')) - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(2) - - messages = conn.receive_message(queue, number_messages=1) - assert len(messages) == 1 - messages[0].delete() - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - assert len(messages) == 1 - messages[0].delete() - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_send_batch_operation(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - - # See https://github.com/boto/boto/issues/831 - queue.set_message_class(RawMessage) - - queue.write_batch([ - ("my_first_message", 'test message 1', 0), - ("my_second_message", 'test message 2', 0), - ("my_third_message", 'test message 3', 0), - ]) - - messages = queue.get_messages(3) - messages[0].get_body().should.equal("test message 1") - - # Test that pulling more messages doesn't break anything - messages = queue.get_messages(2) - - -@requires_boto_gte("2.28") -@mock_sqs_deprecated -def test_send_batch_operation_with_message_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - message_tuple = ("my_first_message", 'test 
message 1', 0, { - 'name1': {'data_type': 'String', 'string_value': 'foo'}}) - queue.write_batch([message_tuple]) - - messages = queue.get_messages() - messages[0].get_body().should.equal("test message 1") - - for name, value in message_tuple[3].items(): - dict(messages[0].message_attributes[name]).should.equal(value) - - -@mock_sqs_deprecated -def test_delete_batch_operation(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - - conn.send_message_batch(queue, [ - ("my_first_message", 'test message 1', 0), - ("my_second_message", 'test message 2', 0), - ("my_third_message", 'test message 3', 0), - ]) - - messages = queue.get_messages(2) - queue.delete_message_batch(messages) - - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_queue_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - - queue_name = 'test-queue' - visibility_timeout = 3 - - queue = conn.create_queue( - queue_name, visibility_timeout=visibility_timeout) - - attributes = queue.get_attributes() - - attributes['QueueArn'].should.look_like( - 'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name) - - attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) - - attribute_names = queue.get_attributes().keys() - attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') - attribute_names.should.contain('MessageRetentionPeriod') - attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') - attribute_names.should.contain('MaximumMessageSize') - attribute_names.should.contain('CreatedTimestamp') - attribute_names.should.contain('ApproximateNumberOfMessages') - attribute_names.should.contain('ReceiveMessageWaitTimeSeconds') - attribute_names.should.contain('DelaySeconds') - attribute_names.should.contain('VisibilityTimeout') - attribute_names.should.contain('LastModifiedTimestamp') - attribute_names.should.contain('QueueArn') - - -@mock_sqs_deprecated -def test_change_message_visibility_on_invalid_receipt(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=1) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message = messages[0] - - queue.count().should.equal(0) - - time.sleep(2) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message.change_visibility.when.called_with( - 100).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_change_message_visibility_on_visible_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=1) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message = messages[0] - - queue.count().should.equal(0) - - time.sleep(2) - - queue.count().should.equal(1) - - original_message.change_visibility.when.called_with( - 100).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_purge_action(): - conn = boto.sqs.connect_to_region("us-east-1") - - queue = conn.create_queue('new-queue') - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - - 
queue.purge() - - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_delete_message_after_visibility_timeout(): - VISIBILITY_TIMEOUT = 1 - conn = boto.sqs.connect_to_region("us-east-1") - new_queue = conn.create_queue( - 'new-queue', visibility_timeout=VISIBILITY_TIMEOUT) - - m1 = Message() - m1.set_body('Message 1!') - new_queue.write(m1) - - assert new_queue.count() == 1 - - m1_retrieved = new_queue.read() - - time.sleep(VISIBILITY_TIMEOUT + 1) - - m1_retrieved.delete() - - assert new_queue.count() == 0 - - -@mock_sqs -def test_batch_change_message_visibility(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - with freeze_time("2015-01-01 12:00:00"): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - sqs.send_message(QueueUrl=queue_url, MessageBody='msg1') - sqs.send_message(QueueUrl=queue_url, MessageBody='msg2') - sqs.send_message(QueueUrl=queue_url, MessageBody='msg3') - - with freeze_time("2015-01-01 12:01:00"): - receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) - len(receive_resp['Messages']).should.equal(2) - - handles = [item['ReceiptHandle'] for item in receive_resp['Messages']] - entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles] - - resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries) - len(resp['Successful']).should.equal(2) - - with freeze_time("2015-01-01 14:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(1) - - with freeze_time("2015-01-01 16:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(1) - - with freeze_time("2015-01-02 12:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(3) - - -@mock_sqs -def test_permissions(): - client = boto3.client('sqs', region_name='us-east-1') - - resp = client.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*']) - client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage']) - - with assert_raises(ClientError): - client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish']) - - client.remove_permission(QueueUrl=queue_url, Label='account2') - - with assert_raises(ClientError): - client.remove_permission(QueueUrl=queue_url, Label='non_existant') - - -@mock_sqs -def test_tags(): - client = boto3.client('sqs', region_name='us-east-1') - - resp = client.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - client.tag_queue( - QueueUrl=queue_url, - Tags={ - 'test1': 'value1', - 'test2': 'value2', - } - ) - - resp = client.list_queue_tags(QueueUrl=queue_url) - resp['Tags'].should.contain('test1') - resp['Tags'].should.contain('test2') - - client.untag_queue( - QueueUrl=queue_url, - TagKeys=['test2'] - ) - - resp = client.list_queue_tags(QueueUrl=queue_url) - resp['Tags'].should.contain('test1') - resp['Tags'].should_not.contain('test2') - - -@mock_sqs 
-def test_create_fifo_queue_with_dlq(): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - - resp = sqs.create_queue( - QueueName='test-dlr-queue', - Attributes={'FifoQueue': 'false'} - ) - queue_url2 = resp['QueueUrl'] - queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn'] - - sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) - } - ) - - # Cant have fifo queue with non fifo DLQ - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue2.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2}) - } - ) - - -@mock_sqs -def test_queue_with_dlq(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - sqs = boto3.client('sqs', region_name='us-east-1') - - with freeze_time("2015-01-01 12:00:00"): - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - - resp = sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) - } - ) - queue_url2 = resp['QueueUrl'] - - sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1') - sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2') - - with freeze_time("2015-01-01 13:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - with freeze_time("2015-01-01 13:01:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - with freeze_time("2015-01-01 13:02:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - len(resp['Messages']).should.equal(1) - - resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - # Might as well test list source queues - - resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) - resp['queueUrls'][0].should.equal(queue_url2) - - -@mock_sqs -def test_redrive_policy_available(): - sqs = boto3.client('sqs', region_name='us-east-1') - - resp = sqs.create_queue(QueueName='test-deadletter') - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - redrive_policy = { - 'deadLetterTargetArn': queue_arn1, - 'maxReceiveCount': 1, - } - - resp = sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy) - } - ) - - queue_url2 = resp['QueueUrl'] - attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] - assert 'RedrivePolicy' in attributes - assert json.loads(attributes['RedrivePolicy']) == redrive_policy - - # Cant have redrive policy without maxReceiveCount - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue2', - Attributes={ - 'FifoQueue': 
'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1}) - } - ) - - -@mock_sqs -def test_redrive_policy_non_existent_queue(): - sqs = boto3.client('sqs', region_name='us-east-1') - redrive_policy = { - 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue', - 'maxReceiveCount': 1, - } - - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy) - } - ) - - -@mock_sqs -def test_redrive_policy_set_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - queue = sqs.create_queue(QueueName='test-queue') - deadletter_queue = sqs.create_queue(QueueName='test-deadletter') - - redrive_policy = { - 'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'], - 'maxReceiveCount': 1, - } - - queue.set_attributes(Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy)}) - - copy = sqs.get_queue_by_name(QueueName='test-queue') - assert 'RedrivePolicy' in copy.attributes - copy_policy = json.loads(copy.attributes['RedrivePolicy']) - assert copy_policy == redrive_policy - - -@mock_sqs -def test_receive_messages_with_message_group_id(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) - - # message is now processed, next one should be available - message.delete() - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_receive_messages_with_message_group_id_on_requeue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! 
-
-    messages = queue.receive_messages(WaitTimeSeconds=0)
-    messages.should.have.length_of(0)
-
-    # message is now available again, next one should be available
-    message.change_visibility(VisibilityTimeout=0)
-    messages = queue.receive_messages()
-    messages.should.have.length_of(1)
-    messages[0].message_id.should.equal(message.message_id)
-
-
-@mock_sqs
-def test_receive_messages_with_message_group_id_on_visibility_timeout():
-    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
-        raise SkipTest('Cant manipulate time in server mode')
-
-    with freeze_time("2015-01-01 12:00:00"):
-        sqs = boto3.resource('sqs', region_name='us-east-1')
-        queue = sqs.create_queue(QueueName="test-queue.fifo",
-                                 Attributes={
-                                     'FifoQueue': 'true',
-                                 })
-        queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
-        queue.send_message(
-            MessageBody="message-1",
-            MessageGroupId="group"
-        )
-        queue.send_message(
-            MessageBody="message-2",
-            MessageGroupId="group"
-        )
-
-        messages = queue.receive_messages()
-        messages.should.have.length_of(1)
-        message = messages[0]
-
-        # received message is not deleted!
-
-        messages = queue.receive_messages(WaitTimeSeconds=0)
-        messages.should.have.length_of(0)
-
-        message.change_visibility(VisibilityTimeout=10)
-
-        with freeze_time("2015-01-01 12:00:05"):
-            # no timeout yet
-            messages = queue.receive_messages(WaitTimeSeconds=0)
-            messages.should.have.length_of(0)
-
-        with freeze_time("2015-01-01 12:00:15"):
-            # message is now available again, next one should be available
-            messages = queue.receive_messages()
-            messages.should.have.length_of(1)
-            messages[0].message_id.should.equal(message.message_id)
-
-@mock_sqs
-def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set():
-    sqs = boto3.resource('sqs', region_name='us-east-1')
-
-    queue = sqs.create_queue(
-        QueueName='test-queue',
-        Attributes={
-            'ReceiveMessageWaitTimeSeconds': '2',
-        }
-    )
-
-    queue.receive_messages()
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import os
+
+import boto
+import boto3
+import botocore.exceptions
+from botocore.exceptions import ClientError
+from boto.exception import SQSError
+from boto.sqs.message import RawMessage, Message
+
+from freezegun import freeze_time
+import base64
+import json
+import sure  # noqa
+import time
+import uuid
+
+from moto import settings, mock_sqs, mock_sqs_deprecated
+from tests.helpers import requires_boto_gte
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+from nose import SkipTest
+
+
+@mock_sqs
+def test_create_fifo_queue_fail():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    try:
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'FifoQueue': 'true',
+            }
+        )
+    except botocore.exceptions.ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised InvalidParameterValue Exception')
+
+
+@mock_sqs
+def test_create_queue_with_same_attributes():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl']
+    dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn']
+
+    attributes = {
+        'DelaySeconds': '900',
+        'MaximumMessageSize': '262144',
+        'MessageRetentionPeriod': '1209600',
+        'ReceiveMessageWaitTimeSeconds': '20',
+        'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn),
+        'VisibilityTimeout': '43200'
+    }
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+
+@mock_sqs
+def test_create_queue_with_different_attributes_fail():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'VisibilityTimeout': '10',
+        }
+    )
+    try:
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'VisibilityTimeout': '60',
+            }
+        )
+    except botocore.exceptions.ClientError as err:
+        err.response['Error']['Code'].should.equal('QueueAlreadyExists')
+    else:
+        raise RuntimeError('Should have raised QueueAlreadyExists Exception')
+
+
+@mock_sqs
+def test_create_fifo_queue():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    resp = sqs.create_queue(
+        QueueName='test-queue.fifo',
+        Attributes={
+            'FifoQueue': 'true',
+        }
+    )
+    queue_url = resp['QueueUrl']
+
+    response = sqs.get_queue_attributes(QueueUrl=queue_url)
+    response['Attributes'].should.contain('FifoQueue')
+    response['Attributes']['FifoQueue'].should.equal('true')
+
+
+@mock_sqs
+def test_create_queue():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    new_queue = sqs.create_queue(QueueName='test-queue')
+    new_queue.should_not.be.none
+    new_queue.should.have.property('url').should.contain('test-queue')
+
+    queue = sqs.get_queue_by_name(QueueName='test-queue')
+    queue.attributes.get('QueueArn').should_not.be.none
+    queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue')
+    queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1')
+    queue.attributes.get('VisibilityTimeout').should_not.be.none
+    queue.attributes.get('VisibilityTimeout').should.equal('30')
+
+
+@mock_sqs
+def test_create_queue_kms():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    new_queue = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'KmsMasterKeyId': 'master-key-id',
+            'KmsDataKeyReusePeriodSeconds': '600'
+        })
+    new_queue.should_not.be.none
+
+    queue = sqs.get_queue_by_name(QueueName='test-queue')
+
+    queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id')
+    queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600')
+
+
+@mock_sqs
+def test_get_nonexistent_queue():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    with assert_raises(ClientError) as err:
+        sqs.get_queue_by_name(QueueName='nonexisting-queue')
+    ex = err.exception
+    ex.operation_name.should.equal('GetQueueUrl')
+    ex.response['Error']['Code'].should.equal(
+        'AWS.SimpleQueueService.NonExistentQueue')
+
+    with assert_raises(ClientError) as err:
+        sqs.Queue('http://whatever-incorrect-queue-address').load()
+    ex = err.exception
+    ex.operation_name.should.equal('GetQueueAttributes')
+    ex.response['Error']['Code'].should.equal(
+        'AWS.SimpleQueueService.NonExistentQueue')
+
+
+@mock_sqs
+def test_message_send_without_attributes():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="blah")
+    msg = queue.send_message(
+        MessageBody="derp"
+    )
+    msg.get('MD5OfMessageBody').should.equal(
+        '58fd9edd83341c29f1aebba81c31e257')
+    msg.shouldnt.have.key('MD5OfMessageAttributes')
+    msg.get('MessageId').should_not.contain(' \n')
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+
+
+@mock_sqs
+def test_message_send_with_attributes():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="blah")
+    msg = queue.send_message(
+        MessageBody="derp",
+        MessageAttributes={
+            'timestamp': {
+                'StringValue': '1493147359900',
+
'DataType': 'Number', + } + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '235c5c510d26fb653d073faed50ae77c') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_complex_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'ccc': {'StringValue': 'testjunk', 'DataType': 'String'}, + 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, + 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, + 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '8ae21a7957029ef04146b42aeaa18a22') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_send_message_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-group-id.fifo", + Attributes={'FifoQueue': 'true'}) + + sent = queue.send_message( + MessageBody="mydata", + MessageDeduplicationId="dedupe_id_1", + MessageGroupId="group_id_1", + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + message_attributes = messages[0].attributes + message_attributes.should.contain('MessageGroupId') + message_attributes['MessageGroupId'].should.equal('group_id_1') + message_attributes.should.contain('MessageDeduplicationId') + message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') + + +@mock_sqs +def test_send_message_with_unicode_characters(): + body_one = 'Héllo!😀' + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message(MessageBody=body_one) + + messages = queue.receive_messages() + message_body = messages[0].body + + message_body.should.equal(body_one) + + +@mock_sqs +def test_set_queue_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + + queue.attributes['VisibilityTimeout'].should.equal("30") + + queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) + queue.attributes['VisibilityTimeout'].should.equal("45") + + +@mock_sqs +def test_create_queues_in_multiple_region(): + west1_conn = boto3.client('sqs', region_name='us-west-1') + west1_conn.create_queue(QueueName="blah") + + west2_conn = boto3.client('sqs', region_name='us-west-2') + west2_conn.create_queue(QueueName="test-queue") + + list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) + list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:5000' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + + west1_conn.list_queues()['QueueUrls'][0].should.equal( + '{base_url}/123456789012/blah'.format(base_url=base_url)) + + +@mock_sqs +def test_get_queue_with_prefix(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="prefixa-queue") + conn.create_queue(QueueName="prefixb-queue") + conn.create_queue(QueueName="test-queue") + + conn.list_queues()['QueueUrls'].should.have.length_of(3) + + queue = 
conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] + queue.should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:5000' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + + queue[0].should.equal( + "{base_url}/123456789012/test-queue".format(base_url=base_url)) + + +@mock_sqs +def test_delete_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": "3"}) + queue = sqs.Queue('test-queue') + + conn.list_queues()['QueueUrls'].should.have.length_of(1) + + queue.delete() + conn.list_queues().get('QueueUrls').should.equal(None) + + with assert_raises(botocore.exceptions.ClientError): + queue.delete() + + +@mock_sqs +def test_set_queue_attribute(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": '3'}) + + queue = sqs.Queue("test-queue") + queue.attributes['VisibilityTimeout'].should.equal('3') + + queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) + queue = sqs.Queue("test-queue") + queue.attributes['VisibilityTimeout'].should.equal('45') + + +@mock_sqs +def test_send_receive_message_without_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message(MessageBody=body_one) + queue.send_message(MessageBody=body_two) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1['Body'].should.equal(body_one) + message2['Body'].should.equal(body_two) + + message1.shouldnt.have.key('MD5OfMessageAttributes') + message2.shouldnt.have.key('MD5OfMessageAttributes') + + +@mock_sqs +def test_send_receive_message_with_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) + + queue.send_message( + MessageBody=body_two, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359901', + 'DataType': 'Number', + } + } + ) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1.get('Body').should.equal(body_one) + message2.get('Body').should.equal(body_two) + + message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') + message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') + + +@mock_sqs +def test_send_receive_message_timestamps(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + response = queue.send_message(MessageBody="derp") + assert response['ResponseMetadata']['RequestId'] + + messages = conn.receive_message( + QueueUrl=queue.url, 
MaxNumberOfMessages=1)['Messages'] + + message = messages[0] + sent_timestamp = message.get('Attributes').get('SentTimestamp') + approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') + + int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) + int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) + + +@mock_sqs +def test_max_number_of_messages_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=11) + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=0) + + # no error but also no messages returned + queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) + + +@mock_sqs +def test_wait_time_seconds_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=-1) + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=21) + + # no error but also no messages returned + queue.receive_messages(WaitTimeSeconds=0) + + +@mock_sqs +def test_receive_messages_with_wait_seconds_timeout_of_zero(): + """ + test that zero messages is returned with a wait_seconds_timeout of zero, + previously this created an infinite loop and nothing was returned + :return: + """ + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.equal([]) + + +@mock_sqs_deprecated +def test_send_message_with_xml_characters(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + body_one = '< & >' + + queue.write(queue.new_message(body_one)) + + messages = conn.receive_message(queue, number_messages=1) + + messages[0].get_body().should.equal(body_one) + + +@requires_boto_gte("2.28") +@mock_sqs_deprecated +def test_send_message_with_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + body = 'this is a test message' + message = queue.new_message(body) + BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8') + message_attributes = { + 'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'}, + 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY}, + 'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'} + } + message.message_attributes = message_attributes + + queue.write(message) + + messages = conn.receive_message(queue) + + messages[0].get_body().should.equal(body) + + for name, value in message_attributes.items(): + dict(messages[0].message_attributes[name]).should.equal(value) + + +@mock_sqs_deprecated +def test_send_message_with_delay(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.write(queue.new_message(body_one), delay_seconds=3) + queue.write(queue.new_message(body_two)) + + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=2) + 
assert len(messages) == 1 + message = messages[0] + assert message.get_body().should.equal(body_two) + queue.count().should.equal(0) + + +@mock_sqs_deprecated +def test_send_large_message_fails(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + body_one = 'test message' * 200000 + huge_message = queue.new_message(body_one) + + queue.write.when.called_with(huge_message).should.throw(SQSError) + + +@mock_sqs_deprecated +def test_message_becomes_inflight_when_received(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=2) + queue.set_message_class(RawMessage) + + body_one = 'this is a test message' + queue.write(queue.new_message(body_one)) + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + queue.count().should.equal(0) + + assert len(messages) == 1 + + # Wait + time.sleep(3) + + queue.count().should.equal(1) + + +@mock_sqs_deprecated +def test_receive_message_with_explicit_visibility_timeout(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + body_one = 'this is another test message' + queue.write(queue.new_message(body_one)) + + queue.count().should.equal(1) + messages = conn.receive_message( + queue, number_messages=1, visibility_timeout=0) + + assert len(messages) == 1 + + # Message should remain visible + queue.count().should.equal(1) + + +@mock_sqs_deprecated +def test_change_message_visibility(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=2) + queue.set_message_class(RawMessage) + + body_one = 'this is another test message' + queue.write(queue.new_message(body_one)) + + queue.count().should.equal(1) + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + queue.count().should.equal(0) + + messages[0].change_visibility(2) + + # Wait + time.sleep(1) + + # Message is not visible + queue.count().should.equal(0) + + time.sleep(2) + + # Message now becomes visible + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + messages[0].delete() + queue.count().should.equal(0) + + +@mock_sqs_deprecated +def test_message_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=2) + queue.set_message_class(RawMessage) + + body_one = 'this is another test message' + queue.write(queue.new_message(body_one)) + + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + queue.count().should.equal(0) + + assert len(messages) == 1 + + message_attributes = messages[0].attributes + + assert message_attributes.get('ApproximateFirstReceiveTimestamp') + assert int(message_attributes.get('ApproximateReceiveCount')) == 1 + assert message_attributes.get('SentTimestamp') + assert message_attributes.get('SenderId') + + +@mock_sqs_deprecated +def test_read_message_from_queue(): + conn = boto.connect_sqs() + queue = conn.create_queue('testqueue') + queue.set_message_class(RawMessage) + + body = 'foo bar baz' + queue.write(queue.new_message(body)) + message = queue.read(1) + message.get_body().should.equal(body) + + +@mock_sqs_deprecated +def test_queue_length(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = 
conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is a test message')) + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(2) + + +@mock_sqs_deprecated +def test_delete_message(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is a test message')) + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(2) + + messages = conn.receive_message(queue, number_messages=1) + assert len(messages) == 1 + messages[0].delete() + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + assert len(messages) == 1 + messages[0].delete() + queue.count().should.equal(0) + + +@mock_sqs_deprecated +def test_send_batch_operation(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + + # See https://github.com/boto/boto/issues/831 + queue.set_message_class(RawMessage) + + queue.write_batch([ + ("my_first_message", 'test message 1', 0), + ("my_second_message", 'test message 2', 0), + ("my_third_message", 'test message 3', 0), + ]) + + messages = queue.get_messages(3) + messages[0].get_body().should.equal("test message 1") + + # Test that pulling more messages doesn't break anything + messages = queue.get_messages(2) + + +@requires_boto_gte("2.28") +@mock_sqs_deprecated +def test_send_batch_operation_with_message_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + message_tuple = ("my_first_message", 'test message 1', 0, { + 'name1': {'data_type': 'String', 'string_value': 'foo'}}) + queue.write_batch([message_tuple]) + + messages = queue.get_messages() + messages[0].get_body().should.equal("test message 1") + + for name, value in message_tuple[3].items(): + dict(messages[0].message_attributes[name]).should.equal(value) + + +@mock_sqs_deprecated +def test_delete_batch_operation(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + + conn.send_message_batch(queue, [ + ("my_first_message", 'test message 1', 0), + ("my_second_message", 'test message 2', 0), + ("my_third_message", 'test message 3', 0), + ]) + + messages = queue.get_messages(2) + queue.delete_message_batch(messages) + + queue.count().should.equal(1) + + +@mock_sqs_deprecated +def test_queue_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + + queue_name = 'test-queue' + visibility_timeout = 3 + + queue = conn.create_queue( + queue_name, visibility_timeout=visibility_timeout) + + attributes = queue.get_attributes() + + attributes['QueueArn'].should.look_like( + 'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name) + + attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) + + attribute_names = queue.get_attributes().keys() + attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') + attribute_names.should.contain('MessageRetentionPeriod') + attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') + attribute_names.should.contain('MaximumMessageSize') + attribute_names.should.contain('CreatedTimestamp') + attribute_names.should.contain('ApproximateNumberOfMessages') + 
+@mock_sqs_deprecated
+def test_send_batch_operation():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+
+    # See https://github.com/boto/boto/issues/831
+    queue.set_message_class(RawMessage)
+
+    queue.write_batch([
+        ("my_first_message", 'test message 1', 0),
+        ("my_second_message", 'test message 2', 0),
+        ("my_third_message", 'test message 3', 0),
+    ])
+
+    messages = queue.get_messages(3)
+    messages[0].get_body().should.equal("test message 1")
+
+    # Test that pulling more messages doesn't break anything
+    messages = queue.get_messages(2)
+
+
+@requires_boto_gte("2.28")
+@mock_sqs_deprecated
+def test_send_batch_operation_with_message_attributes():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    message_tuple = ("my_first_message", 'test message 1', 0, {
+        'name1': {'data_type': 'String', 'string_value': 'foo'}})
+    queue.write_batch([message_tuple])
+
+    messages = queue.get_messages()
+    messages[0].get_body().should.equal("test message 1")
+
+    for name, value in message_tuple[3].items():
+        dict(messages[0].message_attributes[name]).should.equal(value)
+
+
+@mock_sqs_deprecated
+def test_delete_batch_operation():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+
+    conn.send_message_batch(queue, [
+        ("my_first_message", 'test message 1', 0),
+        ("my_second_message", 'test message 2', 0),
+        ("my_third_message", 'test message 3', 0),
+    ])
+
+    messages = queue.get_messages(2)
+    queue.delete_message_batch(messages)
+
+    queue.count().should.equal(1)
+
+
+@mock_sqs_deprecated
+def test_queue_attributes():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+
+    queue_name = 'test-queue'
+    visibility_timeout = 3
+
+    queue = conn.create_queue(
+        queue_name, visibility_timeout=visibility_timeout)
+
+    attributes = queue.get_attributes()
+
+    attributes['QueueArn'].should.look_like(
+        'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name)
+
+    attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout))
+
+    attribute_names = queue.get_attributes().keys()
+    attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible')
+    attribute_names.should.contain('MessageRetentionPeriod')
+    attribute_names.should.contain('ApproximateNumberOfMessagesDelayed')
+    attribute_names.should.contain('MaximumMessageSize')
+    attribute_names.should.contain('CreatedTimestamp')
+    attribute_names.should.contain('ApproximateNumberOfMessages')
+    attribute_names.should.contain('ReceiveMessageWaitTimeSeconds')
+    attribute_names.should.contain('DelaySeconds')
+    attribute_names.should.contain('VisibilityTimeout')
+    attribute_names.should.contain('LastModifiedTimestamp')
+    attribute_names.should.contain('QueueArn')
+
+
+@mock_sqs_deprecated
+def test_change_message_visibility_on_invalid_receipt():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=1)
+    queue.set_message_class(RawMessage)
+
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(1)
+    messages = conn.receive_message(queue, number_messages=1)
+
+    assert len(messages) == 1
+
+    original_message = messages[0]
+
+    queue.count().should.equal(0)
+
+    time.sleep(2)
+
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=1)
+
+    assert len(messages) == 1
+
+    # The original receipt handle is stale once the message was re-received
+    original_message.change_visibility.when.called_with(
+        100).should.throw(SQSError)
+
+
+@mock_sqs_deprecated
+def test_change_message_visibility_on_visible_message():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=1)
+    queue.set_message_class(RawMessage)
+
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(1)
+    messages = conn.receive_message(queue, number_messages=1)
+
+    assert len(messages) == 1
+
+    original_message = messages[0]
+
+    queue.count().should.equal(0)
+
+    time.sleep(2)
+
+    queue.count().should.equal(1)
+
+    original_message.change_visibility.when.called_with(
+        100).should.throw(SQSError)
+
+
+@mock_sqs_deprecated
+def test_purge_action():
+    conn = boto.sqs.connect_to_region("us-east-1")
+
+    queue = conn.create_queue('new-queue')
+    queue.write(queue.new_message('this is another test message'))
+    queue.count().should.equal(1)
+
+    queue.purge()
+
+    queue.count().should.equal(0)
+
+
+@mock_sqs_deprecated
+def test_delete_message_after_visibility_timeout():
+    VISIBILITY_TIMEOUT = 1
+    conn = boto.sqs.connect_to_region("us-east-1")
+    new_queue = conn.create_queue(
+        'new-queue', visibility_timeout=VISIBILITY_TIMEOUT)
+
+    m1 = Message()
+    m1.set_body('Message 1!')
+    new_queue.write(m1)
+
+    assert new_queue.count() == 1
+
+    m1_retrieved = new_queue.read()
+
+    time.sleep(VISIBILITY_TIMEOUT + 1)
+
+    m1_retrieved.delete()
+
+    assert new_queue.count() == 0
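+
+
+# The batched visibility change below uses 43200 seconds, the 12-hour
+# maximum VisibilityTimeout that SQS allows.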
+@mock_sqs
+def test_batch_change_message_visibility():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest('Cant manipulate time in server mode')
+
+    with freeze_time("2015-01-01 12:00:00"):
+        sqs = boto3.client('sqs', region_name='us-east-1')
+        resp = sqs.create_queue(
+            QueueName='test-dlr-queue.fifo',
+            Attributes={'FifoQueue': 'true'}
+        )
+        queue_url = resp['QueueUrl']
+
+        sqs.send_message(QueueUrl=queue_url, MessageBody='msg1')
+        sqs.send_message(QueueUrl=queue_url, MessageBody='msg2')
+        sqs.send_message(QueueUrl=queue_url, MessageBody='msg3')
+
+    with freeze_time("2015-01-01 12:01:00"):
+        receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2)
+        len(receive_resp['Messages']).should.equal(2)
+
+        handles = [item['ReceiptHandle'] for item in receive_resp['Messages']]
+        entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles]
+
+        resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries)
+        len(resp['Successful']).should.equal(2)
+
+    with freeze_time("2015-01-01 14:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+        len(resp['Messages']).should.equal(1)
+
+    with freeze_time("2015-01-01 16:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+        len(resp['Messages']).should.equal(1)
+
+    with freeze_time("2015-01-02 12:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3)
+        len(resp['Messages']).should.equal(3)
+
+
+@mock_sqs
+def test_permissions():
+    client = boto3.client('sqs', region_name='us-east-1')
+
+    resp = client.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url = resp['QueueUrl']
+
+    client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*'])
+    client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage'])
+
+    with assert_raises(ClientError):
+        client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish'])
+
+    client.remove_permission(QueueUrl=queue_url, Label='account2')
+
+    with assert_raises(ClientError):
+        client.remove_permission(QueueUrl=queue_url, Label='non_existent')
+
+
+@mock_sqs
+def test_tags():
+    client = boto3.client('sqs', region_name='us-east-1')
+
+    resp = client.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url = resp['QueueUrl']
+
+    client.tag_queue(
+        QueueUrl=queue_url,
+        Tags={
+            'test1': 'value1',
+            'test2': 'value2',
+        }
+    )
+
+    resp = client.list_queue_tags(QueueUrl=queue_url)
+    resp['Tags'].should.contain('test1')
+    resp['Tags'].should.contain('test2')
+
+    client.untag_queue(
+        QueueUrl=queue_url,
+        TagKeys=['test2']
+    )
+
+    resp = client.list_queue_tags(QueueUrl=queue_url)
+    resp['Tags'].should.contain('test1')
+    resp['Tags'].should_not.contain('test2')
+
+
+@mock_sqs
+def test_create_fifo_queue_with_dlq():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    resp = sqs.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url1 = resp['QueueUrl']
+    queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+    resp = sqs.create_queue(
+        QueueName='test-dlr-queue',
+        Attributes={'FifoQueue': 'false'}
+    )
+    queue_url2 = resp['QueueUrl']
+    queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn']
+
+    sqs.create_queue(
+        QueueName='test-queue.fifo',
+        Attributes={
+            'FifoQueue': 'true',
+            'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+        }
+    )
+
+    # Can't have a FIFO queue with a non-FIFO DLQ
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue2.fifo',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2})
+            }
+        )
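+
+
+# Once a message has been received maxReceiveCount times without being
+# deleted, SQS moves it to the dead-letter queue named in the redrive policy.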
+@mock_sqs
+def test_queue_with_dlq():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest('Cant manipulate time in server mode')
+
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    with freeze_time("2015-01-01 12:00:00"):
+        resp = sqs.create_queue(
+            QueueName='test-dlr-queue.fifo',
+            Attributes={'FifoQueue': 'true'}
+        )
+        queue_url1 = resp['QueueUrl']
+        queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+        resp = sqs.create_queue(
+            QueueName='test-queue.fifo',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+            }
+        )
+        queue_url2 = resp['QueueUrl']
+
+        sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1')
+        sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2')
+
+    with freeze_time("2015-01-01 13:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    with freeze_time("2015-01-01 13:01:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    with freeze_time("2015-01-01 13:02:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        len(resp['Messages']).should.equal(1)
+
+        resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    # Might as well test list source queues
+
+    resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1)
+    resp['queueUrls'][0].should.equal(queue_url2)
+
+
+@mock_sqs
+def test_redrive_policy_available():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    resp = sqs.create_queue(QueueName='test-deadletter')
+    queue_url1 = resp['QueueUrl']
+    queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+    redrive_policy = {
+        'deadLetterTargetArn': queue_arn1,
+        'maxReceiveCount': 1,
+    }
+
+    resp = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'RedrivePolicy': json.dumps(redrive_policy)
+        }
+    )
+
+    queue_url2 = resp['QueueUrl']
+    attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']
+    assert 'RedrivePolicy' in attributes
+    assert json.loads(attributes['RedrivePolicy']) == redrive_policy
+
+    # Can't have a redrive policy without maxReceiveCount
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue2',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1})
+            }
+        )
+
+
+@mock_sqs
+def test_redrive_policy_non_existent_queue():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    redrive_policy = {
+        'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue',
+        'maxReceiveCount': 1,
+    }
+
+    # The dead-letter target must exist when the redrive policy is set
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'RedrivePolicy': json.dumps(redrive_policy)
+            }
+        )
+
+
+@mock_sqs
+def test_redrive_policy_set_attributes():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    queue = sqs.create_queue(QueueName='test-queue')
+    deadletter_queue = sqs.create_queue(QueueName='test-deadletter')
+
+    redrive_policy = {
+        'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'],
+        'maxReceiveCount': 1,
+    }
+
+    queue.set_attributes(Attributes={
+        'RedrivePolicy': json.dumps(redrive_policy)})
+
+    copy = sqs.get_queue_by_name(QueueName='test-queue')
+    assert 'RedrivePolicy' in copy.attributes
+    copy_policy = json.loads(copy.attributes['RedrivePolicy'])
+    assert copy_policy == redrive_policy
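+
+
+# FIFO queues deliver messages from the same message group strictly in
+# order: while one message is in flight, the rest of the group stays hidden.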
+@mock_sqs
+def test_receive_messages_with_message_group_id():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="test-queue.fifo",
+                             Attributes={
+                                 'FifoQueue': 'true',
+                             })
+    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+    queue.send_message(
+        MessageBody="message-1",
+        MessageGroupId="group"
+    )
+    queue.send_message(
+        MessageBody="message-2",
+        MessageGroupId="group"
+    )
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    message = messages[0]
+
+    # received message is not deleted!
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.have.length_of(0)
+
+    # message is now processed, next one should be available
+    message.delete()
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+
+
+@mock_sqs
+def test_receive_messages_with_message_group_id_on_requeue():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="test-queue.fifo",
+                             Attributes={
+                                 'FifoQueue': 'true',
+                             })
+    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+    queue.send_message(
+        MessageBody="message-1",
+        MessageGroupId="group"
+    )
+    queue.send_message(
+        MessageBody="message-2",
+        MessageGroupId="group"
+    )
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    message = messages[0]
+
+    # received message is not deleted!
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.have.length_of(0)
+
+    # requeued message becomes available again and is delivered first
+    message.change_visibility(VisibilityTimeout=0)
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    messages[0].message_id.should.equal(message.message_id)
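+
+
+# The group also unblocks when the in-flight message's visibility timeout
+# expires, not only on an explicit delete or requeue.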
+@mock_sqs
+def test_receive_messages_with_message_group_id_on_visibility_timeout():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest('Cant manipulate time in server mode')
+
+    with freeze_time("2015-01-01 12:00:00"):
+        sqs = boto3.resource('sqs', region_name='us-east-1')
+        queue = sqs.create_queue(QueueName="test-queue.fifo",
+                                 Attributes={
+                                     'FifoQueue': 'true',
+                                 })
+        queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+        queue.send_message(
+            MessageBody="message-1",
+            MessageGroupId="group"
+        )
+        queue.send_message(
+            MessageBody="message-2",
+            MessageGroupId="group"
+        )
+
+        messages = queue.receive_messages()
+        messages.should.have.length_of(1)
+        message = messages[0]
+
+        # received message is not deleted!
+
+        messages = queue.receive_messages(WaitTimeSeconds=0)
+        messages.should.have.length_of(0)
+
+        message.change_visibility(VisibilityTimeout=10)
+
+    with freeze_time("2015-01-01 12:00:05"):
+        # no timeout yet
+        messages = queue.receive_messages(WaitTimeSeconds=0)
+        messages.should.have.length_of(0)
+
+    with freeze_time("2015-01-01 12:00:15"):
+        # the timeout has lapsed, so the same message is delivered again
+        messages = queue.receive_messages()
+        messages.should.have.length_of(1)
+        messages[0].message_id.should.equal(message.message_id)
+
+
+@mock_sqs
+def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    queue = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'ReceiveMessageWaitTimeSeconds': '2',
+        }
+    )
+
+    queue.receive_messages()
diff --git a/wait_for.py b/wait_for.py
index cba4bc665..1f291c16b 100755
--- a/wait_for.py
+++ b/wait_for.py
@@ -1,31 +1,32 @@
-import time
-
-try:
-    # py2
-    import urllib2 as urllib
-    from urllib2 import URLError
-    import socket
-    import httplib
-
-    EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine)
-except ImportError:
-    # py3
-    import urllib.request as urllib
-    from urllib.error import URLError
-
-    EXCEPTIONS = (URLError, ConnectionResetError)
-
-
-start_ts = time.time()
-print("Waiting for service to come up")
-while True:
-    try:
-        urllib.urlopen('http://localhost:5000/', timeout=1)
-        break
-    except EXCEPTIONS:
-        elapsed_s = time.time() - start_ts
-        if elapsed_s > 60:
-            raise
-
-        print('.')
-        time.sleep(1)
+import time
+
+try:
+    # py2
+    import urllib2 as urllib
+    from urllib2 import URLError
+    import socket
+    import httplib
+
+    EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine)
+except ImportError:
+    # py3
+    import urllib.request as urllib
+    from urllib.error import URLError
+    import socket
+
+    EXCEPTIONS = (URLError, socket.timeout, ConnectionResetError)
+
+
+start_ts = time.time()
+print("Waiting for service to come up")
+while True:
+    try:
+        urllib.urlopen('http://localhost:5000/', timeout=1)
+        break
+    except EXCEPTIONS:
+        elapsed_s = time.time() - start_ts
+        if elapsed_s > 60:
+            raise
+
+        print('.')
+        time.sleep(1)